/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"

/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);

#define XICS_IPI                2
#define XICS_IRQ_SPURIOUS       0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY        5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked IRQF_DISABLED.
 */
#define IPI_PRIORITY            4

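/*
 * Note on priorities: XICS serves numerically lower values first,
 * and 0xff is the least favored value, used below both to mask a
 * source and to fully open a cpu's CPPR.  IPI_PRIORITY is thus more
 * urgent than DEFAULT_PRIORITY.
 */

/*
 * Per-cpu interrupt presentation registers as they appear in the
 * memory map: XIRR poll, XIRR, and QIRR.  The unions let byte 0 be
 * addressed on its own: byte 0 of XIRR is the CPPR, and byte 0 of
 * QIRR is the MFRR written to trigger an IPI.
 */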
struct xics_ipl {
        union {
                u32 word;
                u8 bytes[4];
        } xirr_poll;
        union {
                u32 word;
                u8 bytes[4];
        } xirr;
        u32 dummy;
        union {
                u32 word;
                u8 bytes[4];
        } qirr;
};

static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;


/* Direct HW low level accessors */

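/*
 * Reading XIRR accepts the highest priority pending interrupt for
 * this cpu: the low 24 bits identify the source (XISR) and the top
 * byte is the CPPR at the time of acceptance.  Writing XIRR back is
 * the EOI, which also restores the CPPR.
 */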
static inline int direct_xirr_info_get(int n_cpu)
{
        return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static inline void direct_xirr_info_set(int n_cpu, int value)
{
        out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static inline void direct_cppr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static inline void direct_qirr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}


/* LPAR low level accessors */

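/*
 * Under LPAR the hypervisor owns the presentation layer, so instead
 * of the MMIO accesses above the equivalent operations are the
 * H_XIRR, H_CPPR, H_IPI and H_EOI hypervisor calls.
 */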
static inline long plpar_eoi(unsigned long xirr)
{
        return plpar_hcall_norets(H_EOI, xirr);
}

static inline long plpar_cppr(unsigned long cppr)
{
        return plpar_hcall_norets(H_CPPR, cppr);
}

static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
        return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

static inline long plpar_xirr(unsigned long *xirr_ret)
{
        unsigned long dummy;
        return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}

static inline int lpar_xirr_info_get(int n_cpu)
{
        unsigned long lpar_rc;
        unsigned long return_value;

        lpar_rc = plpar_xirr(&return_value);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code xirr - rc = %lx\n", lpar_rc);
        return (int)return_value;
}

static inline void lpar_xirr_info_set(int n_cpu, int value)
{
        unsigned long lpar_rc;
        unsigned long val64 = value & 0xffffffff;

        lpar_rc = plpar_eoi(val64);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code EOI - rc = %lx, value=%lx\n", lpar_rc,
                      val64);
}

static inline void lpar_cppr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_cppr(value);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code qirr - rc = %lx\n", lpar_rc);
}


/* High level handlers and init code */


#ifdef CONFIG_SMP
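/*
 * Pick the interrupt server for an irq: the boot cpu's server when
 * interrupts are not distributed, the global distribution server
 * when the affinity mask covers all cpus, otherwise the first online
 * cpu in the mask.
 */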
static int get_irq_server(unsigned int irq)
{
        unsigned int server;
        /* For the moment only implement delivery to all cpus or one cpu */
        cpumask_t cpumask = irq_desc[irq].affinity;
        cpumask_t tmp = CPU_MASK_NONE;

        if (!distribute_irqs)
                return default_server;

        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                server = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);

                if (cpus_empty(tmp))
                        server = default_distrib_server;
                else
                        server = get_hard_smp_processor_id(first_cpu(tmp));
        }

        return server;
}
#else
static int get_irq_server(unsigned int irq)
{
        return default_server;
}
#endif

static void xics_unmask_irq(unsigned int virq)
{
        unsigned int irq;
        int call_status;
        unsigned int server;

        irq = virt_irq_to_real(irq_offset_down(virq));
        WARN_ON(irq == NO_IRQ);
        if (irq == XICS_IPI || irq == NO_IRQ)
                return;

        server = get_irq_server(virq);

        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
        if (call_status != 0) {
                printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_set_xive "
                       "returned %d\n", irq, call_status);
                printk(KERN_ERR "set_xive %x, server %x\n", ibm_set_xive,
                       server);
                return;
        }

        /* Now unmask the interrupt (often a no-op) */
        call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_int_on "
                       "returned %d\n", irq, call_status);
                return;
        }
}

static void xics_mask_real_irq(unsigned int irq)
{
        int call_status;
        unsigned int server;

        if (irq == XICS_IPI)
                return;

        call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_mask_real_irq: irq=%u: "
                       "ibm_int_off returned %d\n", irq, call_status);
                return;
        }

        server = get_irq_server(irq);
        /* Have to set XIVE to 0xff to be able to remove a slot */
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
        if (call_status != 0) {
                printk(KERN_ERR "xics_mask_real_irq: irq=%u: ibm_set_xive(0xff)"
                       " returned %d\n", irq, call_status);
                return;
        }
}

static void xics_mask_irq(unsigned int virq)
{
        unsigned int irq;

        irq = virt_irq_to_real(irq_offset_down(virq));
        WARN_ON(irq == NO_IRQ);
        if (irq != NO_IRQ)
                xics_mask_real_irq(irq);
}

static void xics_set_irq_revmap(unsigned int virq)
{
        unsigned int irq;

        irq = irq_offset_down(virq);
        if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
                              &virt_irq_to_real_map[irq]) == -ENOMEM)
                printk(KERN_CRIT "Out of memory creating real -> virtual"
                       " IRQ mapping for irq %u (real 0x%x)\n",
                       virq, virt_irq_to_real(irq));
}

static unsigned int xics_startup(unsigned int virq)
{
        xics_set_irq_revmap(virq);
        xics_unmask_irq(virq);
        return 0;
}

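/*
 * Reverse map: the radix tree stores pointers into
 * virt_irq_to_real_map[], so the virtual irq number is just the
 * offset of the returned pointer from the base of that array.
 */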
static unsigned int real_irq_to_virt(unsigned int real_irq)
{
        unsigned int *ptr;

        ptr = radix_tree_lookup(&irq_map, real_irq);
        if (ptr == NULL)
                return NO_IRQ;
        return ptr - virt_irq_to_real_map;
}

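/*
 * EOI is a write of the accepted XIRR value back with 0xff (the
 * reset CPPR) in the top byte; iosync() orders any MMIO done by the
 * handler ahead of the EOI.
 */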
static void xics_eoi_direct(unsigned int irq)
{
        int cpu = smp_processor_id();

        iosync();
        direct_xirr_info_set(cpu, ((0xff << 24) |
                                   (virt_irq_to_real(irq_offset_down(irq)))));
}

static void xics_eoi_lpar(unsigned int irq)
{
        int cpu = smp_processor_id();

        iosync();
        lpar_xirr_info_set(cpu, ((0xff << 24) |
                                 (virt_irq_to_real(irq_offset_down(irq)))));
}

static inline int xics_remap_irq(int vec)
{
        int irq;

        vec &= 0x00ffffff;

        if (vec == XICS_IRQ_SPURIOUS)
                return NO_IRQ;

        irq = real_irq_to_virt(vec);
        if (irq == NO_IRQ)
                irq = real_irq_to_virt_slowpath(vec);
        if (likely(irq != NO_IRQ))
                return irq_offset_up(irq);

        printk(KERN_ERR "Interrupt %u (real) is invalid,"
               " disabling it.\n", vec);
        xics_mask_real_irq(vec);
        return NO_IRQ;
}

static int xics_get_irq_direct(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();

        return xics_remap_irq(direct_xirr_info_get(cpu));
}

static int xics_get_irq_lpar(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();

        return xics_remap_irq(lpar_xirr_info_get(cpu));
}

#ifdef CONFIG_SMP

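/*
 * XICS has a single IPI source per cpu, so the sender encodes the
 * message type in xics_ipi_message[cpu]; the action handlers below
 * first retire the IPI (qirr = 0xff) and then decode and dispatch
 * every pending message.
 */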
static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
{
        WARN_ON(cpu_is_offline(cpu));

        while (xics_ipi_message[cpu].value) {
                if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
                }
                if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_RESCHEDULE, regs);
                }
#if 0
                if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
                }
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
                if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
                }
#endif
        }
        return IRQ_HANDLED;
}

static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id,
                                          struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        direct_qirr_info(cpu, 0xff);

        return xics_ipi_dispatch(cpu, regs);
}

static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id,
                                        struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        lpar_qirr_info(cpu, 0xff);

        return xics_ipi_dispatch(cpu, regs);
}

void xics_cause_IPI(int cpu)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                lpar_qirr_info(cpu, IPI_PRIORITY);
        else
                direct_qirr_info(cpu, IPI_PRIORITY);
}

#endif /* CONFIG_SMP */

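/* Set this cpu's CPPR: 0xff accepts everything, 0 rejects everything. */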
static void xics_set_cpu_priority(int cpu, unsigned char cppr)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                lpar_cppr_info(cpu, cppr);
        else
                direct_cppr_info(cpu, cppr);
        iosync();
}

static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
        unsigned int irq;
        int status;
        int xics_status[2];
        unsigned long newmask;
        cpumask_t tmp = CPU_MASK_NONE;

        irq = virt_irq_to_real(irq_offset_down(virq));
        if (irq == XICS_IPI || irq == NO_IRQ)
                return;

        status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
                       "returns %d\n", irq, status);
                return;
        }

        /* For the moment only implement delivery to all cpus or one cpu */
        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                newmask = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);
                if (cpus_empty(tmp))
                        return;
                newmask = get_hard_smp_processor_id(first_cpu(tmp));
        }

        status = rtas_call(ibm_set_xive, 3, 1, NULL,
                           irq, newmask, xics_status[1]);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
                       "returns %d\n", irq, status);
                return;
        }
}

static struct irq_chip xics_pic_direct = {
        .typename = " XICS     ",
        .startup = xics_startup,
        .mask = xics_mask_irq,
        .unmask = xics_unmask_irq,
        .eoi = xics_eoi_direct,
        .set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
        .typename = " XICS     ",
        .startup = xics_startup,
        .mask = xics_mask_irq,
        .unmask = xics_unmask_irq,
        .eoi = xics_eoi_lpar,
        .set_affinity = xics_set_affinity
};

void xics_setup_cpu(void)
{
        int cpu = smp_processor_id();

        xics_set_cpu_priority(cpu, 0xff);

        /*
         * Put the calling processor into the GIQ.  This is really only
         * necessary from a secondary thread as the OF start-cpu interface
         * performs this function for us on primary threads.
         *
         * XXX: undo of teardown on kexec needs this too, as may hotplug
         */
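        /*
         * The indicator index below is the all-ones mask of
         * interrupt_server_size bits minus the global distribution
         * server number; xics_teardown_cpu() and
         * xics_migrate_irqs_away() clear the same indicator.
         */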
        rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}

void xics_init_IRQ(void)
{
        int i;
        unsigned long intr_size = 0;
        struct device_node *np;
        uint *ireg, ilen, indx = 0;
        unsigned long intr_base = 0;
        struct xics_interrupt_node {
                unsigned long addr;
                unsigned long size;
        } intnodes[NR_CPUS];
        struct irq_chip *chip;

        ppc64_boot_msg(0x20, "XICS Init");

        ibm_get_xive = rtas_token("ibm,get-xive");
        ibm_set_xive = rtas_token("ibm,set-xive");
        ibm_int_on  = rtas_token("ibm,int-on");
        ibm_int_off = rtas_token("ibm,int-off");

        np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
        if (!np)
                panic("xics_init_IRQ: can't find interrupt presentation");

nextnode:
        ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
        if (ireg) {
                /*
                 * set the starting index for this node
                 */
                indx = *ireg;
        }

        ireg = (uint *)get_property(np, "reg", &ilen);
        if (!ireg)
                panic("xics_init_IRQ: can't find interrupt reg property");

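        /*
         * "reg" is a list of (64-bit address, 64-bit size) pairs,
         * four 32-bit cells per entry, indexed by hardware cpu id.
         */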
        while (ilen) {
                intnodes[indx].addr = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(uint);
                intnodes[indx].addr |= *ireg++;
                ilen -= sizeof(uint);
                intnodes[indx].size = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(uint);
                intnodes[indx].size |= *ireg++;
                ilen -= sizeof(uint);
                indx++;
                if (indx >= NR_CPUS)
                        break;
        }

        np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
        if ((indx < NR_CPUS) && np)
                goto nextnode;

        /* Find the server numbers for the boot cpu. */
        for (np = of_find_node_by_type(NULL, "cpu");
             np;
             np = of_find_node_by_type(np, "cpu")) {
                ireg = (uint *)get_property(np, "reg", &ilen);
                if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
                        ireg = (uint *)get_property(np,
                                        "ibm,ppc-interrupt-gserver#s", &ilen);
                        i = ilen / sizeof(int);
                        if (ireg && i > 0) {
                                default_server = ireg[0];
                                /* take last element */
                                default_distrib_server = ireg[i-1];
                        }
                        ireg = (uint *)get_property(np,
                                        "ibm,interrupt-server#-size", NULL);
                        if (ireg)
                                interrupt_server_size = *ireg;
                        break;
                }
        }
        of_node_put(np);

        intr_base = intnodes[0].addr;
        intr_size = intnodes[0].size;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                ppc_md.get_irq = xics_get_irq_lpar;
                chip = &xics_pic_lpar;
        } else {
#ifdef CONFIG_SMP
                for_each_possible_cpu(i) {
                        int hard_id;

                        /* FIXME: Do this dynamically! --RR */
                        if (!cpu_present(i))
                                continue;

                        hard_id = get_hard_smp_processor_id(i);
                        xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
                                                  intnodes[hard_id].size);
                }
#else
                xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
                ppc_md.get_irq = xics_get_irq_direct;
                chip = &xics_pic_direct;
        }

        for (i = irq_offset_value(); i < NR_IRQS; ++i) {
                /*
                 * All IRQs on XICS are level for now.  MSI code may want
                 * to modify that for reporting purposes.
                 */
                get_irq_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, chip, handle_fasteoi_irq);
        }

        xics_setup_cpu();

        ppc64_boot_msg(0x21, "XICS Done");
}

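/*
 * On machines with an ISA bridge the legacy 8259 is cascaded through
 * a XICS source: find the controller node, init the 8259 and chain
 * its handler off the cascade virq.
 */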
static int xics_setup_8259_cascade(void)
{
        struct device_node *np;
        uint *ireg;

        np = of_find_node_by_type(NULL, "interrupt-controller");
        if (np == NULL) {
                printk(KERN_WARNING "xics: no ISA interrupt controller\n");
                xics_irq_8259_cascade_real = -1;
                xics_irq_8259_cascade = -1;
                return 0;
        }

        ireg = (uint *) get_property(np, "interrupts", NULL);
        if (!ireg)
                panic("xics_setup_8259_cascade: can't find ISA interrupts property");

        xics_irq_8259_cascade_real = *ireg;
        xics_irq_8259_cascade = irq_offset_up
                (virt_irq_create_mapping(xics_irq_8259_cascade_real));
        i8259_init(0, 0);
        of_node_put(np);

        xics_set_irq_revmap(xics_irq_8259_cascade);
        set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade);

        return 0;
}
arch_initcall(xics_setup_8259_cascade);

#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
        virt_irq_to_real_map[XICS_IPI] = XICS_IPI;

        /*
         * IPIs are marked SA_INTERRUPT (IRQF_DISABLED) as they must
         * run with irqs disabled
         */
        set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq);
        if (firmware_has_feature(FW_FEATURE_LPAR))
                request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar,
                            SA_INTERRUPT, "IPI", NULL);
        else
                request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct,
                            SA_INTERRUPT, "IPI", NULL);
}
#endif /* CONFIG_SMP */

void xics_teardown_cpu(int secondary)
{
        struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI));
        int cpu = smp_processor_id();

        xics_set_cpu_priority(cpu, 0);

        /*
         * We need to EOI the IPI if we got here from a kexec shutdown
         * IPI.
         *
         * We probably need to check all the other interrupts too;
         * should we be flagging the idle loop instead, or creating
         * some task to be scheduled?
         */
        if (desc->chip && desc->chip->eoi)
                desc->chip->eoi(XICS_IPI);

        /*
         * Some machines need to have at least one cpu in the GIQ,
         * so leave the master cpu in the group.
         */
        if (secondary)
                rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                        (1UL << interrupt_server_size) - 1 -
                        default_distrib_server, 0);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
        int status;
        unsigned int irq, virq, cpu = smp_processor_id();

        /* Reject any interrupt that was queued to us... */
        xics_set_cpu_priority(cpu, 0);

        /* remove ourselves from the global interrupt queue */
        status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
        WARN_ON(status < 0);

        /* Allow IPIs again... */
        xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);

        for_each_irq(virq) {
                struct irq_desc *desc;
                int xics_status[2];
                unsigned long flags;

                /* We can't set affinity on ISA interrupts */
                if (virq < irq_offset_value())
                        continue;

                desc = get_irq_desc(virq);
                irq = virt_irq_to_real(irq_offset_down(virq));

                /* We still need to get IPIs. */
                if (irq == XICS_IPI || irq == NO_IRQ)
                        continue;

                /* We only need to migrate enabled IRQs */
                if (desc == NULL || desc->chip == NULL
                    || desc->action == NULL
                    || desc->chip->set_affinity == NULL)
                        continue;

                spin_lock_irqsave(&desc->lock, flags);

                status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
                if (status) {
                        printk(KERN_ERR "migrate_irqs_away: irq=%u "
                                        "ibm,get-xive returns %d\n",
                                        virq, status);
                        goto unlock;
                }

                /*
                 * We only support delivery to all cpus or to one cpu.
                 * The irq has to be migrated only in the single cpu
                 * case.
                 */
                if (xics_status[0] != get_hard_smp_processor_id(cpu))
                        goto unlock;

                printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
                       virq, cpu);

                /* Reset affinity to all cpus */
                desc->chip->set_affinity(virq, CPU_MASK_ALL);
                irq_desc[virq].affinity = CPU_MASK_ALL;
unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
        }
}
#endif