/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"
/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);

#define XICS_IPI		2
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY	5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts that
 * aren't marked IRQF_DISABLED
 */
#define IPI_PRIORITY		4
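/*
 * Per-cpu XICS interrupt presentation page. Only xirr (the 4-byte XIRR
 * register, with the CPPR in byte 0) and qirr (the MFRR byte used for
 * IPIs) are touched by the accessors below; xirr_poll and the pad word
 * exist to keep the register offsets right.
 */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};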
static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;
/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
/* Direct HW low level accessors */

static inline int direct_xirr_info_get(int n_cpu)
{
	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static inline void direct_xirr_info_set(int n_cpu, int value)
{
	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static inline void direct_cppr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static inline void direct_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
/* LPAR low level accessors */

static inline long plpar_eoi(unsigned long xirr)
{
	return plpar_hcall_norets(H_EOI, xirr);
}

static inline long plpar_cppr(unsigned long cppr)
{
	return plpar_hcall_norets(H_CPPR, cppr);
}

static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
	return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

static inline long plpar_xirr(unsigned long *xirr_ret)
{
	unsigned long dummy;

	return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}
static inline int lpar_xirr_info_get(int n_cpu)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code xirr - rc = %lx\n", lpar_rc);
	return (int)return_value;
}

static inline void lpar_xirr_info_set(int n_cpu, int value)
{
	unsigned long lpar_rc;
	unsigned long val64 = value & 0xffffffff;

	lpar_rc = plpar_eoi(val64);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
		      val64);
}

static inline void lpar_cppr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
/* High level handlers and init code */
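/*
 * Choose the XICS server a source is routed to: with distribute_irqs,
 * "all cpus" affinity goes to the global distribution server and anything
 * narrower goes to the first online cpu in the mask; without it,
 * everything goes to the boot cpu's server.
 */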
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
	unsigned int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask = irq_desc[irq].affinity;
	cpumask_t tmp = CPU_MASK_NONE;

	if (!distribute_irqs)
		return default_server;

	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		server = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);

		if (cpus_empty(tmp))
			server = default_distrib_server;
		else
			server = get_hard_smp_processor_id(first_cpu(tmp));
	}

	return server;
}
#else
static int get_irq_server(unsigned int irq)
{
	return default_server;
}
#endif
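/*
 * Unmasking a source is a two step RTAS sequence: point its XIVE at a
 * server with DEFAULT_PRIORITY, then enable delivery with ibm,int-on.
 */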
static void xics_unmask_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	unsigned int server;

	irq = virt_irq_to_real(irq_offset_down(virq));
	WARN_ON(irq == NO_IRQ);
	if (irq == XICS_IPI || irq == NO_IRQ)
		return;

	server = get_irq_server(virq);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_set_xive "
		       "returned %d\n", irq, call_status);
		printk(KERN_ERR "set_xive %x, server %x\n", ibm_set_xive,
		       server);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_int_on "
		       "returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_real_irq(unsigned int irq)
{
	int call_status;
	unsigned int server;

	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: "
		       "ibm_int_off returned %d\n", irq, call_status);
		return;
	}

	server = get_irq_server(irq);
	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: ibm_set_xive(0xff)"
		       " returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_irq(unsigned int virq)
{
	unsigned int irq;

	irq = virt_irq_to_real(irq_offset_down(virq));
	WARN_ON(irq == NO_IRQ);
	if (irq != NO_IRQ)
		xics_mask_real_irq(irq);
}
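/*
 * Cache the real -> virtual translation in the radix tree so interrupt
 * entry can avoid real_irq_to_virt_slowpath().
 */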
static void xics_set_irq_revmap(unsigned int virq)
{
	unsigned int irq;

	irq = irq_offset_down(virq);
	if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
			      &virt_irq_to_real_map[irq]) == -ENOMEM)
		printk(KERN_CRIT "Out of memory creating real -> virtual"
		       " IRQ mapping for irq %u (real 0x%x)\n",
		       virq, virt_irq_to_real(irq));
}
static unsigned int xics_startup(unsigned int virq)
{
	xics_set_irq_revmap(virq);
	xics_unmask_irq(virq);
	return 0;
}
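/*
 * The radix tree stores pointers into virt_irq_to_real_map, so the
 * virtual irq number falls out of the pointer arithmetic below.
 */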
static unsigned int real_irq_to_virt(unsigned int real_irq)
{
	unsigned int *ptr;

	ptr = radix_tree_lookup(&irq_map, real_irq);
	if (ptr == NULL)
		return NO_IRQ;
	return ptr - virt_irq_to_real_map;
}
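/*
 * EOI writes (0xff << 24 | real source) to XIRR: the top byte restores
 * the cpu to the least favored priority, the low bits name the source
 * being ended.
 */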
static void xics_eoi_direct(unsigned int irq)
{
	int cpu = smp_processor_id();

	iosync();
	direct_xirr_info_set(cpu, ((0xff << 24) |
				   (virt_irq_to_real(irq_offset_down(irq)))));
}

static void xics_eoi_lpar(unsigned int irq)
{
	int cpu = smp_processor_id();

	iosync();
	lpar_xirr_info_set(cpu, ((0xff << 24) |
				 (virt_irq_to_real(irq_offset_down(irq)))));
}
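/*
 * Translate a vector read from XIRR into a Linux irq number; unknown
 * vectors are masked at the source so they cannot keep firing.
 */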
static inline int xics_remap_irq(int vec)
{
	int irq;

	vec &= 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	irq = real_irq_to_virt(vec);
	if (irq == NO_IRQ)
		irq = real_irq_to_virt_slowpath(vec);
	if (likely(irq != NO_IRQ))
		return irq_offset_up(irq);

	printk(KERN_ERR "Interrupt %u (real) is invalid,"
	       " disabling it.\n", vec);
	xics_mask_real_irq(vec);
	return NO_IRQ;
}
static int xics_get_irq_direct(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(direct_xirr_info_get(cpu));
}

static int xics_get_irq_lpar(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(lpar_xirr_info_get(cpu));
}
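/*
 * XICS has one IPI source per cpu, so senders encode the message type as
 * bits in xics_ipi_message[cpu] and the dispatcher below drains every set
 * bit before returning.
 */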
#ifdef CONFIG_SMP

static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
{
	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
		}
		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
		}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
		}
#endif
	}
	return IRQ_HANDLED;
}
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id,
					  struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	direct_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu, regs);
}

static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id,
					struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	lpar_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu, regs);
}

void xics_cause_IPI(int cpu)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, IPI_PRIORITY);
	else
		direct_qirr_info(cpu, IPI_PRIORITY);
}

#endif /* CONFIG_SMP */
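/*
 * Write this cpu's CPPR. Only sources with a more favored (numerically
 * lower) priority are presented: 0xff accepts everything, 0 rejects
 * everything, and DEFAULT_PRIORITY admits IPIs but not external sources.
 */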
static void xics_set_cpu_priority(int cpu, unsigned char cppr)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_cppr_info(cpu, cppr);
	else
		direct_cppr_info(cpu, cppr);
	iosync();
}
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	unsigned long newmask;
	cpumask_t tmp = CPU_MASK_NONE;

	irq = virt_irq_to_real(irq_offset_down(virq));
	if (irq == XICS_IPI || irq == NO_IRQ)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		newmask = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);
		if (cpus_empty(tmp))
			return;
		newmask = get_hard_smp_processor_id(first_cpu(tmp));
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   irq, newmask, xics_status[1]);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}
static struct irq_chip xics_pic_direct = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_direct,
	.set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_lpar,
	.set_affinity = xics_set_affinity
};
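/*
 * Per-cpu bringup: open this cpu's CPPR to accept all priorities and put
 * it on the global interrupt queue so distributed sources can be routed
 * here.
 */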
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	xics_set_cpu_priority(cpu, 0xff);

	/*
	 * Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 *
	 * XXX: undo of teardown on kexec needs this too, as may hotplug
	 */
	rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}
void xics_init_IRQ(void)
{
	int i;
	unsigned long intr_size = 0;
	struct device_node *np;
	uint *ireg, ilen, indx = 0;
	unsigned long intr_base = 0;
	struct xics_interrupt_node {
		unsigned long addr;
		unsigned long size;
	} intnodes[NR_CPUS];
	struct irq_chip *chip;
	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");
	np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
	if (!np)
		panic("xics_init_IRQ: can't find interrupt presentation");

nextnode:
	ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		indx = *ireg;
	}

	ireg = (uint *)get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");

	while (ilen) {
		intnodes[indx].addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(uint);
		intnodes[indx].addr |= *ireg++;
		ilen -= sizeof(uint);
		intnodes[indx].size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(uint);
		intnodes[indx].size |= *ireg++;
		ilen -= sizeof(uint);
		indx++;
		if (indx >= NR_CPUS)
			break;
	}

	np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
	if ((indx < NR_CPUS) && np)
		goto nextnode;
	/* Find the server numbers for the boot cpu. */
	for (np = of_find_node_by_type(NULL, "cpu");
	     np;
	     np = of_find_node_by_type(np, "cpu")) {
		ireg = (uint *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
			ireg = (uint *)get_property(np,
					"ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				/* take last element */
				default_distrib_server = ireg[i-1];
			}
			ireg = (uint *)get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (ireg)
				interrupt_server_size = *ireg;
			break;
		}
	}
	of_node_put(np);
	intr_base = intnodes[0].addr;
	intr_size = intnodes[0].size;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		ppc_md.get_irq = xics_get_irq_lpar;
		chip = &xics_pic_lpar;
	} else {
#ifdef CONFIG_SMP
		for_each_possible_cpu(i) {
			int hard_id;

			/* FIXME: Do this dynamically! --RR */
			if (!cpu_present(i))
				continue;

			hard_id = get_hard_smp_processor_id(i);
			xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
						  intnodes[hard_id].size);
		}
#else
		xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
		ppc_md.get_irq = xics_get_irq_direct;
		chip = &xics_pic_direct;
	}
	for (i = irq_offset_value(); i < NR_IRQS; ++i) {
		/*
		 * All IRQs on XICS are level for now. MSI code may want to
		 * modify that for reporting purposes.
		 */
		get_irq_desc(i)->status |= IRQ_LEVEL;
		set_irq_chip_and_handler(i, chip, handle_fasteoi_irq);
	}

	xics_setup_cpu();

	ppc64_boot_msg(0x21, "XICS Done");
}
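/*
 * Legacy ISA interrupts arrive through an i8259 cascaded off a single
 * XICS source; hook that source up as a chained handler once the device
 * tree has told us where it is.
 */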
static int xics_setup_8259_cascade(void)
{
	struct device_node *np;
	uint *ireg;

	np = of_find_node_by_type(NULL, "interrupt-controller");
	if (np == NULL) {
		printk(KERN_WARNING "xics: no ISA interrupt controller\n");
		xics_irq_8259_cascade_real = -1;
		xics_irq_8259_cascade = -1;
		return 0;
	}

	ireg = (uint *) get_property(np, "interrupts", NULL);
	if (!ireg)
		panic("xics_init_IRQ: can't find ISA interrupts property");

	xics_irq_8259_cascade_real = *ireg;
	xics_irq_8259_cascade = irq_offset_up
		(virt_irq_create_mapping(xics_irq_8259_cascade_real));
	i8259_init(0, 0);
	of_node_put(np);

	xics_set_irq_revmap(xics_irq_8259_cascade);
	set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade);

	return 0;
}
arch_initcall(xics_setup_8259_cascade);
#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
	virt_irq_to_real_map[XICS_IPI] = XICS_IPI;

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar,
			    SA_INTERRUPT, "IPI", NULL);
	else
		request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct,
			    SA_INTERRUPT, "IPI", NULL);
}
#endif /* CONFIG_SMP */
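/*
 * Quiesce this cpu's presentation logic (kexec, cpu offline): reject all
 * sources via CPPR, clear any IPI left pending, and take secondaries out
 * of the global interrupt queue.
 */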
void xics_teardown_cpu(int secondary)
{
	struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI));
	int cpu = smp_processor_id();

	xics_set_cpu_priority(cpu, 0);

	/*
	 * we need to EOI the IPI if we got here from kexec down IPI
	 *
	 * probably need to check all the other interrupts too
	 * should we be flagging idle loop instead?
	 * or creating some task to be scheduled?
	 */
	if (desc->chip && desc->chip->eoi)
		desc->chip->eoi(XICS_IPI);

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
			(1UL << interrupt_server_size) - 1 -
			default_distrib_server, 0);
}
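/*
 * On cpu offline, any source whose XIVE still targets this cpu must be
 * re-pointed before the cpu disappears; affinity is reset to all cpus so
 * delivery falls back to the distribution queue.
 */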
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	unsigned int irq, virq, cpu = smp_processor_id();

	/* Reject any interrupt that was queued to us... */
	xics_set_cpu_priority(cpu, 0);

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);
	for_each_irq(virq) {
		struct irq_desc *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < irq_offset_value())
			continue;

		desc = get_irq_desc(virq);
		irq = virt_irq_to_real(irq_offset_down(virq));

		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == NO_IRQ)
			continue;

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->chip == NULL
		    || desc->action == NULL
		    || desc->chip->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
					"ibm,get-xive returns %d\n",
					virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		desc->chip->set_affinity(virq, CPU_MASK_ALL);
		desc->affinity = CPU_MASK_ALL;
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */