static struct irqaction irq_resched = {
.handler = ipi_resched_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED|IRQF_PERCPU,
.name = "IPI_resched"
};
static struct irqaction irq_call = {
.handler = ipi_call_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED|IRQF_PERCPU,
.name = "IPI_call"
};
setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);
- /* need to mark IPI's as IRQ_PER_CPU */
- irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
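+ /*
+ * IRQF_PERCPU on the irqactions above makes setup_irq() mark these
+ * descriptors IRQ_PER_CPU; handle_percpu_irq then dispatches them on
+ * the local CPU without the masking done for shared interrupts.
+ */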
set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
- irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}
void prom_init_secondary(void)
{
+ /* Enable per-cpu interrupts */
+
+ /* This is Malta specific: IPI, performance and timer interrupts */
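+ /*
+ * IP0/IP1 are the CP0 software interrupts used for IPIs, IP7 is the
+ * CPU timer (Count/Compare) and IP6 is the line the performance
+ * counter interrupt is expected to be routed to on this board.
+ */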
write_c0_status((read_c0_status() & ~ST0_IM ) |
- (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
+ (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}
void prom_smp_finish(void)
EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int mipsxx_perfcount_irq;
+EXPORT_SYMBOL(mipsxx_perfcount_irq);
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and, on a pre-R2 core (!r2), there is no Cause.TI bit to tell us
+ * whether the timer interrupt has also fired, then skip the timer
+ * check.
+ */
+ return (mipsxx_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
asmlinkage void ll_timer_interrupt(int irq)
{
int r2 = cpu_has_mips_r2;
irq_enter();
kstat_this_cpu.irqs[irq]++;
- /*
- * Suckage alert:
- * Before R2 of the architecture there was no way to see if a
- * performance counter interrupt was pending, so we have to run the
- * performance counter interrupt handler anyway.
- */
- if (!r2 || (read_c0_cause() & (1 << 26)))
- if (perf_irq())
- goto out;
+ if (handle_perf_irq(r2))
+ goto out;
- /* we keep interrupt disabled all the time */
- if (!r2 || (read_c0_cause() & (1 << 30)))
- timer_interrupt(irq, NULL);
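+ /*
+ * On R2 cores Cause.TI (bit 30) indicates a pending CP0 timer
+ * interrupt; pre-R2 cores have no such bit, so the timer handler
+ * is always run there.
+ */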
+ if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+ goto out;
+
+ timer_interrupt(irq, NULL);
out:
irq_exit();
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
.name = "timer",
};
unsigned long cpu_khz;
-#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
-
static int mips_cpu_timer_irq;
+extern int mipsxx_perfcount_irq;
extern void smtc_timer_broadcast(int);
static void mips_timer_dispatch(void)
do_IRQ(mips_cpu_timer_irq);
}
+static void mips_perf_dispatch(void)
+{
+ do_IRQ(mipsxx_perfcount_irq);
+}
+
/*
* Redeclare until I get around mopping the timer code insanity on MIPS.
*/
extern int (*perf_irq)(void);
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and, on a pre-R2 core (!r2), there is no Cause.TI bit to tell us
+ * whether the timer interrupt has also fired, then skip the timer
+ * check.
+ */
+ return (mipsxx_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
{
int cpu = smp_processor_id();
* We could be here due to timer interrupt,
* perf counter overflow, or both.
*/
- if (read_c0_cause() & (1 << 26))
- perf_irq();
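+ /*
+ * SMTC requires the MT ASE and hence an R2 core, so pass r2 = 1;
+ * the Cause.TI check below decides whether the timer work is done.
+ */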
+ (void) handle_perf_irq(1);
if (read_c0_cause() & (1 << 30)) {
/*
#else /* CONFIG_MIPS_MT_SMTC */
int r2 = cpu_has_mips_r2;
+ if (handle_perf_irq(r2))
+ goto out;
+
+ if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+ goto out;
+
if (cpu == 0) {
/*
* CPU 0 handles the global timer interrupt job and process
* accounting resets count/compare registers to trigger next
* timer int.
*/
- if (!r2 || (read_c0_cause() & (1 << 26)))
- if (perf_irq())
- goto out;
-
- /* we keep interrupt disabled all the time */
- if (!r2 || (read_c0_cause() & (1 << 30)))
- timer_interrupt(irq, NULL);
+ timer_interrupt(irq, NULL);
} else {
/* Everyone else needs to reset the timer int here as
ll_local_timer_interrupt doesn't */
mips_scroll_message();
}
-void __init plat_timer_setup(struct irqaction *irq)
+irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
{
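+ /* Call through the pluggable perf_irq handler pointer */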
+ return perf_irq();
+}
+
+static struct irqaction perf_irqaction = {
+ .handler = mips_perf_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "performance",
+};
+
+void __init plat_perf_setup(struct irqaction *irq)
+{
+ int hwint = 0;
+ mipsxx_perfcount_irq = -1;
+
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
- set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
- mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
+ set_vi_handler (MSC01E_INT_PERFCTR, mips_perf_dispatch);
+ mipsxx_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
} else
#endif
- {
- if (cpu_has_vint)
- set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
- mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
+ if (cpu_has_mips_r2) {
+ /*
+ * Read IntCtl.IPPCI (bits 28:26) to determine which
+ * interrupt pin the performance counter is routed to
+ */
+ hwint = (read_c0_intctl () >> 26) & 7;
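+ /*
+ * If the counter is routed to the same pin as the CPU timer it
+ * shares that interrupt and mipsxx_perfcount_irq stays at -1.
+ */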
+ if (hwint != MIPSCPU_INT_CPUCTR) {
+ if (cpu_has_vint)
+ set_vi_handler (hwint, mips_perf_dispatch);
+ mipsxx_perfcount_irq = MIPSCPU_INT_BASE + hwint;
+ }
+ }
+ if (mipsxx_perfcount_irq >= 0) {
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_irq_smtc(mipsxx_perfcount_irq, irq, 0x100 << hwint);
+#else
+ setup_irq(mipsxx_perfcount_irq, irq);
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_SMP
+ set_irq_handler(mipsxx_perfcount_irq, handle_percpu_irq);
+#endif
}
+}
+
+void __init plat_timer_setup(struct irqaction *irq)
+{
+ int hwint = 0;
+ if (cpu_has_veic) {
+ set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
+ mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
+ } else {
+ if (cpu_has_mips_r2)
+ /*
+ * Read IntCtl.IPTI (bits 31:29) to determine the timer interrupt pin
+ */
+ hwint = (read_c0_intctl () >> 29) & 7;
+ else
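+ /*
+ * Pre-R2 cores have no IntCtl register, so fall back to the
+ * traditional fixed routing
+ */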
+ hwint = MIPSCPU_INT_CPUCTR;
+ if (cpu_has_vint)
+ set_vi_handler (hwint, mips_timer_dispatch);
+ mips_cpu_timer_irq = MIPSCPU_INT_BASE + hwint;
+ }
/* we are using the cpu counter for timer interrupts */
irq->handler = mips_timer_interrupt; /* we use our own handler */
#ifdef CONFIG_MIPS_MT_SMTC
- setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
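+ /* 0x100 << hwint is the Status/Cause IM bit for interrupt pin hwint */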
+ setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << hwint);
#else
setup_irq(mips_cpu_timer_irq, irq);
#endif /* CONFIG_MIPS_MT_SMTC */
-
#ifdef CONFIG_SMP
- /* irq_desc(riptor) is a global resource, when the interrupt overlaps
- on seperate cpu's the first one tries to handle the second interrupt.
- The effect is that the int remains disabled on the second cpu.
- Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
- irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
+
+ plat_perf_setup(&perf_irqaction);
}