X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-mips%2Firq.h;h=a58f0eecc68f91b5d5a0a1c2ce6d4617dc5ba42d;hb=13c48c490208d9e70d8d66d56f96c5054db69af7;hp=2cb52cf8bd4ebe378b2e6f37890dc857a330dca1;hpb=731aa5fd9971a5163845fbe55de63d686a11da0a;p=linux-2.6

diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 2cb52cf8bd..a58f0eecc6 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -46,6 +46,38 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 