X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Finterrupt.h;h=dea7598aeff43c641d72ac18aa506a8fcac9ea57;hb=060195500e0347a6ba8ea89739a9898961eb6f2b;hp=5a8ba0b8ccbae860bc8d36b1f6082d94e51402fb;hpb=58a3bb59973e33a428d72fa530a3d1d81feb0e8f;p=linux-2.6

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5a8ba0b8cc..dea7598aef 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,8 +11,6 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
@@ -42,6 +40,11 @@
  * IRQF_SHARED - allow sharing the irq among several devices
  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
+ * IRQF_PERCPU - Interrupt is per cpu
+ * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
+ * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
+ *                registered first in a shared interrupt is considered for
+ *                performance reasons)
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -49,22 +52,8 @@
 #define IRQF_PROBE_SHARED	0x00000100
 #define IRQF_TIMER		0x00000200
 #define IRQF_PERCPU		0x00000400
-
-/*
- * Migration helpers. Scheduled for removal in 1/2007
- * Do not use for new code !
- */
-#define SA_INTERRUPT		IRQF_DISABLED
-#define SA_SAMPLE_RANDOM	IRQF_SAMPLE_RANDOM
-#define SA_SHIRQ		IRQF_SHARED
-#define SA_PROBEIRQ		IRQF_PROBE_SHARED
-#define SA_PERCPU		IRQF_PERCPU
-
-#define SA_TRIGGER_LOW		IRQF_TRIGGER_LOW
-#define SA_TRIGGER_HIGH		IRQF_TRIGGER_HIGH
-#define SA_TRIGGER_FALLING	IRQF_TRIGGER_FALLING
-#define SA_TRIGGER_RISING	IRQF_TRIGGER_RISING
-#define SA_TRIGGER_MASK		IRQF_TRIGGER_MASK
+#define IRQF_NOBALANCING	0x00000800
+#define IRQF_IRQPOLL		0x00001000
 
 typedef irqreturn_t (*irq_handler_t)(int, void *);
 
@@ -80,11 +69,13 @@ struct irqaction {
 };
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
-extern int request_irq(unsigned int, irq_handler_t handler,
+extern int __must_check request_irq(unsigned int, irq_handler_t handler,
 		       unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);
 
-extern int devm_request_irq(struct device *dev, unsigned int irq,
+struct device;
+
+extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
 			    irq_handler_t handler, unsigned long irqflags,
 			    const char *devname, void *dev_id);
 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
@@ -107,11 +98,11 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 # define local_irq_enable_in_hardirq()	local_irq_enable()
 #endif
 
-#ifdef CONFIG_GENERIC_HARDIRQS
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 
+#ifdef CONFIG_GENERIC_HARDIRQS
 /*
  * Special lockdep variants of irq disabling/enabling.
  * These should be used for locking constructs that
@@ -182,12 +173,25 @@ static inline int disable_irq_wake(unsigned int irq)
  * validator need to define the methods below in their asm/irq.h
  * files, under an #ifdef CONFIG_LOCKDEP section.
  */
-# ifndef CONFIG_LOCKDEP
+#ifndef CONFIG_LOCKDEP
 # define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
+						disable_irq_nosync(irq)
 # define disable_irq_lockdep(irq)		disable_irq(irq)
 # define enable_irq_lockdep(irq)		enable_irq(irq)
+# define enable_irq_lockdep_irqrestore(irq, flags) \
+						enable_irq(irq)
 # endif
 
+static inline int enable_irq_wake(unsigned int irq)
+{
+	return 0;
+}
+
+static inline int disable_irq_wake(unsigned int irq)
+{
+	return 0;
+}
 
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
@@ -224,6 +228,16 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
+/* Some architectures might implement lazy enabling/disabling of
+ * interrupts. In some cases, such as stop_machine, we might want
+ * to ensure that after a local_irq_disable(), interrupts have
+ * really been disabled in hardware. Such architectures need to
+ * implement the following hook.
+ */
+#ifndef hard_irq_disable
+#define hard_irq_disable()	do { } while(0)
+#endif
+
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
    tasklets are more than enough. F.e. all serial device BHs et
@@ -239,6 +253,10 @@ enum
 	BLOCK_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
+#ifdef CONFIG_HIGH_RES_TIMERS
+	HRTIMER_SOFTIRQ,
+#endif
+	RCU_SOFTIRQ, 	/* Preferable RCU should always be the last softirq */
 };
 
 /* softirq mask and active fields moved to irq_cpustat_t in
@@ -417,4 +435,15 @@
 extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
 extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
 #endif
+#ifdef CONFIG_PROC_FS
+/* Initialize /proc/irq/ */
+extern void init_irq_proc(void);
+#else
+static inline void init_irq_proc(void)
+{
+}
+#endif
+
+int show_interrupts(struct seq_file *p, void *v);
+
 #endif
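
For reference, below is a minimal usage sketch of the request_irq()/free_irq() interface whose declarations this patch touches: the irq_handler_t prototype, the new __must_check annotation on request_irq(), and the IRQF_SHARED flag documented above. The my_device structure, the my_device_irq_pending() helper, the "my_device" name string and the setup/teardown functions are hypothetical placeholders for a driver, not part of the kernel; only the interrupt.h declarations shown in the diff are assumed.

#include <linux/types.h>
#include <linux/interrupt.h>

struct my_device {
	int irq;	/* hypothetical: interrupt line assigned to the device */
};

/* Hypothetical stand-in for checking the device's interrupt status register. */
static bool my_device_irq_pending(struct my_device *dev)
{
	return true;
}

/* Handler matching the irq_handler_t typedef: irqreturn_t (*)(int, void *). */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;

	/*
	 * On an IRQF_SHARED line the handler must report IRQ_NONE when its
	 * device did not raise the interrupt, so the other registered
	 * actions on the line get a chance to run.
	 */
	if (!my_device_irq_pending(dev))
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int my_device_setup(struct my_device *dev)
{
	int ret;

	/*
	 * request_irq() is marked __must_check after this change, so the
	 * return value has to be handled. dev is passed as dev_id so that
	 * free_irq() can identify this action on a shared line.
	 */
	ret = request_irq(dev->irq, my_isr, IRQF_SHARED, "my_device", dev);
	if (ret)
		return ret;

	return 0;
}

static void my_device_teardown(struct my_device *dev)
{
	free_irq(dev->irq, dev);
}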