* These tables map IA-64 vectors to the IOSAPIC pin that generates this
* vector.
*/
+
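+/* refcnt value for an RTE that stays registered but currently has no users */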
+#define NO_REF_RTE 0
+
static struct iosapic {
char __iomem *addr; /* base address of IOSAPIC */
unsigned int gsi_base; /* GSI base */
static struct iosapic_intr_info {
struct list_head rtes; /* RTEs using this vector (empty =>
* not an IOSAPIC interrupt) */
- int count; /* # of RTEs that shares this vector */
+ int count; /* # of registered RTEs */
u32 low32; /* current value of low word of
* Redirection table entry */
unsigned int dest; /* destination CPU physical ID */
gsi_to_vector (unsigned int gsi)
{
int irq = __gsi_to_irq(gsi);
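+ /* check_irq_used() also rejects an irq that is not marked as in use */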
- if (irq < 0)
+ if (check_irq_used(irq) < 0)
return -1;
return irq_to_vector(irq);
}
int rte_index;
struct iosapic_rte_info *rte;
- if (list_empty(&iosapic_intr_info[irq].rtes))
+ if (!iosapic_intr_info[irq].count)
return; /* not an IOSAPIC interrupt! */
/* set only the mask bit */
int rte_index;
struct iosapic_rte_info *rte;
- if (list_empty(&iosapic_intr_info[irq].rtes))
+ if (!iosapic_intr_info[irq].count)
return; /* not an IOSAPIC interrupt! */
low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
irq &= (~IA64_IRQ_REDIRECTED);
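+ /* drop CPUs that are not online from the requested mask */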
+ cpus_and(mask, mask, cpu_online_map);
if (cpus_empty(mask))
return;
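+ /* move the irq's vector into the target CPU's vector domain; give up if that fails */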
+ if (reassign_irq_vector(irq, first_cpu(mask)))
+ return;
+
dest = cpu_physical_id(first_cpu(mask));
- if (list_empty(&iosapic_intr_info[irq].rtes))
+ if (!iosapic_intr_info[irq].count)
return; /* not an IOSAPIC interrupt */
set_irq_affinity_info(irq, dest, redir);
else
/* change delivery mode to fixed */
low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
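+ /* the vector may have changed above, so rewrite the vector field of the RTE */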
+ low32 &= IOSAPIC_VECTOR_MASK;
+ low32 |= irq_to_vector(irq);
iosapic_intr_info[irq].low32 = low32;
iosapic_intr_info[irq].dest = dest;
{
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
+ int do_unmask_irq = 0;
+
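+ /* if an affinity change is pending, mask the irq before EOI; the actual
+ * move happens below, after EOI, with the irq still masked */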
+ if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+ do_unmask_irq = 1;
+ mask_irq(irq);
+ }
- move_native_irq(irq);
list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
iosapic_eoi(rte->iosapic->addr, vec);
+
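+ /* perform the deferred affinity change while the irq is still masked,
+ * then unmask it */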
+ if (unlikely(do_unmask_irq)) {
+ move_masked_irq(irq);
+ unmask_irq(irq);
+ }
}
#define iosapic_shutdown_level_irq mask_irq
for (i = 0; i < NR_IRQS; i++) {
info = &iosapic_intr_info[i];
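+ /* only consider irqs whose existing handlers allow sharing */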
if (info->trigger == trigger && info->polarity == pol &&
- (info->dmode == IOSAPIC_FIXED || info->dmode ==
- IOSAPIC_LOWEST_PRIORITY)) {
+ (info->dmode == IOSAPIC_FIXED ||
+ info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
+ can_request_irq(i, IRQF_SHARED)) {
if (min_count == -1 || info->count < min_count) {
irq = i;
min_count = info->count;
{
int new_irq;
- if (!list_empty(&iosapic_intr_info[irq].rtes)) {
+ if (iosapic_intr_info[irq].count) {
new_irq = create_irq();
if (new_irq < 0)
panic("%s: out of interrupt vectors!\n", __FUNCTION__);
}
}
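+/* iosapic_alloc_rte() may call the __init bootmem allocator before kmalloc
+ * is usable; __init_refok marks that reference as intentional */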
-static struct iosapic_rte_info *iosapic_alloc_rte (void)
+static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
{
int i;
struct iosapic_rte_info *rte;
return rte;
}
-static void iosapic_free_rte (struct iosapic_rte_info *rte)
-{
- if (rte->flags & RTE_PREALLOCATED)
- list_add_tail(&rte->rte_list, &free_rte_list);
- else
- kfree(rte);
-}
-
static inline int irq_is_shared (int irq)
{
return (iosapic_intr_info[irq].count > 1);
iosapic_intr_info[irq].count++;
iosapic_lists[index].rtes_inuse++;
}
- else if (irq_is_shared(irq)) {
+ else if (rte->refcnt == NO_REF_RTE) {
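+ /* the RTE was parked by an earlier unregister; revive it, but only if
+ * its trigger/polarity do not conflict with other registered RTEs */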
struct iosapic_intr_info *info = &iosapic_intr_info[irq];
- if (info->trigger != trigger || info->polarity != polarity) {
+ if (info->count > 0 &&
+ (info->trigger != trigger || info->polarity != polarity)) {
printk (KERN_WARNING
"%s: cannot override the interrupt\n",
__FUNCTION__);
return -EINVAL;
}
+ rte->refcnt++;
+ iosapic_intr_info[irq].count++;
+ iosapic_lists[index].rtes_inuse++;
}
iosapic_intr_info[irq].polarity = polarity;
#ifdef CONFIG_SMP
static int cpu = -1;
extern int cpe_vector;
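+ /* with per-CPU vector domains the irq's vector is only valid on CPUs
+ * in its domain, so the destination must be picked from that domain */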
+ cpumask_t domain = irq_to_domain(irq);
/*
* In the case of a vector shared by multiple RTEs, all RTEs that
* share the vector need to use the same destination CPU.
*/
- if (!list_empty(&iosapic_intr_info[irq].rtes))
+ if (iosapic_intr_info[irq].count)
return iosapic_intr_info[irq].dest;
/*
goto skip_numa_setup;
cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-
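+ /* restrict the node's CPUs to the irq's vector domain */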
+ cpus_and(cpu_mask, cpu_mask, domain);
for_each_cpu_mask(numa_cpu, cpu_mask) {
if (!cpu_online(numa_cpu))
cpu_clear(numa_cpu, cpu_mask);
do {
if (++cpu >= NR_CPUS)
cpu = 0;
- } while (!cpu_online(cpu));
+ } while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
return cpu_physical_id(cpu);
#else /* CONFIG_SMP */
irq = __gsi_to_irq(gsi);
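+ /*
+ * If the GSI already maps to an irq that has no registered RTEs,
+ * re-initialize it (assign a vector, reset the descriptor). If its
+ * RTE is still referenced, just take another reference and return.
+ */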
if (irq > 0) {
rte = find_rte(irq, gsi);
- rte->refcnt++;
- goto unlock_iosapic_lock;
- }
+ if (iosapic_intr_info[irq].count == 0) {
+ assign_irq_vector(irq);
+ dynamic_irq_init(irq);
+ } else if (rte->refcnt != NO_REF_RTE) {
+ rte->refcnt++;
+ goto unlock_iosapic_lock;
+ }
+ } else
+ irq = create_irq();
/* If vectors are running out, try to find a sharable one */
- irq = create_irq();
if (irq < 0) {
irq = iosapic_find_sharable_irq(trigger, polarity);
if (irq < 0)
err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
polarity, trigger);
if (err < 0) {
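+ /* registration failed: drop the descriptor lock before unwinding */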
+ spin_unlock(&irq_desc[irq].lock);
irq = err;
- goto unlock_all;
+ goto unlock_iosapic_lock;
}
/*
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- unlock_all:
+
spin_unlock(&irq_desc[irq].lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
if (--rte->refcnt > 0)
goto out;
- /* Remove the rte entry from the list */
idesc = irq_desc + irq;
- spin_lock(&idesc->lock);
- list_del(&rte->rte_list);
- spin_unlock(&idesc->lock);
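+ /* keep the RTE on the list, marked unreferenced, so it can be revived
+ * if this GSI is registered again */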
+ rte->refcnt = NO_REF_RTE;
/* Mask the interrupt */
low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
iosapic_intr_info[irq].count--;
- iosapic_free_rte(rte);
index = find_iosapic(gsi);
iosapic_lists[index].rtes_inuse--;
WARN_ON(iosapic_lists[index].rtes_inuse < 0);
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- if (list_empty(&iosapic_intr_info[irq].rtes)) {
- /* Sanity check */
- BUG_ON(iosapic_intr_info[irq].count);
+ if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
/* Clear affinity */
cpus_setall(idesc->affinity);
#endif
/* Clear the interrupt information */
- memset(&iosapic_intr_info[irq], 0,
- sizeof(struct iosapic_intr_info));
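+ /* clear the fields one by one; a memset would also wipe the rtes list
+ * that still links the parked RTE */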
+ iosapic_intr_info[irq].dest = 0;
+ iosapic_intr_info[irq].dmode = 0;
+ iosapic_intr_info[irq].polarity = 0;
+ iosapic_intr_info[irq].trigger = 0;
iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
- /* Destroy IRQ */
- destroy_irq(irq);
+ /* Destroy and reserve IRQ */
+ destroy_and_reserve_irq(irq);
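+ /* keeping the irq reserved lets the parked RTE be re-bound to the
+ * same irq number later */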
}
out:
spin_unlock_irqrestore(&iosapic_lock, flags);
switch (int_type) {
case ACPI_INTERRUPT_PMI:
- vector = iosapic_vector;
- irq = vector; /* FIXME */
+ irq = vector = iosapic_vector;
+ bind_irq_vector(irq, vector, CPU_MASK_ALL);
/*
* since PMI vector is alloc'd by FW(ACPI) not by kernel,
* we need to make sure the vector is available
delivery = IOSAPIC_INIT;
break;
case ACPI_INTERRUPT_CPEI:
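+ /* the CPE vector is predefined, so bind the irq 1:1 to it on all CPUs */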
- vector = IA64_CPE_VECTOR;
- irq = vector; /* FIXME */
+ irq = vector = IA64_CPE_VECTOR;
+ BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
delivery = IOSAPIC_LOWEST_PRIORITY;
mask = 1;
break;
int vector, irq;
unsigned int dest = cpu_physical_id(smp_processor_id());
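+ /* legacy ISA irqs keep their fixed irq == vector identity mapping,
+ * valid on every CPU */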
- vector = isa_irq_to_vector(isa_irq);
- irq = vector; /* FIXME */
+ irq = vector = isa_irq_to_vector(isa_irq);
+ BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
/* mark as unused */
INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
+
+ iosapic_intr_info[irq].count = 0;
}
pcat_compat = system_pcat_compat;