3 * Purpose: PCI Message Signaled Interrupt (MSI)
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
10 #include <linux/irq.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/ioport.h>
14 #include <linux/smp_lock.h>
15 #include <linux/pci.h>
16 #include <linux/proc_fs.h>
18 #include <asm/errno.h>
/*
 * File-scope MSI bookkeeping, guarded by msi_lock: the per-vector
 * descriptor table plus allocation/reservation counters used when
 * balancing vectors between MSI and MSI-X devices.
 * NOTE(review): this listing has interior lines elided throughout the
 * file; comments flag the gaps where relevant.
 */
25 static DEFINE_SPINLOCK(msi_lock);
26 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
27 static kmem_cache_t* msi_cachep;
/* Global MSI enable switch; cleared by pci_no_msi(). */
29 static int pci_msi_enable = 1;
30 static int last_alloc_vector;
31 static int nr_released_vectors;
32 static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
33 static int nr_msix_devices;
/* Without an IO-APIC the arch does not provide vector_irq[]; define it. */
35 #ifndef CONFIG_X86_IO_APIC
36 int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
/* Arch-specific MSI callbacks (setup/teardown/target), set by msi_register(). */
39 static struct msi_ops *msi_ops;
/* NOTE(review): return type and body of msi_register are elided here. */
42 msi_register(struct msi_ops *ops)
/* Slab constructor: zero-fill a freshly allocated struct msi_desc. */
48 static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
50 memset(p, 0, sizeof(struct msi_desc))&#59;
/*
 * Create the slab cache backing struct msi_desc allocations.
 * NOTE(review): the NULL-check on kmem_cache_create and the return
 * statement are elided from this listing — presumably -ENOMEM on failure.
 */
53 static int msi_cache_init(void)
55 msi_cachep = kmem_cache_create("msi_cache",
56 sizeof(struct msi_desc),
57 0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL);
/*
 * Set (flag=1) or clear (flag=0) the mask bit for @vector.
 * MSI keeps the mask in config space (mask_base encodes the config
 * offset); MSI-X keeps it in the memory-mapped vector-control word.
 */
64 static void msi_set_mask_bit(unsigned int vector, int flag)
66 struct msi_desc *entry;
68 entry = (struct msi_desc *)msi_desc[vector];
/* Bail out for vectors with no device or no mask-bit support. */
69 if (!entry || !entry->dev || !entry->mask_base)
71 switch (entry->msi_attrib.type) {
/* PCI_CAP_ID_MSI case: mask register lives in config space. */
77 pos = (long)entry->mask_base;
78 pci_read_config_dword(entry->dev, pos, &mask_bits);
/* NOTE(review): the read-modify-write of mask_bits is elided here. */
81 pci_write_config_dword(entry->dev, pos, mask_bits);
/* PCI_CAP_ID_MSIX case: write flag into the table entry's control word. */
86 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
87 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
88 writel(flag, entry->mask_base + offset);
/*
 * Retarget @vector's MSI/MSI-X message address at the first CPU in
 * @cpu_mask, via the arch msi_ops->target() hook.  Only built on SMP;
 * the #else branch stubs it out as NULL.
 */
97 static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
99 struct msi_desc *entry;
100 u32 address_hi, address_lo;
101 unsigned int irq = vector;
102 unsigned int dest_cpu = first_cpu(cpu_mask);
104 entry = (struct msi_desc *)msi_desc[vector];
105 if (!entry || !entry->dev)
108 switch (entry->msi_attrib.type) {
/* MSI: read the address from config space, rewrite it retargeted. */
111 int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);
116 pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
118 pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
121 msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
123 pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
125 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
127 set_native_irq_info(irq, cpu_mask);
130 case PCI_CAP_ID_MSIX:
/* MSI-X: same dance against the memory-mapped table entry. */
133 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
134 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
136 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
137 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
139 address_hi = readl(entry->mask_base + offset_hi);
140 address_lo = readl(entry->mask_base + offset_lo);
142 msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
144 writel(address_hi, entry->mask_base + offset_hi);
145 writel(address_lo, entry->mask_base + offset_lo);
146 set_native_irq_info(irq, cpu_mask);
/* !CONFIG_SMP: no affinity support. */
154 #define set_msi_affinity NULL
155 #endif /* CONFIG_SMP */
/* Thin wrappers used as irq-chip mask/unmask callbacks. */
157 static void mask_MSI_irq(unsigned int vector)
159 msi_set_mask_bit(vector, 1);
162 static void unmask_MSI_irq(unsigned int vector)
164 msi_set_mask_bit(vector, 0);
/*
 * irq-chip ->startup for MSI vectors without a mask bit: just mark the
 * descriptor active under msi_lock.  Returns 0 (never anything pending).
 */
167 static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
169 struct msi_desc *entry;
172 spin_lock_irqsave(&msi_lock, flags);
173 entry = msi_desc[vector];
/* NOTE(review): the early-return value for a stale vector is elided. */
174 if (!entry || !entry->dev) {
175 spin_unlock_irqrestore(&msi_lock, flags);
178 entry->msi_attrib.state = 1; /* Mark it active */
179 spin_unlock_irqrestore(&msi_lock, flags);
181 return 0; /* never anything pending */
/* Mask-bit variant: additionally unmask the vector after activating it. */
184 static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
186 startup_msi_irq_wo_maskbit(vector);
187 unmask_MSI_irq(vector);
188 return 0; /* never anything pending */
/* irq-chip ->shutdown: mark the descriptor inactive under msi_lock. */
191 static void shutdown_msi_irq(unsigned int vector)
193 struct msi_desc *entry;
196 spin_lock_irqsave(&msi_lock, flags);
197 entry = msi_desc[vector];
198 if (entry && entry->dev)
199 entry->msi_attrib.state = 0; /* Mark it not active */
200 spin_unlock_irqrestore(&msi_lock, flags);
/* ->end for maskbit-less MSI: only handle pending migration. */
203 static void end_msi_irq_wo_maskbit(unsigned int vector)
205 move_native_irq(vector);
/* ->end for maskbit MSI/MSI-X: migrate if needed, then unmask. */
209 static void end_msi_irq_w_maskbit(unsigned int vector)
211 move_native_irq(vector);
212 unmask_MSI_irq(vector);
/* No-op enable/disable hook for maskbit-less MSI. */
216 static void do_nothing(unsigned int vector)
/*
 * irq-chip descriptors for the three MSI flavours.  They differ only
 * in which mask/startup/end callbacks are wired in.
 */
221 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
222 * which implement the MSI-X Capability Structure.
224 static struct hw_interrupt_type msix_irq_type = {
225 .typename = "PCI-MSI-X",
226 .startup = startup_msi_irq_w_maskbit,
227 .shutdown = shutdown_msi_irq,
228 .enable = unmask_MSI_irq,
229 .disable = mask_MSI_irq,
231 .end = end_msi_irq_w_maskbit,
232 .set_affinity = set_msi_affinity
236 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
237 * which implement the MSI Capability Structure with
238 * Mask-and-Pending Bits.
240 static struct hw_interrupt_type msi_irq_w_maskbit_type = {
241 .typename = "PCI-MSI",
242 .startup = startup_msi_irq_w_maskbit,
243 .shutdown = shutdown_msi_irq,
244 .enable = unmask_MSI_irq,
245 .disable = mask_MSI_irq,
247 .end = end_msi_irq_w_maskbit,
248 .set_affinity = set_msi_affinity
252 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
253 * which implement the MSI Capability Structure without
254 * Mask-and-Pending Bits.
256 static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
257 .typename = "PCI-MSI",
258 .startup = startup_msi_irq_wo_maskbit,
259 .shutdown = shutdown_msi_irq,
/* No mask bit to toggle, so enable/disable are no-ops. */
260 .enable = do_nothing,
261 .disable = do_nothing,
263 .end = end_msi_irq_wo_maskbit,
264 .set_affinity = set_msi_affinity
267 static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
/*
 * Allocate one interrupt vector for MSI use.  While fresh vectors are
 * available it takes the next one from assign_irq_vector(); once the
 * pool is exhausted it recycles vectors released by hotplug removal
 * (vector_irq[v] == 0), freeing any stale linked MSI-X vectors first.
 */
268 static int assign_msi_vector(void)
270 static int new_vector_avail = 1;
275 * msi_lock is provided to ensure that successful allocation of MSI
276 * vector is assigned unique among drivers.
278 spin_lock_irqsave(&msi_lock, flags);
280 if (!new_vector_avail) {
284 * vector_irq[] = -1 indicates that this specific vector is:
285 * - assigned for MSI (since MSI have no associated IRQ) or
286 * - assigned for legacy if less than 16, or
287 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
288 * vector_irq[] = 0 indicates that this vector, previously
289 * assigned for MSI, is freed by hotplug removed operations.
290 * This vector will be reused for any subsequent hotplug added
292 * vector_irq[] > 0 indicates that this vector is assigned for
293 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
294 * vector-to-IOxAPIC IRQ mapping.
296 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
297 if (vector_irq[vector] != 0)
299 free_vector = vector;
300 if (!msi_desc[vector])
/* NOTE(review): the no-free-vector error return is elided here. */
306 spin_unlock_irqrestore(&msi_lock, flags);
/* Claim the recycled vector and account for it. */
309 vector_irq[free_vector] = -1;
310 nr_released_vectors--;
311 spin_unlock_irqrestore(&msi_lock, flags);
312 if (msi_desc[free_vector] != NULL) {
316 /* free all linked vectors before re-assign */
318 spin_lock_irqsave(&msi_lock, flags);
319 dev = msi_desc[free_vector]->dev;
320 tail = msi_desc[free_vector]->link.tail;
321 spin_unlock_irqrestore(&msi_lock, flags);
322 msi_free_vector(dev, tail, 1);
323 } while (free_vector != tail);
/* Fresh-vector path: take the next vector from the global allocator. */
328 vector = assign_irq_vector(AUTO_ASSIGN);
329 last_alloc_vector = vector;
330 if (vector == LAST_DEVICE_VECTOR)
331 new_vector_avail = 0;
333 spin_unlock_irqrestore(&msi_lock, flags);
/* Allocate a vector and install its interrupt gate. */
337 static int get_new_vector(void)
339 int vector = assign_msi_vector();
/* NOTE(review): the error check on 'vector' is elided here. */
342 set_intr_gate(vector, interrupt[vector]);
/*
 * One-time MSI subsystem initialization: quirk check, arch init,
 * msi_ops registration check, slab cache creation, and reservation of
 * one vector kept aside for hotplug.  'status' is static so repeated
 * callers get the cached result.
 */
347 static int msi_init(void)
349 static int status = -ENOMEM;
356 printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
361 status = msi_arch_init();
365 "PCI: MSI arch init failed. MSI disabled.\n");
371 "PCI: MSI ops not registered. MSI disabled.\n");
376 last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
377 status = msi_cache_init();
380 printk(KERN_WARNING "PCI: MSI cache init failed\n");
384 if (last_alloc_vector < 0) {
386 printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
/* Mark the probe vector as released so it can be reused. */
390 vector_irq[last_alloc_vector] = 0;
391 nr_released_vectors++;
/* Per-device wrapper around get_new_vector() (dev currently unused). */
396 static int get_msi_vector(struct pci_dev *dev)
398 return get_new_vector();
/* Allocate and zero a struct msi_desc from the slab cache. */
401 static struct msi_desc* alloc_msi_entry(void)
403 struct msi_desc *entry;
405 entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
/* NOTE(review): the NULL-check on 'entry' is elided here. */
409 memset(entry, 0, sizeof(struct msi_desc));
410 entry->link.tail = entry->link.head = 0; /* single message */
/* Publish a descriptor into the per-vector table under msi_lock. */
416 static void attach_msi_entry(struct msi_desc *entry, int vector)
420 spin_lock_irqsave(&msi_lock, flags);
421 msi_desc[vector] = entry;
422 spin_unlock_irqrestore(&msi_lock, flags);
/*
 * Install the matching irq-chip for vector 'pos': MSI-X, MSI with
 * mask bit, or MSI without, chosen by cap_id and 'mask'.
 */
425 static void irq_handler_init(int cap_id, int pos, int mask)
429 spin_lock_irqsave(&irq_desc[pos].lock, flags);
430 if (cap_id == PCI_CAP_ID_MSIX)
431 irq_desc[pos].chip = &msix_irq_type;
/* NOTE(review): the 'mask' test between these branches is elided. */
434 irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
436 irq_desc[pos].chip = &msi_irq_w_maskbit_type;
438 spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
/*
 * Flip the MSI or MSI-X enable bit in the capability's control word,
 * update dev->msi(_x)_enabled, and for PCI Express endpoints also
 * disable legacy INTx while message interrupts are active.
 */
441 static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
445 pci_read_config_word(dev, msi_control_reg(pos), &control);
446 if (type == PCI_CAP_ID_MSI) {
447 /* Set enabled bits to single MSI & enable MSI_enable bit */
448 msi_enable(control, 1);
449 pci_write_config_word(dev, msi_control_reg(pos), control);
450 dev->msi_enabled = 1;
452 msix_enable(control);
453 pci_write_config_word(dev, msi_control_reg(pos), control);
454 dev->msix_enabled = 1;
456 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
457 /* PCI Express Endpoint device detected */
458 pci_intx(dev, 0); /* disable intx */
/* Inverse of enable_msi_mode(): clear the enable bit, restore INTx. */
462 void disable_msi_mode(struct pci_dev *dev, int pos, int type)
466 pci_read_config_word(dev, msi_control_reg(pos), &control);
467 if (type == PCI_CAP_ID_MSI) {
468 /* Set enabled bits to single MSI & enable MSI_enable bit */
469 msi_disable(control);
470 pci_write_config_word(dev, msi_control_reg(pos), control);
471 dev->msi_enabled = 0;
473 msix_disable(control);
474 pci_write_config_word(dev, msi_control_reg(pos), control);
475 dev->msix_enabled = 0;
477 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
478 /* PCI Express Endpoint device detected */
479 pci_intx(dev, 1); /* enable intx */
/*
 * Scan the descriptor table for a vector previously assigned to @dev
 * with capability @type; on a hit, dev->irq is overridden with that
 * vector (override code elided in this listing) and 0 is returned.
 */
483 static int msi_lookup_vector(struct pci_dev *dev, int type)
488 spin_lock_irqsave(&msi_lock, flags);
489 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
490 if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
491 msi_desc[vector]->msi_attrib.type != type ||
492 msi_desc[vector]->msi_attrib.default_vector != dev->irq)
494 spin_unlock_irqrestore(&msi_lock, flags);
495 /* This pre-assigned MSI vector for this device
496 already exits. Override dev->irq with this vector */
500 spin_unlock_irqrestore(&msi_lock, flags);
/* Count MSI/MSI-X capable devices at scan time for vector budgeting. */
505 void pci_scan_msi_device(struct pci_dev *dev)
510 if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
512 else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
513 nr_reserved_vectors++;
/*
 * Suspend support: snapshot the MSI capability registers (control,
 * address lo/hi, data, optional mask bits) into a pci_cap_saved_state
 * attached to the device.  Layout depends on the 64BIT/MASKBIT flags.
 */
517 int pci_save_msi_state(struct pci_dev *dev)
521 struct pci_cap_saved_state *save_state;
524 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
525 if (pos <= 0 || dev->no_msi)
528 pci_read_config_word(dev, msi_control_reg(pos), &control);
/* Nothing to save if MSI is not currently enabled. */
529 if (!(control & PCI_MSI_FLAGS_ENABLE))
532 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
535 printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
538 cap = &save_state->data[0];
540 pci_read_config_dword(dev, pos, &cap[i++]);
/* Control word lives in the upper half of the first dword. */
541 control = cap[0] >> 16;
542 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
543 if (control & PCI_MSI_FLAGS_64BIT) {
544 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
545 pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
547 pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
548 if (control & PCI_MSI_FLAGS_MASKBIT)
549 pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
550 save_state->cap_nr = PCI_CAP_ID_MSI;
551 pci_add_saved_cap(dev, save_state);
/*
 * Resume support: write the saved MSI registers back in the same
 * order they were captured, re-enable MSI mode, then drop the saved
 * state.
 */
555 void pci_restore_msi_state(struct pci_dev *dev)
559 struct pci_cap_saved_state *save_state;
562 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
563 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
564 if (!save_state || pos <= 0)
566 cap = &save_state->data[0];
568 control = cap[i++] >> 16;
569 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
570 if (control & PCI_MSI_FLAGS_64BIT) {
571 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
572 pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
574 pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
575 if (control & PCI_MSI_FLAGS_MASKBIT)
576 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
577 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
578 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
579 pci_remove_saved_cap(save_state);
/*
 * Suspend support for MSI-X: save the control word, then walk the
 * device's linked vector list copying each table entry's address/data
 * into the per-descriptor *_save fields.
 */
583 int pci_save_msix_state(struct pci_dev *dev)
587 int vector, head, tail = 0;
589 struct pci_cap_saved_state *save_state;
591 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
592 if (pos <= 0 || dev->no_msi)
595 /* save the capability */
596 pci_read_config_word(dev, msi_control_reg(pos), &control);
597 if (!(control & PCI_MSIX_FLAGS_ENABLE))
599 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
602 printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
605 *((u16 *)&save_state->data[0]) = control;
/* msi_lookup_vector() also rewrites dev->irq to the list head. */
609 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
614 vector = head = dev->irq;
615 while (head != tail) {
618 struct msi_desc *entry;
620 entry = msi_desc[vector];
621 base = entry->mask_base;
622 j = entry->msi_attrib.entry_nr;
624 entry->address_lo_save =
625 readl(base + j * PCI_MSIX_ENTRY_SIZE +
626 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
627 entry->address_hi_save =
628 readl(base + j * PCI_MSIX_ENTRY_SIZE +
629 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
/* NOTE(review): the 'entry->data_save =' line is elided here. */
631 readl(base + j * PCI_MSIX_ENTRY_SIZE +
632 PCI_MSIX_ENTRY_DATA_OFFSET);
634 tail = msi_desc[vector]->link.tail;
639 save_state->cap_nr = PCI_CAP_ID_MSIX;
640 pci_add_saved_cap(dev, save_state);
/*
 * Resume support for MSI-X: rewrite each saved table entry, restore
 * the control word, and re-enable MSI-X mode.
 */
644 void pci_restore_msix_state(struct pci_dev *dev)
648 int vector, head, tail = 0;
651 struct msi_desc *entry;
653 struct pci_cap_saved_state *save_state;
655 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
658 save = *((u16 *)&save_state->data[0]);
659 pci_remove_saved_cap(save_state);
662 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
666 /* route the table */
668 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
670 vector = head = dev->irq;
671 while (head != tail) {
672 entry = msi_desc[vector];
673 base = entry->mask_base;
674 j = entry->msi_attrib.entry_nr;
676 writel(entry->address_lo_save,
677 base + j * PCI_MSIX_ENTRY_SIZE +
678 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
679 writel(entry->address_hi_save,
680 base + j * PCI_MSIX_ENTRY_SIZE +
681 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
682 writel(entry->data_save,
683 base + j * PCI_MSIX_ENTRY_SIZE +
684 PCI_MSIX_ENTRY_DATA_OFFSET);
686 tail = msi_desc[vector]->link.tail;
691 pci_write_config_word(dev, msi_control_reg(pos), save);
692 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
/*
 * Program the device's MSI capability registers for dev->irq: ask the
 * arch for address/data via msi_ops->setup(), write them (32- or
 * 64-bit layout per the control word), and if the device supports
 * per-vector masking, mask all vectors except vector 0.
 */
696 static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
702 int pos, vector = dev->irq;
705 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
706 pci_read_config_word(dev, msi_control_reg(pos), &control);
708 /* Configure MSI capability structure */
709 status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
/* NOTE(review): the error return on 'status' is elided here. */
713 pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
714 if (is_64bit_address(control)) {
715 pci_write_config_dword(dev,
716 msi_upper_address_reg(pos), address_hi);
717 pci_write_config_word(dev,
718 msi_data_reg(pos, 1), data);
720 pci_write_config_word(dev,
721 msi_data_reg(pos, 0), data);
722 if (entry->msi_attrib.maskbit) {
723 unsigned int maskbits, temp;
724 /* All MSIs are unmasked by default, Mask them all */
725 pci_read_config_dword(dev,
726 msi_mask_bits_reg(pos, is_64bit_address(control)),
/* Build a mask covering every implemented vector except vector 0. */
728 temp = (1 &lt;&lt; multi_msi_capable(control));
729 temp = ((temp - 1) &amp; ~temp);
731 pci_write_config_dword(dev,
732 msi_mask_bits_reg(pos, is_64bit_address(control)),
740 * msi_capability_init - configure device's MSI capability structure
741 * @dev: pointer to the pci_dev data structure of MSI device function
743 * Setup the MSI capability structure of device function with a single
744 * MSI vector, regardless of device function is capable of handling
745 * multiple messages. A return of zero indicates the successful setup
746 * of an entry zero with the new MSI vector or non-zero for otherwise.
748 static int msi_capability_init(struct pci_dev *dev)
751 struct msi_desc *entry;
755 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
756 pci_read_config_word(dev, msi_control_reg(pos), &control);
757 /* MSI Entry Initialization */
758 entry = alloc_msi_entry();
/* NOTE(review): the alloc-failure return is elided in this listing. */
762 vector = get_msi_vector(dev);
764 kmem_cache_free(msi_cachep, entry);
/* Single-message MSI: the link list is just this one vector. */
767 entry->link.head = vector;
768 entry->link.tail = vector;
769 entry->msi_attrib.type = PCI_CAP_ID_MSI;
770 entry->msi_attrib.state = 0; /* Mark it not active */
771 entry->msi_attrib.entry_nr = 0;
772 entry->msi_attrib.maskbit = is_mask_bit_support(control);
773 entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */
776 if (is_mask_bit_support(control)) {
/* For MSI, mask_base abuses the iomem pointer to hold a config offset. */
777 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
778 is_64bit_address(control));
780 /* Replace with MSI handler */
781 irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
782 /* Configure MSI capability structure */
783 status = msi_register_init(dev, entry);
/* On failure, restore the pin IRQ and undo the allocation. */
785 dev->irq = entry->msi_attrib.default_vector;
786 kmem_cache_free(msi_cachep, entry);
790 attach_msi_entry(entry, vector);
791 /* Set MSI enabled bits */
792 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
798 * msix_capability_init - configure device's MSI-X capability
799 * @dev: pointer to the pci_dev data structure of MSI-X device function
800 * @entries: pointer to an array of struct msix_entry entries
801 * @nvec: number of @entries
803 * Setup the MSI-X capability structure of device function with a
804 * single MSI-X vector. A return of zero indicates the successful setup of
805 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
807 static int msix_capability_init(struct pci_dev *dev,
808 struct msix_entry *entries, int nvec)
810 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
815 int vector, pos, i, j, nr_entries, temp = 0;
816 unsigned long phys_addr;
822 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
823 /* Request & Map MSI-X table region */
824 pci_read_config_word(dev, msi_control_reg(pos), &control);
825 nr_entries = multi_msix_capable(control);
/* Locate the MSI-X table: BAR index in the low bits, offset above. */
827 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
828 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
829 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
830 phys_addr = pci_resource_start (dev, bir) + table_offset;
831 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
/* NOTE(review): the ioremap NULL-check is elided in this listing. */
835 /* MSI-X Table Initialization */
836 for (i = 0; i &lt; nvec; i++) {
837 entry = alloc_msi_entry();
840 vector = get_msi_vector(dev);
842 kmem_cache_free(msi_cachep, entry);
846 j = entries[i].entry;
847 entries[i].vector = vector;
848 entry->msi_attrib.type = PCI_CAP_ID_MSIX;
849 entry->msi_attrib.state = 0; /* Mark it not active */
850 entry->msi_attrib.entry_nr = j;
851 entry->msi_attrib.maskbit = 1;
852 entry->msi_attrib.default_vector = dev->irq;
854 entry->mask_base = base;
/* First vector starts the circular link list; later ones append. */
856 entry->link.head = vector;
857 entry->link.tail = vector;
860 entry->link.head = temp;
861 entry->link.tail = tail->link.tail;
862 tail->link.tail = vector;
863 head->link.head = vector;
867 /* Replace with MSI-X handler */
868 irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
869 /* Configure MSI-X capability structure */
870 status = msi_ops->setup(dev, vector,
/* Program this entry's address/data into the MSI-X table. */
878 base + j * PCI_MSIX_ENTRY_SIZE +
879 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
881 base + j * PCI_MSIX_ENTRY_SIZE +
882 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
884 base + j * PCI_MSIX_ENTRY_SIZE +
885 PCI_MSIX_ENTRY_DATA_OFFSET);
886 attach_msi_entry(entry, vector);
/* Failure path: release every vector allocated so far. */
890 for (; i &gt;= 0; i--) {
891 vector = (entries + i)->vector;
892 msi_free_vector(dev, vector, 0);
893 (entries + i)->vector = 0;
897 /* Set MSI-X enabled bits */
898 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
904 * pci_msi_supported - check whether MSI may be enabled on device
905 * @dev: pointer to the pci_dev data structure of MSI device function
907 * MSI must be globally enabled and supported by the device and its root
908 * bus. But, the root bus is not easy to find since some architectures
909 * have virtual busses on top of the PCI hierarchy (for instance the
910 * hypertransport bus), while the actual bus where MSI must be supported
911 * is below. So we test the MSI flag on all parent busses and assume
912 * that no quirk will ever set the NO_MSI flag on a non-root bus.
915 int pci_msi_supported(struct pci_dev * dev)
919 if (!pci_msi_enable || !dev || dev->no_msi)
922 /* check MSI flags of all parent busses */
923 for (bus = dev->bus; bus; bus = bus->parent)
924 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
931 * pci_enable_msi - configure device's MSI capability structure
932 * @dev: pointer to the pci_dev data structure of MSI device function
934 * Setup the MSI capability structure of device function with
935 * a single MSI vector upon its software driver call to request for
936 * MSI mode enabled on its hardware device function. A return of zero
937 * indicates the successful setup of an entry zero with the new MSI
938 * vector or non-zero for otherwise.
940 int pci_enable_msi(struct pci_dev* dev)
942 int pos, temp, status;
945 if (pci_msi_supported(dev) &lt; 0)
954 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
/* Fast path: this device was given an MSI vector before (hotplug). */
958 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
962 pci_read_config_word(dev, msi_control_reg(pos), &control);
963 if (control & PCI_MSI_FLAGS_ENABLE)
964 return 0; /* Already in MSI mode */
965 spin_lock_irqsave(&msi_lock, flags);
/* Reclaim the previously released vector for this device. */
966 if (!vector_irq[dev->irq]) {
967 msi_desc[dev->irq]->msi_attrib.state = 0;
968 vector_irq[dev->irq] = -1;
969 nr_released_vectors--;
970 spin_unlock_irqrestore(&msi_lock, flags);
971 status = msi_register_init(dev, msi_desc[dev->irq]);
973 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
976 spin_unlock_irqrestore(&msi_lock, flags);
979 /* Check whether driver already requested for MSI-X vectors */
980 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
981 if (pos &gt; 0 &amp;&amp; !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
982 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
983 "Device already has MSI-X vectors assigned\n",
988 status = msi_capability_init(dev);
/* Bookkeeping: this device no longer needs its reserved vector. */
991 nr_reserved_vectors--; /* Only MSI capable */
992 else if (nr_msix_devices &gt; 0)
993 nr_msix_devices--; /* Both MSI and MSI-X capable,
994 but choose enabling MSI */
/*
 * Disable MSI on @dev: release its vector back to the pool, restore
 * dev->irq to the default pin-assertion vector, and clear the MSI
 * enable bit.  Warns (and BUGs) if the driver still holds the IRQ.
 */
1000 void pci_disable_msi(struct pci_dev* dev)
1002 struct msi_desc *entry;
1003 int pos, default_vector;
1005 unsigned long flags;
1007 if (!pci_msi_enable)
1012 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1016 pci_read_config_word(dev, msi_control_reg(pos), &control);
1017 if (!(control & PCI_MSI_FLAGS_ENABLE))
1020 spin_lock_irqsave(&msi_lock, flags);
1021 entry = msi_desc[dev->irq];
1022 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
1023 spin_unlock_irqrestore(&msi_lock, flags);
/* state != 0 means the driver never called free_irq() — driver bug. */
1026 if (entry->msi_attrib.state) {
1027 spin_unlock_irqrestore(&msi_lock, flags);
1028 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
1029 "free_irq() on MSI vector %d\n",
1030 pci_name(dev), dev->irq);
1031 BUG_ON(entry->msi_attrib.state &gt; 0);
1033 vector_irq[dev->irq] = 0; /* free it */
1034 nr_released_vectors++;
1035 default_vector = entry->msi_attrib.default_vector;
1036 spin_unlock_irqrestore(&msi_lock, flags);
1037 /* Restore dev->irq to its default pin-assertion vector */
1038 dev->irq = default_vector;
1039 disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
/*
 * Tear down one MSI/MSI-X vector: arch teardown, unlink from the
 * device's circular vector list, release the vector (unless
 * @reassign), free the descriptor, and for MSI-X mask the now-unused
 * table entry.  Unmapping of the table (iounmap) is handled further
 * down (elided in this listing).
 */
1044 static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
1046 struct msi_desc *entry;
1047 int head, entry_nr, type;
1049 unsigned long flags;
1051 msi_ops->teardown(vector);
1053 spin_lock_irqsave(&msi_lock, flags);
1054 entry = msi_desc[vector];
1055 if (!entry || entry->dev != dev) {
1056 spin_unlock_irqrestore(&msi_lock, flags);
1059 type = entry->msi_attrib.type;
1060 entry_nr = entry->msi_attrib.entry_nr;
1061 head = entry->link.head;
1062 base = entry->mask_base;
/* Unlink this vector from the circular list. */
1063 msi_desc[entry->link.head]->link.tail = entry->link.tail;
1064 msi_desc[entry->link.tail]->link.head = entry->link.head;
/* When not reassigning, return the vector to the released pool. */
1067 vector_irq[vector] = 0;
1068 nr_released_vectors++;
1070 msi_desc[vector] = NULL;
1071 spin_unlock_irqrestore(&msi_lock, flags);
1073 kmem_cache_free(msi_cachep, entry);
1075 if (type == PCI_CAP_ID_MSIX) {
/* NOTE(review): the writel() masking this table entry is elided. */
1078 entry_nr * PCI_MSIX_ENTRY_SIZE +
1079 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
/*
 * Hotplug re-add path: reuse the MSI-X vectors that survived a device
 * removal.  Walks the linked list starting at @head, counts available
 * entries (failing with the count in *nvec if the driver asks for
 * more), then re-activates *nvec vectors, rewriting table entries
 * whose requested entry number changed.
 */
1088 static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
1090 int vector = head, tail = 0;
1091 int i, j = 0, nr_entries = 0;
1093 unsigned long flags;
1095 spin_lock_irqsave(&msi_lock, flags);
/* First pass: count surviving vectors, remember a preferred start. */
1096 while (head != tail) {
1098 tail = msi_desc[vector]->link.tail;
1099 if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
1103 if (*nvec &gt; nr_entries) {
1104 spin_unlock_irqrestore(&msi_lock, flags);
/* NOTE(review): *nvec is presumably set to nr_entries here (elided). */
1108 vector = ((j &gt; 0) ? j : head);
1109 for (i = 0; i &lt; *nvec; i++) {
1110 j = msi_desc[vector]->msi_attrib.entry_nr;
1111 msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */
1112 vector_irq[vector] = -1; /* Mark it busy */
1113 nr_released_vectors--;
1114 entries[i].vector = vector;
/* Requested entry differs from the stored one: move the table slot. */
1115 if (j != (entries + i)->entry) {
1116 base = msi_desc[vector]->mask_base;
1117 msi_desc[vector]->msi_attrib.entry_nr =
1118 (entries + i)->entry;
1119 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
1120 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
1121 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
1122 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
1123 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
1124 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
1125 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
1126 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
/* Keep the data word's upper bits, patch in the vector number. */
1127 writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
1128 PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
1129 base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
1130 PCI_MSIX_ENTRY_DATA_OFFSET);
1132 vector = msi_desc[vector]->link.tail;
1134 spin_unlock_irqrestore(&msi_lock, flags);
1140 * pci_enable_msix - configure device's MSI-X capability structure
1141 * @dev: pointer to the pci_dev data structure of MSI-X device function
1142 * @entries: pointer to an array of MSI-X entries
1143 * @nvec: number of MSI-X vectors requested for allocation by device driver
1145 * Setup the MSI-X capability structure of device function with the number
1146 * of requested vectors upon its software driver call to request for
1147 * MSI-X mode enabled on its hardware device function. A return of zero
1148 * indicates the successful configuration of MSI-X capability structure
1149 * with new allocated MSI-X vectors. A return of &lt; 0 indicates a failure.
1150 * Or a return of &gt; 0 indicates that driver request is exceeding the number
1151 * of vectors available. Driver should use the returned value to re-send
1154 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
1156 int status, pos, nr_entries, free_vectors;
1159 unsigned long flags;
1161 if (!entries || pci_msi_supported(dev) &lt; 0)
1164 status = msi_init();
1168 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1172 pci_read_config_word(dev, msi_control_reg(pos), &control);
1173 if (control & PCI_MSIX_FLAGS_ENABLE)
1174 return -EINVAL; /* Already in MSI-X mode */
1176 nr_entries = multi_msix_capable(control);
1177 if (nvec &gt; nr_entries)
1180 /* Check for any invalid entries */
1181 for (i = 0; i &lt; nvec; i++) {
1182 if (entries[i].entry &gt;= nr_entries)
1183 return -EINVAL; /* invalid entry */
1184 for (j = i + 1; j &lt; nvec; j++) {
1185 if (entries[i].entry == entries[j].entry)
1186 return -EINVAL; /* duplicate entry */
/* Fast path: vectors survive from a previous hotplug removal. */
1190 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1193 /* Reroute MSI-X table */
1194 if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
1195 /* #requested &gt; #previous-assigned */
1200 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
1203 /* Check whether driver already requested for MSI vector */
1204 if (pci_find_capability(dev, PCI_CAP_ID_MSI) &gt; 0 &amp;&amp;
1205 !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
1206 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
1207 "Device already has an MSI vector assigned\n",
1213 spin_lock_irqsave(&msi_lock, flags);
1215 * msi_lock is provided to ensure that enough vectors resources are
1216 * available before granting.
1218 free_vectors = pci_vector_resources(last_alloc_vector,
1219 nr_released_vectors);
1220 /* Ensure that each MSI/MSI-X device has one vector reserved by
1221 default to avoid any MSI-X driver to take all available
1223 free_vectors -= nr_reserved_vectors;
1224 /* Find the average of free vectors among MSI-X devices */
1225 if (nr_msix_devices &gt; 0)
1226 free_vectors /= nr_msix_devices;
1227 spin_unlock_irqrestore(&msi_lock, flags);
/* Over-budget request: return the number actually available (&gt; 0). */
1229 if (nvec &gt; free_vectors) {
1230 if (free_vectors &gt; 0)
1231 return free_vectors;
1236 status = msix_capability_init(dev, entries, nvec);
1237 if (!status && nr_msix_devices &gt; 0)
/*
 * Disable MSI-X on @dev: release every vector in the device's linked
 * list back to the pool (warning, then BUG, if any is still active,
 * i.e. the driver skipped free_irq()), then clear the MSI-X enable
 * bit and restore INTx.
 */
1243 void pci_disable_msix(struct pci_dev* dev)
1248 if (!pci_msi_enable)
1253 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1257 pci_read_config_word(dev, msi_control_reg(pos), &control);
1258 if (!(control & PCI_MSIX_FLAGS_ENABLE))
1262 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1263 int state, vector, head, tail = 0, warning = 0;
1264 unsigned long flags;
1266 vector = head = dev->irq;
1267 spin_lock_irqsave(&msi_lock, flags);
1268 while (head != tail) {
1269 state = msi_desc[vector]->msi_attrib.state;
/* NOTE(review): the 'if (state) warning' branch is elided here. */
1273 vector_irq[vector] = 0; /* free it */
1274 nr_released_vectors++;
1276 tail = msi_desc[vector]->link.tail;
1279 spin_unlock_irqrestore(&msi_lock, flags);
1282 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
1283 "free_irq() on all MSI-X vectors\n",
1285 BUG_ON(warning &gt; 0);
1288 disable_msi_mode(dev,
1289 pci_find_capability(dev, PCI_CAP_ID_MSIX),
1297 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
1298 * @dev: pointer to the pci_dev data structure of MSI(X) device function
1300 * Being called during hotplug remove, from which the device function
1301 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
1302 * allocated for this device function, are reclaimed to unused state,
1303 * which may be used later on.
1305 void msi_remove_pci_irq_vectors(struct pci_dev* dev)
1307 int state, pos, temp;
1308 unsigned long flags;
1310 if (!pci_msi_enable || !dev)
1313 temp = dev->irq; /* Save IOAPIC IRQ */
/* MSI part: free the single vector (warn if IRQ still requested). */
1314 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1315 if (pos &gt; 0 &amp;&amp; !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
1316 spin_lock_irqsave(&msi_lock, flags);
1317 state = msi_desc[dev->irq]->msi_attrib.state;
1318 spin_unlock_irqrestore(&msi_lock, flags);
1320 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1321 "called without free_irq() on MSI vector %d\n",
1322 pci_name(dev), dev->irq);
1324 } else /* Release MSI vector assigned to this device */
1325 msi_free_vector(dev, dev->irq, 0);
1326 dev->irq = temp; /* Restore IOAPIC IRQ */
/* MSI-X part: walk the linked list, freeing each vector in turn. */
1328 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1329 if (pos &gt; 0 &amp;&amp; !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1330 int vector, head, tail = 0, warning = 0;
1331 void __iomem *base = NULL;
1333 vector = head = dev->irq;
1334 while (head != tail) {
1335 spin_lock_irqsave(&msi_lock, flags);
1336 state = msi_desc[vector]->msi_attrib.state;
1337 tail = msi_desc[vector]->link.tail;
1338 base = msi_desc[vector]->mask_base;
1339 spin_unlock_irqrestore(&msi_lock, flags);
/* Head vector is freed last, after the list is otherwise empty. */
1342 else if (vector != head) /* Release MSI-X vector */
1343 msi_free_vector(dev, vector, 0);
1346 msi_free_vector(dev, vector, 0);
1349 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1350 "called without free_irq() on all MSI-X vectors\n",
1352 BUG_ON(warning &gt; 0);
1354 dev->irq = temp; /* Restore IOAPIC IRQ */
/* Globally disable MSI (body elided; clears pci_msi_enable). */
1358 void pci_no_msi(void)
1363 EXPORT_SYMBOL(pci_enable_msi);
1364 EXPORT_SYMBOL(pci_disable_msi);
1365 EXPORT_SYMBOL(pci_enable_msix);
1366 EXPORT_SYMBOL(pci_disable_msix);