/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)

/* internal functions */
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);
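
/**
 * enumerate_dma_channels - read the channel count from the device and set
 *	up a struct ioat_dma_chan (locks, descriptor lists, per-channel
 *	register base) for each channel found
 * @device: the device whose channels are enumerated
 *
 * Returns the number of channels successfully set up.
 */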
static int enumerate_dma_channels(struct ioat_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		ioat_chan->common.client = NULL;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
	}
	return device->common.chancnt;
}
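
/**
 * ioat_dma_alloc_descriptor - allocate a hardware descriptor from the
 *	device's PCI pool, paired with a software tracking structure
 * @ioat_chan: the channel the descriptor will be used on
 * @flags: allocation flags (GFP_KERNEL or GFP_ATOMIC)
 *
 * Returns %NULL if either allocation fails.
 */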
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
	struct ioat_dma_chan *ioat_chan,
	gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioat_device *ioat_device;
	dma_addr_t phys;

	ioat_device = to_ioat_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioat_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	desc_sw->hw = desc;
	desc_sw->phys = phys;

	return desc_sw;
}
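
/*
 * Each channel pre-allocates a pool of descriptors when it is opened;
 * the hot path recycles them from free_desc and only falls back to a
 * GFP_ATOMIC allocation when the pool runs dry.
 */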
#define INITIAL_IOAT_DESC_COUNT 128

static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/*
	 * In-use bit automatically set by reading chanctrl
	 * If 0, we got it, if 1, someone else did
	 */
	chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
	if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)
		return -EBUSY;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_CHANNEL_IN_USE |
		IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		printk(KERN_ERR "IOAT: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_start_null_desc(ioat_chan);
	return i;
}

static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
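
/**
 * ioat_dma_free_chan_resources - reset the channel and return all
 *	descriptors and the completion writeback area to their pools
 * @chan: the channel to be cleaned
 */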
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_device *ioat_device = to_ioat_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	u16 chanctrl;
	int in_use_descs = 0;

	ioat_dma_memcpy_cleanup(ioat_chan);

	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioat_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
		       in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;

	/* Tell hw the chan is free */
	chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
	chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

/**
 * do_ioat_dma_memcpy - actual function that initiates a IOAT DMA transaction
 * @ioat_chan: IOAT DMA channel handle
 * @dest: DMA destination address
 * @src: DMA source address
 * @len: transaction length in bytes
 */

static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan,
				       dma_addr_t dest,
				       dma_addr_t src,
				       size_t len)
{
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *prev;
	struct ioat_desc_sw *new;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	dma_addr_t orig_src, orig_dst;
	unsigned int desc_count = 0;
	unsigned int append = 0;

	if (!ioat_chan || !dest || !src)
		return -EFAULT;

	if (!len)
		return ioat_chan->common.cookie;

	orig_len = len;
	orig_src = src;
	orig_dst = dest;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);

	while (len) {
		if (!list_empty(&ioat_chan->free_desc)) {
			new = to_ioat_desc(ioat_chan->free_desc.next);
			list_del(&new->node);
		} else {
			/* try to get another desc */
			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			/* will this ever happen? */
			/* TODO add upper limit on these */
			BUG_ON(!new);
		}

		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->hw->src_addr = src;
		new->hw->dst_addr = dest;
		new->cookie = 0;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->phys;

		prev = new;
		len -= copy;
		dest += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}
	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->cookie = cookie;

	pci_unmap_addr_set(new, src, orig_src);
	pci_unmap_addr_set(new, dst, orig_dst);
	pci_unmap_len_set(new, src_len, orig_len);
	pci_unmap_len_set(new, dst_len, orig_len);

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = first->phys;
	list_splice_init(&new_chain, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

/**
 * ioat_dma_memcpy_buf_to_buf - wrapper that takes src & dest bufs
 * @chan: IOAT DMA channel handle
 * @dest: DMA destination address
 * @src: DMA source address
 * @len: transaction length in bytes
 */

static dma_cookie_t ioat_dma_memcpy_buf_to_buf(struct dma_chan *chan,
					       void *dest,
					       void *src,
					       size_t len)
{
	dma_addr_t dest_addr;
	dma_addr_t src_addr;
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	dest_addr = pci_map_single(ioat_chan->device->pdev,
				   dest, len, PCI_DMA_FROMDEVICE);
	src_addr = pci_map_single(ioat_chan->device->pdev,
				  src, len, PCI_DMA_TODEVICE);

	return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}

/**
 * ioat_dma_memcpy_buf_to_pg - wrapper, copying from a buf to a page
 * @chan: IOAT DMA channel handle
 * @page: pointer to the page to copy to
 * @offset: offset into that page
 * @src: DMA source address
 * @len: transaction length in bytes
 */

static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,
					      struct page *page,
					      unsigned int offset,
					      void *src,
					      size_t len)
{
	dma_addr_t dest_addr;
	dma_addr_t src_addr;
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	dest_addr = pci_map_page(ioat_chan->device->pdev,
				 page, offset, len, PCI_DMA_FROMDEVICE);
	src_addr = pci_map_single(ioat_chan->device->pdev,
				  src, len, PCI_DMA_TODEVICE);

	return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}

/**
 * ioat_dma_memcpy_pg_to_pg - wrapper, copying between two pages
 * @chan: IOAT DMA channel handle
 * @dest_pg: pointer to the page to copy to
 * @dest_off: offset into that page
 * @src_pg: pointer to the page to copy from
 * @src_off: offset into that page
 * @len: transaction length in bytes. This is guaranteed not to make a copy
 *	across a page boundary.
 */

static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,
					     struct page *dest_pg,
					     unsigned int dest_off,
					     struct page *src_pg,
					     unsigned int src_off,
					     size_t len)
{
	dma_addr_t dest_addr;
	dma_addr_t src_addr;
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	dest_addr = pci_map_page(ioat_chan->device->pdev,
				 dest_pg, dest_off, len, PCI_DMA_FROMDEVICE);
	src_addr = pci_map_page(ioat_chan->device->pdev,
				src_pg, src_off, len, PCI_DMA_TODEVICE);

	return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}
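
/*
 * The three memcpy entry points above differ only in how source and
 * destination are DMA-mapped; each funnels into do_ioat_dma_memcpy(),
 * which builds and queues the descriptor chain.
 */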

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
 * @chan: DMA channel handle
 */

static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}
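
/**
 * ioat_dma_memcpy_cleanup - walk the used descriptor list, unmapping and
 *	recycling every descriptor the hardware has retired, up to the
 *	address reported in the completion writeback area
 * @chan: the channel to clean up
 */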
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(chan->completion_virt);

	if (!spin_trylock(&chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
	chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
		IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		printk(KERN_ERR "IOAT: Channel halted, chanerr = %x\n",
		       readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == chan->last_completion) {
		spin_unlock(&chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->cookie) {
			cookie = desc->cookie;

			/* yes we are unmapping both _page and _single alloc'd
			   regions with unmap_page. Is this *really* that bad?
			*/
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, dst_len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, src_len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->phys != phys_complete) {
			/* a completed entry, but not the last, so cleanup */
			list_del(&desc->node);
			list_add_tail(&desc->node, &chan->free_desc);
		} else {
			/* last used desc. Do not remove, so we can append from
			   it, but don't look at it next time, either */
			desc->cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&chan->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock(&chan->cleanup_lock);
}

/**
 * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */

static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
	{ 0, }
};

static struct pci_driver ioat_pci_driver = {
	.name		= "ioatdma",
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_probe,
	.shutdown	= ioat_shutdown,
	.remove		= __devexit_p(ioat_remove),
};
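
/**
 * ioat_do_interrupt - interrupt handler; acknowledges the interrupt and
 *	reports any error attention status
 * @irq: interrupt number
 * @data: the struct ioat_device passed to request_irq()
 */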
static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
	struct ioat_device *instance = data;
	unsigned long attnstatus;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);

	printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
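
/**
 * ioat_start_null_desc - get the channel running by queueing a NULL
 *	descriptor: its physical address is written to the chain-address
 *	registers and the START command is issued
 * @ioat_chan: the channel to be started
 */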
static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	if (!list_empty(&ioat_chan->free_desc)) {
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&desc->node);
	} else {
		/* try to get another desc */
		spin_unlock_bh(&ioat_chan->desc_lock);
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		spin_lock_bh(&ioat_chan->desc_lock);
		/* will this ever happen? */
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform a IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static int ioat_self_test(struct ioat_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	cookie = ioat_dma_memcpy_buf_to_buf(dma_chan, dest, src, IOAT_TEST_SIZE);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
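
/**
 * ioat_probe - set up a newly found I/OAT device: map its registers,
 *	create the descriptor and completion pools, hook up the interrupt,
 *	enumerate channels, and register with the dmaengine core once the
 *	self-test passes
 * @pdev: the PCI device
 * @ent: matching entry in ioat_pci_tbl
 */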
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	unsigned long mmio_start, mmio_len;
	void __iomem *reg_base;
	struct ioat_device *device;

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		goto err_set_dma_mask;

	err = pci_request_regions(pdev, ioat_pci_driver.name);
	if (err)
		goto err_request_regions;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	reg_base = ioremap(mmio_start, mmio_len);
	if (!reg_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
		sizeof(struct ioat_dma_descriptor), 64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
		sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->pdev = pdev;
	pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
	if (pci_enable_msi(pdev) == 0) {
		device->msi = 1;
	} else {
		device->msi = 0;
	}
#endif
	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
		device);
	if (err)
		goto err_irq;

	device->reg_base = reg_base;

	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
	       device->reg_base + IOAT_INTRCTRL_OFFSET);
	pci_set_master(pdev);

	INIT_LIST_HEAD(&device->common.channels);
	enumerate_dma_channels(device);

	device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
	device->common.device_memcpy_buf_to_buf = ioat_dma_memcpy_buf_to_buf;
	device->common.device_memcpy_buf_to_pg = ioat_dma_memcpy_buf_to_pg;
	device->common.device_memcpy_pg_to_pg = ioat_dma_memcpy_pg_to_pg;
	device->common.device_memcpy_complete = ioat_dma_is_complete;
	device->common.device_memcpy_issue_pending = ioat_dma_memcpy_issue_pending;
	printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
	       device->common.chancnt);

	err = ioat_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return 0;

err_self_test:
err_irq:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(reg_base);
err_ioremap:
	pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
	pci_disable_device(pdev);
err_enable_device:
	printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n");

	return err;
}
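
/**
 * ioat_shutdown - unregister the DMA device at shutdown so clients stop
 *	submitting new operations to the channels
 * @pdev: the PCI device being shut down
 */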
static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioat_device *device;
	device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
}

static void __devexit ioat_remove(struct pci_dev *pdev)
{
	struct ioat_device *device;
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);

	free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
	if (device->msi)
		pci_disable_msi(device->pdev);
#endif
	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);
	iounmap(device->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}

/* MODULE API */
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
	/* it's currently unsafe to unload this module */
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

	return pci_register_driver(&ioat_pci_driver);
}

module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
}

module_exit(ioat_exit_module);