/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);

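/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */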
static int ioat_dma_enumerate_channels(struct ioat_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
	}
	return device->common.chancnt;
}

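/*
 * ioat_set_src - program the source address into each hardware descriptor of
 * a transaction. One request may span several descriptors when it exceeds
 * the channel's transfer capability, so the address is advanced by xfercap
 * for each chained descriptor. ioat_set_dest below does the same for the
 * destination address.
 */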
static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

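/*
 * ioat_tx_submit - attach a prepared descriptor chain to the channel. The
 * cookie assignment and the splice onto used_desc must happen atomically
 * under desc_lock; the CHANCMD_APPEND doorbell write is deferred until at
 * least four descriptors are pending, to batch MMIO writes.
 */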
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	int append = 0;
	dma_cookie_t cookie;
	struct ioat_desc_sw *group_start;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

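/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */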
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
	struct ioat_dma_chan *ioat_chan,
	gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioat_device *ioat_device;
	dma_addr_t phys;

	ioat_device = to_ioat_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioat_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"ioatdma: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_dma_start_null_desc(ioat_chan);
	return i;
}

static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_device *ioat_device = to_ioat_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	ioat_dma_memcpy_cleanup(ioat_chan);

	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioat_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
}

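/*
 * ioat_dma_prep_memcpy - build the descriptor chain for a memcpy. Requests
 * longer than the channel's xfercap are split across several hardware
 * descriptors; only the final descriptor gets the completion-writeback
 * control bit and, at submit time, the cookie.
 */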
static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
	while (len) {
		if (!list_empty(&ioat_chan->free_desc)) {
			new = to_ioat_desc(ioat_chan->free_desc.next);
			list_del(&new->node);
		} else {
			/* try to get another desc */
			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			/* will this ever happen? */
			/* TODO add upper limit on these */
			BUG_ON(!new);
		}

		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;

		prev = new;
		len -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);

	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}

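/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @ioat_chan: ioat channel to be cleaned up
 */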
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
	ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock(&ioat_chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/*
			 * yes we are unmapping both _page and _single alloc'd
			 * regions with unmap_page. Is this *really* that bad?
			 */
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so cleanup
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node,
					      &ioat_chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can append from
			 * it, but don't look at it next time, either
			 */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock(&ioat_chan->cleanup_lock);
}

static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
	{ 0, }
};

static struct pci_driver ioat_pci_driver = {
	.name = "ioatdma",
	.id_table = ioat_pci_tbl,
	.probe = ioat_probe,
	.shutdown = ioat_shutdown,
	.remove = __devexit_p(ioat_remove),
};

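/*
 * ioat_do_interrupt - interrupt handler. Completion processing is done by
 * polling in ioat_dma_memcpy_cleanup(); this handler only checks that the
 * device raised the interrupt and acknowledges it by writing the interrupt
 * control register back.
 */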
static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
	struct ioat_device *instance = data;
	unsigned long attnstatus;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);

	printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

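/*
 * ioat_dma_start_null_desc - submit a NULL-control descriptor to start the
 * channel, giving the hardware a valid chain head that later transactions
 * can be appended to.
 */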
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	if (!list_empty(&ioat_chan->free_desc)) {
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&desc->node);
	} else {
		/* try to get another desc */
		spin_unlock_bh(&ioat_chan->desc_lock);
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		spin_lock_bh(&ioat_chan->desc_lock);
		/* will this ever happen? */
		BUG_ON(!desc);
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static int ioat_self_test(struct ioat_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

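/*
 * ioat_probe - map the device's registers, create the descriptor and
 * completion-writeback pools, hook the interrupt, enumerate the channels,
 * and register with the dmaengine core once the self-test passes.
 */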
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	unsigned long mmio_start, mmio_len;
	void __iomem *reg_base;
	struct ioat_device *device;

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		goto err_set_dma_mask;

	err = pci_request_regions(pdev, ioat_pci_driver.name);
	if (err)
		goto err_request_regions;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	reg_base = ioremap(mmio_start, mmio_len);
	if (!reg_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
		sizeof(struct ioat_dma_descriptor), 64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->pdev = pdev;
	pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
	if (pci_enable_msi(pdev) == 0) {
		device->msi = 1;
	} else {
		device->msi = 0;
	}
#endif
	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
			  device);
	if (err)
		goto err_irq;

	device->reg_base = reg_base;

	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
	       device->reg_base + IOAT_INTRCTRL_OFFSET);
	pci_set_master(pdev);

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	printk(KERN_INFO
	       "ioatdma: Intel(R) I/OAT DMA Engine found, %d channels\n",
	       device->common.chancnt);

	err = ioat_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return 0;

err_self_test:
err_irq:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(reg_base);
err_ioremap:
	pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
	pci_disable_device(pdev);
err_enable_device:

	printk(KERN_ERR
	       "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");

	return err;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioat_device *device;
	device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
}

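/*
 * ioat_remove - the inverse of ioat_probe: unregister from dmaengine,
 * release the IRQ (and MSI), destroy the pools, unmap the registers, and
 * free the per-channel and per-device state.
 */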
static void __devexit ioat_remove(struct pci_dev *pdev)
{
	struct ioat_device *device;
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);

	free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
	if (device->msi)
		pci_disable_msi(device->pdev);
#endif
	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);
	iounmap(device->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}

/* MODULE API */
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
	/* it's currently unsafe to unload this module */
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

	return pci_register_driver(&ioat_pci_driver);
}

module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
}

module_exit(ioat_exit_module);