X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fdma%2Fioat_dma.c;h=318e8a22d81423a4da8cfb759be3c9854148252e;hb=a861beb1401d65e3f095fee074c13645ab06490e;hp=45e7b4666c7b3c5197eeaa0c256dcf76860a2161;hpb=03bbe082cffc4533f6557bf23f0c672307067246;p=linux-2.6

diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c..318e8a22d8 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	return device->common.chancnt;
 }
 
-static void ioat_set_src(dma_addr_t addr,
-			 struct dma_async_tx_descriptor *tx,
-			 int index)
-{
-	tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
-			  struct dma_async_tx_descriptor *tx,
-			  int index)
-{
-	tx_to_ioat_desc(tx)->dst = addr;
-}
-
 /**
  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  *                                 descriptors to hw
@@ -226,14 +212,14 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	spin_lock_bh(&ioat_chan->desc_lock);
@@ -242,7 +228,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -278,7 +264,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -318,14 +304,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	/*
@@ -335,7 +321,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -363,7 +349,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 
 	memset(desc, 0, sizeof(*desc));
 	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-	desc_sw->async_tx.tx_set_src = ioat_set_src;
-	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,8 +698,10 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 
 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -726,6 +712,9 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -733,8 +722,10 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -749,6 +740,9 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -848,7 +842,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 				 * a completed entry, but not the last, so clean
 				 * up if the client is done with the descriptor
 				 */
-				if (desc->async_tx.ack) {
+				if (async_tx_test_ack(&desc->async_tx)) {
 					list_del(&desc->node);
 					list_add_tail(&desc->node,
 						      &ioat_chan->free_desc);
@@ -930,17 +924,6 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
-static void ioat_dma_dependency_added(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	spin_lock_bh(&ioat_chan->desc_lock);
-	if (ioat_chan->pending == 0) {
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		ioat_dma_memcpy_cleanup(ioat_chan);
-	} else
-		spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
 /**
  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
 * @chan: IOAT DMA channel handle
@@ -996,7 +979,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 	desc->hw->size = 0;
 	desc->hw->src_addr = 0;
 	desc->hw->dst_addr = 0;
-	desc->async_tx.ack = 1;
+	async_tx_ack(&desc->async_tx);
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc->hw->next = 0;
@@ -1045,7 +1028,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	u8 *dest;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int err = 0;
 
@@ -1073,7 +1056,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 		goto out;
 	}
 
-	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+				 DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+				  DMA_FROM_DEVICE);
+	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+						   IOAT_TEST_SIZE, 0);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
@@ -1082,12 +1070,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 
 	async_tx_ack(tx);
-	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-			      DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-			      DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	tx->callback = ioat_dma_test_callback;
 	tx->callback_param = (void *)0x8086;
 	cookie = tx->tx_submit(tx);
@@ -1323,7 +1305,6 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 
 	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	device->common.device_dependency_added = ioat_dma_dependency_added;
 	switch (device->version) {
 	case IOAT_VER_1_2:
 		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
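
Taken together, the hunks above drop the old per-descriptor address hooks (tx_set_src/tx_set_dest) and the raw ->ack field: the client now maps its buffers itself, passes the bus addresses and a flags word straight into device_prep_dma_memcpy(), and acknowledgement is tracked as a flag via async_tx_ack()/async_tx_test_ack(). The sketch below shows the resulting client-side sequence, modelled on the converted ioat_dma_self_test() hunk; the function name, its parameters, and the omitted unmap/mapping-error handling are illustrative assumptions, not part of the patch.

/*
 * Minimal sketch of the post-patch dmaengine memcpy flow, modelled on the
 * converted ioat_dma_self_test() above.  sketch_issue_memcpy() and its
 * parameters are hypothetical; dma_unmap_single() and mapping-error checks
 * are omitted for brevity.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static dma_cookie_t sketch_issue_memcpy(struct dma_chan *chan,
					void *dest, void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dest;
	dma_cookie_t cookie;

	/* the caller now maps the buffers and hands bus addresses to prep */
	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* new prep signature: chan, dest, src, len, flags */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
	if (!tx)
		return -ENOMEM;		/* no descriptor available */

	async_tx_ack(tx);		/* ack is now a flag on tx->flags */
	cookie = tx->tx_submit(tx);
	dev->device_issue_pending(chan);	/* kick the channel */

	return cookie;
}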