+/*
+ * Program the source address into every hardware descriptor in the
+ * transaction's chain; each descriptor moves at most xfercap bytes,
+ * so the address advances by xfercap per descriptor.
+ */
+static void
+ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+{
+	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+
+	pci_unmap_addr_set(desc, src, addr);
+
+	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
+		iter->hw->src_addr = addr;
+		addr += ioat_chan->xfercap;
+	}
+}
+
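+/*
+ * Mirror of ioat_set_src: program the destination address into each
+ * hardware descriptor in the chain, advancing by xfercap per descriptor.
+ */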
+static void
+ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+{
+	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+
+	pci_unmap_addr_set(desc, dst, addr);
+
+	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
+		iter->hw->dst_addr = addr;
+		addr += ioat_chan->xfercap;
+	}
+}
+
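+/*
+ * Assign a DMA cookie to the transaction and append its descriptor chain
+ * to the channel's used list: the hardware "next" pointer of the last
+ * in-flight descriptor is redirected to the new chain, and the channel
+ * APPEND command is only written once enough descriptors are pending.
+ */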
+static dma_cookie_t
+ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
+	int append = 0;
+	dma_cookie_t cookie;
+	struct ioat_desc_sw *group_start;
+
+	group_start = list_entry(desc->async_tx.tx_list.next,
+				 struct ioat_desc_sw, node);
+	spin_lock_bh(&ioat_chan->desc_lock);
+	/* cookie incr and addition to used_list must be atomic */
+	cookie = ioat_chan->common.cookie;
+	cookie++;
+	if (cookie < 0)
+		cookie = 1;
+	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;
+
+	/* write address into NextDescriptor field of last desc in chain */
+	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
+						group_start->async_tx.phys;
+	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);
+
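+	/*
+	 * Only kick the channel with an APPEND once at least four
+	 * descriptors have been queued since the last one.
+	 */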
+	ioat_chan->pending += desc->tx_cnt;
+	if (ioat_chan->pending >= 4) {
+		append = 1;
+		ioat_chan->pending = 0;
+	}
+	spin_unlock_bh(&ioat_chan->desc_lock);
+
+	if (append)
+		writeb(IOAT_CHANCMD_APPEND,
+		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+
+	return cookie;
+}
+