}
-static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
+static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}
-static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}
return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}
+static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+{
+ return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+}
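/*
 * Aside (illustrative, not part of this patch): the DMA_IN()/DMA_OUT()
 * helpers used by these accessors are assumed to select big- or
 * little-endian MMIO accessors based on the channel's feature flags,
 * roughly as sketched below; the real definitions live in fsldma.h.
 *
 *	#define DMA_IN(fsl_chan, addr, width)				\
 *		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
 *			in_be##width(addr) : in_le##width(addr))
 *	#define DMA_OUT(fsl_chan, addr, val, width)			\
 *		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
 *			out_be##width((addr), (val)) :			\
 *			out_le##width((addr), (val)))
 */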
+
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
u32 sr = get_sr(fsl_chan);
dma_pool_destroy(fsl_chan->desc_pool);
}
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_interrupt(struct dma_chan *chan)
+{
+ struct fsl_dma_chan *fsl_chan;
+ struct fsl_desc_sw *new;
+
+ if (!chan)
+ return NULL;
+
+ fsl_chan = to_fsl_chan(chan);
+
+ new = fsl_dma_alloc_descriptor(fsl_chan);
+ if (!new) {
+ dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+ return NULL;
+ }
+
+ new->async_tx.cookie = -EBUSY;
+ new->async_tx.ack = 0;
+
+ /* Insert the link descriptor into the LD ring */
+ list_add_tail(&new->node, &new->async_tx.tx_list);
+
+ /* Set End-of-link on the last link descriptor of the new list */
+ set_ld_eol(fsl_chan, new);
+
+ return &new->async_tx;
+}
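/*
 * Illustrative client-side sketch (not part of this patch): once
 * device_prep_dma_interrupt is wired up in the probe path below, a
 * dmaengine client can request a pure completion interrupt with no data
 * movement.  The function and callback names here are hypothetical.
 */
static void my_irq_done(void *arg)
{
	complete(arg);		/* wake whoever waits on the completion */
}

static int issue_dma_interrupt(struct dma_chan *chan, struct completion *done)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_interrupt(chan);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_irq_done;
	tx->callback_param = done;
	tx->tx_submit(tx);			/* queue the null transfer */
	chan->device->device_issue_pending(chan);
	return 0;
}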
+
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
size_t len, unsigned long flags)
dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif
- copy = min(len, FSL_DMA_BCR_MAX_CNT);
+ copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
set_desc_cnt(fsl_chan, &new->hw, copy);
set_desc_src(fsl_chan, &new->hw, dma_src);
spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- fsl_dma_update_completed_cookie(fsl_chan);
dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
fsl_chan->completed_cookie);
list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
if (ld_node != &fsl_chan->ld_queue) {
/* Get the ld start address from ld_queue */
next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
- dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n",
- (u64)next_dest_addr);
+ dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
+ (void *)next_dest_addr);
set_cdar(fsl_chan, next_dest_addr);
dma_start(fsl_chan);
} else {
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- dma_addr_t stat;
+ u32 stat;
stat = get_sr(fsl_chan);
dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
if (stat & FSL_DMA_SR_TE)
dev_err(fsl_chan->dev, "Transfer Error!\n");
+ /* Programming Error
+ * The DMA_INTERRUPT async_tx is a NULL transfer, which will
+ * trigger a PE interrupt.
+ */
+ if (stat & FSL_DMA_SR_PE) {
+ dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
+ if (get_bcr(fsl_chan) == 0) {
+ /* The BCR register is 0, so this is a DMA_INTERRUPT
+ * async_tx.  Update the completed cookie and start the
+ * next uncompleted transfer.
+ */
+ fsl_dma_update_completed_cookie(fsl_chan);
+ fsl_chan_xfer_ld_queue(fsl_chan);
+ }
+ stat &= ~FSL_DMA_SR_PE;
+ }
+
/* If the link descriptor segment transfer finishes,
* we will recycle the used descriptor.
*/
if (stat & FSL_DMA_SR_EOSI) {
dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
- dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
- "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
- (u64)get_ndar(fsl_chan));
+ dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
+ (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
stat &= ~FSL_DMA_SR_EOSI;
+ fsl_dma_update_completed_cookie(fsl_chan);
}
/* If the current transfer is the end-of-transfer,
fsl_chan_ld_cleanup(fsl_chan);
}
+#ifdef FSL_DMA_CALLBACKTEST
static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
{
if (fsl_chan)
dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
}
+#endif
+#ifdef CONFIG_FSL_DMA_SELFTEST
static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
{
struct dma_chan *chan;
tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
async_tx_ack(tx3);
+ /* Interrupt tx test */
+ tx1 = fsl_dma_prep_interrupt(chan);
+ async_tx_ack(tx1);
+ cookie = fsl_dma_tx_submit(tx1);
+
/* Test exchanging the prepared tx sort */
cookie = fsl_dma_tx_submit(tx3);
cookie = fsl_dma_tx_submit(tx2);
if (err) {
for (i = 0; (i < test_size) && (*(src + i) == *(dest + i));
i++);
- dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
+ dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
"error! src 0x%x, dest 0x%x\n",
- i, test_size, *(src + i), *(dest + i));
+ i, (long)test_size, *(src + i), *(dest + i));
}
free_resources:
kfree(src);
return err;
}
+#endif
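/*
 * Illustrative sketch (an assumption, not part of this patch): the
 * channel probe below would be expected to run the selftest when
 * CONFIG_FSL_DMA_SELFTEST is enabled, along these lines; the variable
 * name is hypothetical.
 *
 *	#ifdef CONFIG_FSL_DMA_SELFTEST
 *		err = fsl_dma_self_test(new_fsl_chan);
 *		if (err)
 *			goto err;
 *	#endif
 */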
static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
const struct of_device_id *match)
static struct of_device_id of_fsl_dma_chan_ids[] = {
{
- .compatible = "fsl,mpc8540-dma-channel",
+ .compatible = "fsl,eloplus-dma-channel",
.data = (void *)&mpc8540_dma_ip_feature,
},
{
- .compatible = "fsl,mpc8349-dma-channel",
+ .compatible = "fsl,elo-dma-channel",
.data = (void *)&mpc8349_dma_ip_feature,
},
{}
}
dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
- "controller at 0x%08x...\n",
- match->compatible, fdev->reg.start);
+ "controller at %p...\n",
+ match->compatible, (void *)fdev->reg.start);
fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
- fdev->reg.start + 1);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
+ fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
fdev->common.device_is_tx_complete = fsl_dma_is_complete;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
}
static struct of_device_id of_fsl_dma_ids[] = {
- { .compatible = "fsl,mpc8540-dma", },
- { .compatible = "fsl,mpc8349-dma", },
+ { .compatible = "fsl,eloplus-dma", },
+ { .compatible = "fsl,elo-dma", },
{}
};
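/*
 * Illustrative device tree fragment (an assumption, not part of this
 * patch): with the generic compatibles above, boards are expected to
 * list a chip-specific string first and the Elo/Elo-plus string second,
 * e.g. for an 85xx-style controller:
 *
 *	dma@21300 {
 *		compatible = "fsl,mpc8540-dma", "fsl,eloplus-dma";
 *		dma-channel@0 {
 *			compatible = "fsl,mpc8540-dma-channel",
 *				     "fsl,eloplus-dma-channel";
 *		};
 *	};
 */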