(dma_addr_t)((unsigned long)(v) - \
(unsigned long)((sp)->rx_desc)))
-#define DMA_SYNC_DESC_CPU(dev, addr) \
- do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
- sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } while (0)
-
-#define DMA_SYNC_DESC_DEV(dev, addr) \
- do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
- sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } while (0)
-
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
* a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
*/
spinlock_t tx_lock;
};
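+/* Sync a descriptor so the CPU sees any data the device has written. */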
+static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
+{
+ dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+ DMA_FROM_DEVICE);
+}
+
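+/* Flush CPU writes to a descriptor so they are visible to the device. */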
+static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
+{
+ dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+ DMA_TO_DEVICE);
+}
+
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
/* Setup tx ring. */
for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
- DMA_SYNC_DESC_DEV(dev, &sp->tx_desc[i]);
+ dma_sync_desc_dev(dev, &sp->tx_desc[i]);
}
/* And now the rx ring. */
sp->rx_desc[i].rdma.pbuf = dma_addr;
}
sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
- DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i]);
+ dma_sync_desc_dev(dev, &sp->rx_desc[i]);
}
sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
- DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i - 1]);
+ dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
return 0;
}
/* Service every received packet. */
rd = &sp->rx_desc[sp->rx_new];
- DMA_SYNC_DESC_CPU(dev, rd);
+ dma_sync_desc_cpu(dev, rd);
while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
/* Return the entry to the ring pool. */
rd->rdma.cntinfo = RCNTINFO_INIT;
sp->rx_new = NEXT_RX(sp->rx_new);
- DMA_SYNC_DESC_DEV(dev, rd);
+ dma_sync_desc_dev(dev, rd);
rd = &sp->rx_desc[sp->rx_new];
- DMA_SYNC_DESC_CPU(dev, rd);
+ dma_sync_desc_cpu(dev, rd);
}
- DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[orig_end]);
+ dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
- DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[orig_end]);
- DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+ dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
+ dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
- DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+ dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
rx_maybe_restart(sp, hregs, sregs);
}
* is not active!
*/
td = &sp->tx_desc[i];
- DMA_SYNC_DESC_CPU(dev, td);
+ dma_sync_desc_cpu(dev, td);
while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
(HPCDMA_XIU | HPCDMA_ETXD)) {
i = NEXT_TX(i);
td = &sp->tx_desc[i];
- DMA_SYNC_DESC_CPU(dev, td);
+ dma_sync_desc_cpu(dev, td);
}
if (td->tdma.cntinfo & HPCDMA_XIU) {
hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
td = &sp->tx_desc[j];
- DMA_SYNC_DESC_CPU(dev, td);
+ dma_sync_desc_cpu(dev, td);
if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
break;
if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
dev_kfree_skb_any(td->skb);
td->skb = NULL;
}
- DMA_SYNC_DESC_DEV(dev, td);
+ dma_sync_desc_dev(dev, td);
}
}
dev->stats.tx_bytes += len;
entry = sp->tx_new;
td = &sp->tx_desc[entry];
- DMA_SYNC_DESC_CPU(dev, td);
+ dma_sync_desc_cpu(dev, td);
/* Create entry. There are so many races with adding a new
* descriptor to the chain:
len, DMA_TO_DEVICE);
td->tdma.cntinfo = (len & HPCDMA_BCNT) |
HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
- DMA_SYNC_DESC_DEV(dev, td);
+ dma_sync_desc_dev(dev, td);
if (sp->tx_old != sp->tx_new) {
struct sgiseeq_tx_desc *backend;
backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
- DMA_SYNC_DESC_CPU(dev, backend);
+ dma_sync_desc_cpu(dev, backend);
backend->tdma.cntinfo &= ~HPCDMA_EOX;
- DMA_SYNC_DESC_DEV(dev, backend);
+ dma_sync_desc_dev(dev, backend);
}
sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
while (i < (nbufs - 1)) {
buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
buf[i].tdma.pbuf = 0;
- DMA_SYNC_DESC_DEV(dev, &buf[i]);
+ dma_sync_desc_dev(dev, &buf[i]);
i++;
}
buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
- DMA_SYNC_DESC_DEV(dev, &buf[i]);
+ dma_sync_desc_dev(dev, &buf[i]);
}
static inline void setup_rx_ring(struct net_device *dev,
while (i < (nbufs - 1)) {
buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
buf[i].rdma.pbuf = 0;
- DMA_SYNC_DESC_DEV(dev, &buf[i]);
+ dma_sync_desc_dev(dev, &buf[i]);
i++;
}
buf[i].rdma.pbuf = 0;
buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
- DMA_SYNC_DESC_DEV(dev, &buf[i]);
+ dma_sync_desc_dev(dev, &buf[i]);
}
static int __init sgiseeq_probe(struct platform_device *pdev)