 	desc->status = 0x0;
 }

-static struct sk_buff *sis190_alloc_rx_skb(struct pci_dev *pdev,
-					    struct RxDesc *desc, u32 rx_buf_sz)
+static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
+					    struct RxDesc *desc)
 {
+	u32 rx_buf_sz = tp->rx_buf_sz;
 	struct sk_buff *skb;

-	skb = dev_alloc_skb(rx_buf_sz);
+	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
 	if (likely(skb)) {
 		dma_addr_t mapping;

-		mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+		mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
 					 PCI_DMA_FROMDEVICE);
 		sis190_map_to_asic(desc, mapping, rx_buf_sz);
 	} else
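
The hunk cuts off in the middle of the if/else, so here is a sketch of how the
whole reworked allocator reads once the patch is applied. The else arm and the
trailing return are reconstructed from the surrounding, unchanged driver code
and are not part of the diff itself:

static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
					   struct RxDesc *desc)
{
	u32 rx_buf_sz = tp->rx_buf_sz;
	struct sk_buff *skb;

	/* netdev_alloc_skb() ties the skb to tp->dev; the PCI device and
	 * buffer size now come from tp instead of extra arguments. */
	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
	if (likely(skb)) {
		dma_addr_t mapping;

		mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
					 PCI_DMA_FROMDEVICE);
		sis190_map_to_asic(desc, mapping, rx_buf_sz);
	} else
		sis190_make_unusable_by_asic(desc);	/* assumed, as before */

	return skb;	/* NULL on allocation failure */
}
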
 		if (tp->Rx_skbuff[i])
 			continue;
-		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp->pci_dev,
-						       tp->RxDescRing + i,
-						       tp->rx_buf_sz);
+		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
+
 		if (!tp->Rx_skbuff[i])
 			break;
 	}
 	return cur - start;
 }
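
The hunk above only shows the middle of sis190_rx_fill(), which makes the
Rx_skbuff[i] indexing and the return cur - start hard to follow on their own.
Here is a sketch of the refill loop after the change; the loop header and the
cur/start/end bookkeeping are reconstructed from the visible context
(NUM_RX_DESC is the driver's ring-size constant) and are not part of the diff:

	u32 cur;

	for (cur = start; cur < end; cur++) {
		unsigned int i = cur % NUM_RX_DESC;

		/* Skip slots that already have a receive buffer. */
		if (tp->Rx_skbuff[i])
			continue;

		/* The allocator now finds the PCI device and rx_buf_sz
		 * through tp; a NULL result means out of memory. */
		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);

		if (!tp->Rx_skbuff[i])
			break;
	}
	return cur - start;	/* how far the refill got */
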

-static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
-				     struct RxDesc *desc, int rx_buf_sz)
+static int sis190_try_rx_copy(struct sis190_private *tp,
+			      struct sk_buff **sk_buff, int pkt_size,
+			      struct RxDesc *desc)
 {
 	int ret = -1;

 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *skb;

-		skb = dev_alloc_skb(pkt_size + 2);
+		skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
 		if (skb) {
 			skb_reserve(skb, 2);
 			skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
 			*sk_buff = skb;
-			sis190_give_to_asic(desc, rx_buf_sz);
+			sis190_give_to_asic(desc, tp->rx_buf_sz);
 			ret = 0;
 		}
 	}
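
This hunk also stops just short of the end of the function. For context, a
sketch of the complete sis190_try_rx_copy() after the change; the final
return ret; is assumed unchanged and is not shown in the diff:

static int sis190_try_rx_copy(struct sis190_private *tp,
			      struct sk_buff **sk_buff, int pkt_size,
			      struct RxDesc *desc)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		/* Small packets are copied into a fresh skb tied to tp->dev
		 * so the original receive buffer can stay with the ring. */
		skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
		if (skb) {
			skb_reserve(skb, 2);	/* keep the IP header aligned */
			skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
			*sk_buff = skb;
			sis190_give_to_asic(desc, tp->rx_buf_sz);
			ret = 0;
		}
	}
	return ret;	/* assumed: 0 when the copy happened, -1 otherwise */
}
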
 				le32_to_cpu(desc->addr), tp->rx_buf_sz,
 				PCI_DMA_FROMDEVICE);

-			if (sis190_try_rx_copy(&skb, pkt_size, desc,
-					       tp->rx_buf_sz)) {
+			if (sis190_try_rx_copy(tp, &skb, pkt_size, desc)) {
 				pci_action = pci_unmap_single;
 				tp->Rx_skbuff[entry] = NULL;
 				sis190_make_unusable_by_asic(desc);