*/
s32 atl1_reset_hw(struct atl1_hw *hw)
{
+ struct pci_dev *pdev = hw->back->pdev;
u32 icr;
int i;
}
if (icr) {
- printk (KERN_DEBUG "icr = %x\n", icr);
+ dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
return icr;
}
*/
static s32 atl1_phy_reset(struct atl1_hw *hw)
{
+ struct pci_dev *pdev = hw->back->pdev;
s32 ret_val;
u16 phy_data;
u32 val;
int i;
/* pcie serdes link may be down! */
- printk(KERN_DEBUG "%s: autoneg caused pcie phy link down\n",
- atl1_driver_name);
+ dev_dbg(&pdev->dev, "pcie phy link down\n");
for (i = 0; i < 25; i++) {
msleep(1);
}
if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
- printk(KERN_WARNING
- "%s: pcie link down at least for 25ms\n",
- atl1_driver_name);
+ dev_warn(&pdev->dev, "pcie link down for at least 25ms\n");
return ret_val;
}
}
*/
static s32 atl1_setup_link(struct atl1_hw *hw)
{
+ struct pci_dev *pdev = hw->back->pdev;
s32 ret_val;
/*
*/
ret_val = atl1_phy_setup_autoneg_adv(hw);
if (ret_val) {
- printk(KERN_DEBUG "%s: error setting up autonegotiation\n",
- atl1_driver_name);
+ dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
return ret_val;
}
/* SW.Reset , En-Auto-Neg if needed */
ret_val = atl1_phy_reset(hw);
if (ret_val) {
- printk(KERN_DEBUG "%s: error resetting the phy\n",
- atl1_driver_name);
+ dev_dbg(&pdev->dev, "error resetting phy\n");
return ret_val;
}
hw->phy_configured = true;
*/
s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
{
+ struct pci_dev *pdev = hw->back->pdev;
s32 ret_val;
u16 phy_data;
*speed = SPEED_10;
break;
default:
- printk(KERN_DEBUG "%s: error getting speed\n",
- atl1_driver_name);
+ dev_dbg(&pdev->dev, "error getting speed\n");
return ATL1_ERR_PHY_SPEED;
break;
}
size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
if (unlikely(!tpd_ring->buffer_info)) {
- printk(KERN_WARNING "%s: kzalloc failed , size = D%d\n",
- atl1_driver_name, size);
+ dev_err(&pdev->dev, "kzalloc failed, size = %d\n", size);
goto err_nomem;
}
rfd_ring->buffer_info =
ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
&ring_header->dma);
if (unlikely(!ring_header->desc)) {
- printk(KERN_WARNING
- "%s: pci_alloc_consistent failed, size = D%d\n",
- atl1_driver_name, size);
+ dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
goto err_nomem;
}
if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
ERR_FLAG_CODE | ERR_FLAG_OV)) {
adapter->hw_csum_err++;
- printk(KERN_DEBUG "%s: rx checksum error\n",
- atl1_driver_name);
+ dev_dbg(&adapter->pdev->dev, "rx checksum error\n");
return;
}
}
}
/* IPv4, but hardware thinks its checksum is wrong */
- printk(KERN_DEBUG "%s: hw csum wrong pkt_flag:%x, err_flag:%x\n",
- atl1_driver_name, rrd->pkt_flg, rrd->err_flg);
+ dev_dbg(&adapter->pdev->dev,
+ "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
+ rrd->pkt_flg, rrd->err_flg);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
adapter->hw_csum_err++;
/* rrd seems to be bad */
if (unlikely(i-- > 0)) {
/* rrd may not be DMAed completely */
- printk(KERN_DEBUG
- "%s: RRD may not be DMAed completely\n",
- atl1_driver_name);
+ dev_dbg(&adapter->pdev->dev,
+ "incomplete RRD DMA transfer\n");
udelay(1);
goto chk_rrd;
}
/* bad rrd */
- printk(KERN_DEBUG "%s: bad RRD\n", atl1_driver_name);
+ dev_dbg(&adapter->pdev->dev, "bad RRD\n");
/* see if update RFD index */
if (rrd->num_buf > 1) {
u16 num_buf;
/* notify upper layer link down ASAP */
if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
if (netif_carrier_ok(netdev)) { /* old link state: Up */
- printk(KERN_INFO "%s: %s link is down\n",
- atl1_driver_name, netdev->name);
+ dev_info(&adapter->pdev->dev, "%s link is down\n",
+ netdev->name);
adapter->link_speed = SPEED_0;
netif_carrier_off(netdev);
netif_stop_queue(netdev);
/* check if PCIE PHY Link down */
if (status & ISR_PHY_LINKDOWN) {
- printk(KERN_DEBUG "%s: pcie phy link down %x\n",
- atl1_driver_name, status);
+ dev_dbg(&adapter->pdev->dev, "pcie phy link down %x\n",
+ status);
if (netif_running(adapter->netdev)) { /* reset MAC */
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
schedule_work(&adapter->pcie_dma_to_rst_task);
/* check if DMA read/write error ? */
if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
- printk(KERN_DEBUG
- "%s: pcie DMA r/w error (status = 0x%x)\n",
- atl1_driver_name, status);
+ dev_dbg(&adapter->pdev->dev,
+ "pcie DMA r/w error (status = 0x%x)\n",
+ status);
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
schedule_work(&adapter->pcie_dma_to_rst_task);
return IRQ_HANDLED;
/* rx exception */
if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
- ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
- ISR_HOST_RRD_OV | ISR_CMB_RX))) {
- if (status &
- (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV |
- ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV))
- printk(KERN_INFO
- "%s: rx exception: status = 0x%x\n",
- atl1_driver_name, status);
+ ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+ ISR_HOST_RRD_OV | ISR_CMB_RX))) {
+ if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+ ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+ ISR_HOST_RRD_OV))
+ dev_dbg(&adapter->pdev->dev,
+ "rx exception, ISR = 0x%x\n", status);
atl1_intr_rx(adapter);
}
atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
if (!(phy_data & BMSR_LSTATUS)) { /* link down */
if (netif_carrier_ok(netdev)) { /* old link state: Up */
- printk(KERN_INFO "%s: link is down\n",
- atl1_driver_name);
+ dev_info(&adapter->pdev->dev, "link is down\n");
adapter->link_speed = SPEED_0;
netif_carrier_off(netdev);
netif_stop_queue(netdev);
adapter->link_speed = speed;
adapter->link_duplex = duplex;
atl1_setup_mac_ctrl(adapter);
- printk(KERN_INFO "%s: %s link is up %d Mbps %s\n",
- atl1_driver_name, netdev->name,
- adapter->link_speed,
- adapter->link_duplex ==
- FULL_DUPLEX ? "full duplex" : "half duplex");
+ dev_info(&adapter->pdev->dev,
+ "%s link is up %d Mbps %s\n",
+ netdev->name, adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "full duplex" : "half duplex");
}
if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
netif_carrier_on(netdev);
cso = skb_transport_offset(skb);
css = cso + skb->csum_offset;
if (unlikely(cso & 0x1)) {
- printk(KERN_DEBUG "%s: payload offset != even number\n",
- atl1_driver_name);
+ dev_dbg(&adapter->pdev->dev,
+ "payload offset not an even number\n");
return -1;
}
csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
if (!spin_trylock(&adapter->lock)) {
/* Can't get lock - tell upper layer to requeue */
local_irq_restore(flags);
- printk(KERN_DEBUG "%s: TX locked\n", atl1_driver_name);
+ dev_dbg(&adapter->pdev->dev, "tx locked\n");
return NETDEV_TX_LOCKED;
}
/* not enough descriptors */
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->lock, flags);
- printk(KERN_DEBUG "%s: TX busy\n", atl1_driver_name);
+ dev_dbg(&adapter->pdev->dev, "tx busy\n");
return NETDEV_TX_BUSY;
}
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- printk(KERN_WARNING "%s: invalid MTU setting\n",
- atl1_driver_name);
+ dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
return -EINVAL;
}
if (err) {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
- printk(KERN_DEBUG
- "%s: no usable DMA configuration, aborting\n",
- atl1_driver_name);
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_dma;
}
pci_using_64 = false;
goto err_pci_iomap;
}
/* get device revision number */
- adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + (REG_MASTER_CTRL + 2));
+ adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
+ (REG_MASTER_CTRL + 2));
+ dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
/* set default ring resource counts */
adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
*/
static int __init atl1_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n", atl1_driver_string, DRIVER_VERSION);
- printk(KERN_INFO "%s\n", atl1_copyright);
return pci_register_driver(&atl1_driver);
}
} arg;
};
-static int __devinit atl1_validate_option(int *value, struct atl1_option *opt)
+static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
+ struct pci_dev *pdev)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- printk(KERN_INFO "%s: %s Enabled\n", atl1_driver_name,
- opt->name);
+ dev_info(&pdev->dev, "%s enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- printk(KERN_INFO "%s: %s Disabled\n", atl1_driver_name,
- opt->name);
+ dev_info(&pdev->dev, "%s disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- printk(KERN_INFO "%s: %s set to %i\n",
- atl1_driver_name, opt->name, *value);
+ dev_info(&pdev->dev, "%s set to %i\n", opt->name,
+ *value);
return 0;
}
break;
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- printk(KERN_INFO "%s: %s\n",
- atl1_driver_name, ent->str);
+ dev_info(&pdev->dev, "%s\n",
+ ent->str);
return 0;
}
}
break;
}
- printk(KERN_INFO "%s: invalid %s specified (%i) %s\n",
- atl1_driver_name, opt->name, *value, opt->err);
+ dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
*/
void __devinit atl1_check_options(struct atl1_adapter *adapter)
{
+ struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
if (bd >= ATL1_MAX_NIC) {
- printk(KERN_NOTICE "%s: warning: no configuration for board #%i\n",
- atl1_driver_name, bd);
- printk(KERN_NOTICE "%s: using defaults for all values\n",
- atl1_driver_name);
+ dev_notice(&pdev->dev, "no configuration for board #%i\n", bd);
+ dev_notice(&pdev->dev, "using defaults for all values\n");
}
{ /* Interrupt Moderate Timer */
struct atl1_option opt = {
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
- atl1_validate_option(&val, &opt);
+ atl1_validate_option(&val, &opt, pdev);
adapter->imt = (u16) val;
} else
adapter->imt = (u16) (opt.def);
int val;
if (num_flash_vendor > bd) {
val = flash_vendor[bd];
- atl1_validate_option(&val, &opt);
+ atl1_validate_option(&val, &opt, pdev);
adapter->hw.flash_vendor = (u8) val;
} else
adapter->hw.flash_vendor = (u8) (opt.def);