void ata_dev_disable(struct ata_device *dev)
{
 	if (ata_dev_enabled(dev)) {
-		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+		if (ata_msg_drv(dev->ap))
+			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
 		dev->class++;
}
void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
- VPRINTK("ENTER, ata%u: device %u, wait %u\n",
- ap->id, device, wait);
+ if (ata_msg_probe(ap)) {
+ ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
+ "device %u, wait %u\n",
+ ap->id, device, wait);
+ }
if (wait)
ata_wait_idle(ap);
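
For reference, ata_port_printk() and ata_dev_printk() are the libata.h printk wrappers this patch converts to; a rough sketch of their shape is below (the exact prefix format strings are an assumption, not part of this patch):

#define ata_port_printk(ap, lv, fmt, args...) \
	printk(lv "ata%u: " fmt, (ap)->id , ##args)

#define ata_dev_printk(dev, lv, fmt, args...) \
	printk(lv "ata%u.%02u: " fmt, (dev)->ap->id, (dev)->devno , ##args)
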
* Cancel and flush.
*/
if (!cancel_delayed_work(&ap->port_task)) {
- DPRINTK("flush #2\n");
+ if (ata_msg_ctl(ap))
+ ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
flush_workqueue(ata_wq);
}
ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
spin_unlock_irqrestore(ap->lock, flags);
- DPRINTK("EXIT\n");
+ if (ata_msg_ctl(ap))
+ ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
else
ata_qc_complete(qc);
-		ata_dev_printk(dev, KERN_WARNING,
-			       "qc timeout (cmd 0x%x)\n", command);
+		if (ata_msg_warn(ap))
+			ata_dev_printk(dev, KERN_WARNING,
+				       "qc timeout (cmd 0x%x)\n", command);
}
ap->ops->post_internal_cmd(qc);
if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
- ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
+ if (ata_msg_warn(ap))
+ ata_dev_printk(dev, KERN_WARNING,
+ "zero err_mask for failed "
"internal command, assuming AC_ERR_OTHER\n");
qc->err_mask |= AC_ERR_OTHER;
}
const char *reason;
int rc;
- DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
+ if (ata_msg_ctl(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+ __FUNCTION__, ap->id, dev->devno);
ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
return 0;
err_out:
- ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
+ if (ata_msg_warn(ap))
+ ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
"(%s, err_mask=0x%x)\n", reason, err_mask);
return rc;
}
unsigned int xfer_mask;
int i, rc;
 	if (!ata_dev_enabled(dev)) {
-		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
-			ap->id, dev->devno);
+		if (ata_msg_info(ap))
+			ata_dev_printk(dev, KERN_INFO,
+				"%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
+				__FUNCTION__, ap->id, dev->devno);
return 0;
}
- DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+ __FUNCTION__, ap->id, dev->devno);
/* print device capabilities */
- if (print_info)
- ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
"84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+ __FUNCTION__,
id[49], id[82], id[83], id[84],
id[85], id[86], id[87], id[88]);
/* find max transfer mode; for printk only */
xfer_mask = ata_id_xfermask(id);
- ata_dump_id(id);
+ if (ata_msg_probe(ap))
+ ata_dump_id(id);
/* ATA-specific feature tests */
if (dev->class == ATA_DEV_ATA) {
ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
/* print device info to dmesg */
- if (print_info)
+ if (ata_msg_info(ap))
ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
"max %s, %Lu sectors: %s %s\n",
ata_id_major_version(id),
}
/* print device info to dmesg */
- if (print_info)
+ if (ata_msg_info(ap))
ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
"max %s, %Lu sectors: CHS %u/%u/%u\n",
ata_id_major_version(id),
if (dev->id[59] & 0x100) {
dev->multi_count = dev->id[59] & 0xff;
- DPRINTK("ata%u: dev %u multi count %u\n",
+ if (ata_msg_info(ap))
+ ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
ap->id, dev->devno, dev->multi_count);
}
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
- ata_dev_printk(dev, KERN_WARNING,
- "unsupported CDB len\n");
+ if (ata_msg_warn(ap))
+ ata_dev_printk(dev, KERN_WARNING,
+ "unsupported CDB len\n");
rc = -EINVAL;
goto err_out_nosup;
}
}
/* print device info to dmesg */
- if (print_info)
+ if (ata_msg_info(ap))
ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
ata_mode_string(xfer_mask),
cdb_intr_string);
/* limit bridge transfers to udma5, 200 sectors */
if (ata_dev_knobble(dev)) {
- if (print_info)
+ if (ata_msg_info(ap))
ata_dev_printk(dev, KERN_INFO,
"applying bridge limits\n");
dev->udma_mask &= ATA_UDMA5;
if (ap->ops->dev_config)
ap->ops->dev_config(ap, dev);
- DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
+ __FUNCTION__, ata_chk_status(ap));
return 0;
err_out_nosup:
- DPRINTK("EXIT, err\n");
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: EXIT, err\n", __FUNCTION__);
return rc;
}
#elif defined(ATA_DEBUG)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
- ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR;
+ ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
INIT_WORK(&ap->port_task, NULL, NULL);
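
The ata_msg_*() predicates used throughout this patch are plain bitmask tests against the per-port ap->msg_enable field initialized here. A minimal sketch of how they are presumably declared in include/linux/libata.h follows; the specific bit values are illustrative assumptions:

enum {
	ATA_MSG_DRV	= 0x0001,
	ATA_MSG_INFO	= 0x0002,
	ATA_MSG_PROBE	= 0x0004,
	ATA_MSG_WARN	= 0x0008,
	ATA_MSG_CTL	= 0x0020,
	ATA_MSG_ERR	= 0x0080,
};

#define ata_msg_drv(p)		((p)->msg_enable & ATA_MSG_DRV)
#define ata_msg_info(p)		((p)->msg_enable & ATA_MSG_INFO)
#define ata_msg_probe(p)	((p)->msg_enable & ATA_MSG_PROBE)
#define ata_msg_warn(p)		((p)->msg_enable & ATA_MSG_WARN)
#define ata_msg_ctl(p)		((p)->msg_enable & ATA_MSG_CTL)
#define ata_msg_err(p)		((p)->msg_enable & ATA_MSG_ERR)

With the defaults above, a non-debug build emits only ATA_MSG_DRV, ATA_MSG_ERR and ATA_MSG_WARN messages; widening ap->msg_enable enables the probe/ctl ENTER/EXIT traces.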