In ata_sg_clean():

WARN_ON(sg == NULL);
if (qc->flags & ATA_QCFLAG_SINGLE)
- WARN_ON(qc->n_elem != 1);
+ WARN_ON(qc->n_elem > 1);
VPRINTK("unmapping %u sg elements\n", qc->n_elem);
[...]

kunmap_atomic(addr, KM_IRQ0);
}
} else {
- if (sg_dma_len(&sg[0]) > 0)
+ if (qc->n_elem)
dma_unmap_single(ap->host_set->dev,
sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
dir);
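
Both changes in this hunk allow for the same new case: ata_sg_setup_one() (below) can trim a 1-3 byte request to zero length, drop the lone sg entry, and leave only the pad entry, so qc->n_elem may legitimately be 0. A trimmed entry is never passed to dma_map_single(), so its DMA fields are not meaningful; qc->n_elem now reflects exactly how many entries were mapped and is the reliable unmap gate. A standalone sketch of the trim arithmetic (plain userspace C, not from the patch):

#include <stdio.h>

int main(void)
{
    for (unsigned int len = 1; len <= 8; len++) {
        unsigned int pad_len = len & 3;       /* qc->pad_len */
        unsigned int trimmed = len - pad_len; /* sg->length after trim */

        printf("len=%u pad_len=%u trimmed=%u%s\n", len, pad_len,
               trimmed, trimmed ? "" : " -> entry dropped, n_elem becomes 0");
    }
    return 0;
}

Any request shorter than 4 bytes trims to nothing, which is exactly the n_elem == 0 case the relaxed WARN_ON and the new unmap test tolerate.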
In ata_fill_sg():

unsigned int idx;
WARN_ON(qc->__sg == NULL);
- WARN_ON(qc->n_elem == 0);
+ WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
idx = 0;
ata_for_each_sg(sg, qc) {
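
Same motivation in this (excerpted) hunk: a pad-only command reaches ata_fill_sg() with n_elem == 0, which is now fine as long as pad_len is set; only both being zero means the qc describes no data at all. A hypothetical restatement of the relaxed invariant (helper name is mine, not from the patch):

#include <stdio.h>

/* A qc reaching ata_fill_sg() must describe at least one PRD entry:
 * either a real sg element or the 32-bit pad entry. */
static int qc_describes_data(unsigned int n_elem, unsigned int pad_len)
{
    return n_elem != 0 || pad_len != 0;
}

int main(void)
{
    printf("%d %d %d\n",
           qc_describes_data(2, 0),  /* normal sg command -> 1 */
           qc_describes_data(0, 2),  /* pad-only command  -> 1 */
           qc_describes_data(0, 0)); /* broken qc, WARNs  -> 0 */
    return 0;
}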
In ata_sg_setup_one():

int dir = qc->dma_dir;
struct scatterlist *sg = qc->__sg;
dma_addr_t dma_address;
+ int trim_sg = 0;
/* we must lengthen transfers to end on a 32-bit boundary */
qc->pad_len = sg->length & 3;
[...]

sg_dma_len(psg) = ATA_DMA_PAD_SZ;
/* trim sg */
sg->length -= qc->pad_len;
+ if (sg->length == 0)
+ trim_sg = 1;
DPRINTK("padding done, sg->length=%u pad_len=%u\n",
sg->length, qc->pad_len);
}
- if (!sg->length) {
- sg_dma_address(sg) = 0;
+ if (trim_sg) {
+ qc->n_elem--;
goto skip_map;
}
[...]

}
sg_dma_address(sg) = dma_address;
-skip_map:
sg_dma_len(sg) = sg->length;
+skip_map:
DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
return 0;
}
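
The old zero-length path stored a null mapping (address 0, then sg_dma_len set from the zero length at skip_map) while still counting the entry. The new trim_sg path decrements qc->n_elem instead, and moving the skip_map label below the sg_dma_len() store means neither DMA field of the dropped entry is written at all. A minimal standalone sketch of that control flow (mock types and names are mine; dma_map_single() faked with a constant):

#include <stdio.h>

struct mock_sg {
    unsigned long dma_addr;   /* stand-ins for sg_dma_address/len */
    unsigned int dma_len;
    unsigned int length;
};

/* Mirrors the patched flow: when the entry was trimmed to zero,
 * drop it from the element count and skip BOTH stores below,
 * leaving its DMA fields untouched. */
static unsigned int map_one(struct mock_sg *sg, unsigned int n_elem)
{
    int trim_sg = (sg->length == 0);

    if (trim_sg) {
        n_elem--;
        goto skip_map;
    }
    sg->dma_addr = 0x1000;        /* pretend dma_map_single() */
    sg->dma_len = sg->length;
skip_map:
    return n_elem;
}

int main(void)
{
    struct mock_sg sg = { 0xdeadUL, 0xbeefU, 0 };  /* trimmed away */
    unsigned int n_elem = map_one(&sg, 1);

    printf("n_elem=%u dma_addr=%#lx dma_len=%#x\n",
           n_elem, sg.dma_addr, sg.dma_len);       /* fields untouched */
    return 0;
}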
In include/linux/libata.h, a new companion helper:

+static inline struct scatterlist *
+ata_qc_first_sg(struct ata_queued_cmd *qc)
+{
+ if (qc->n_elem)
+ return qc->__sg;
+ if (qc->pad_len)
+ return &qc->pad_sgent;
+ return NULL;
+}
+
static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
+ if (sg == &qc->pad_sgent)
+ return NULL;
if (++sg - qc->__sg < qc->n_elem)
return sg;
- return qc->pad_len ? &qc->pad_sgent : NULL;
+ if (qc->pad_len)
+ return &qc->pad_sgent;
+ return NULL;
}
#define ata_for_each_sg(sg, qc) \
- for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc))
+ for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
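
Taken together, ata_qc_first_sg() and the rewritten ata_qc_next_sg() let ata_for_each_sg() start at, and visit, the pad entry even when n_elem == 0, and the new sg == &qc->pad_sgent check is what terminates the walk after the pad entry (without it, the increment-and-compare step would be applied to the pad entry itself). A standalone mock of the iteration (userspace C, my type and function names):

#include <stdio.h>

struct sg_ent { unsigned int length; };

struct cmd {
    struct sg_ent *__sg;     /* data sg table (may be unused) */
    unsigned int n_elem;     /* mapped data entries */
    unsigned int pad_len;    /* 0 if no pad entry */
    struct sg_ent pad_sgent; /* 32-bit pad entry */
};

static struct sg_ent *first_sg(struct cmd *qc)
{
    if (qc->n_elem)
        return qc->__sg;
    if (qc->pad_len)
        return &qc->pad_sgent;
    return NULL;
}

static struct sg_ent *next_sg(struct sg_ent *sg, struct cmd *qc)
{
    if (sg == &qc->pad_sgent)  /* pad entry is always last */
        return NULL;
    if ((unsigned int)(++sg - qc->__sg) < qc->n_elem)
        return sg;
    if (qc->pad_len)
        return &qc->pad_sgent;
    return NULL;
}

int main(void)
{
    struct sg_ent data[2] = { { 512 }, { 512 } };
    struct cmd with_data = { data, 2, 4, { 4 } };  /* 2 sg + pad */
    struct cmd pad_only  = { NULL, 0, 2, { 4 } };  /* data trimmed away */
    struct cmd *cmds[] = { &with_data, &pad_only };

    for (int i = 0; i < 2; i++) {
        int n = 0;
        for (struct sg_ent *sg = first_sg(cmds[i]); sg;
             sg = next_sg(sg, cmds[i]))
            n++;
        printf("cmd %d: visited %d entries\n", i, n);  /* 3, then 1 */
    }
    return 0;
}

For a normal command the walk covers the n_elem data entries and then the pad entry; for a trimmed-away command it covers the pad entry alone.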
static inline unsigned int ata_tag_valid(unsigned int tag)
{