        page = sg_page(sg);
        off = sg->offset;
        len = sg->length;
-       data_len += len;

        while (len > 0 && data_len > 0) {
                /*
                 * sg sends a scatterlist that is larger than
                 * the data_len it wants transferred for certain
                 * IO sizes
                 */
                /* ... */
        }
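With `data_len += len;` removed, `data_len` can no longer be the running sum of the scatterlist lengths; the `while (len > 0 && data_len > 0)` condition only makes sense if it now starts at the requested transfer size and counts down as chunks are mapped, so a scatterlist larger than the transfer no longer inflates the mapped length. A minimal userspace sketch of that counting-down walk (made-up segment sizes, stand-in PAGE_SIZE, not the patch's code):

        #include <stdio.h>

        #define PAGE_SIZE 4096u

        static unsigned int min_u(unsigned int a, unsigned int b)
        {
                return a < b ? a : b;
        }

        int main(void)
        {
                /* three 4 KiB segments, but only 10000 bytes to transfer */
                unsigned int seg_len[] = { 4096, 4096, 4096 };
                unsigned int data_len = 10000; /* starts at bufflen, counts down */

                for (unsigned int i = 0; i < 3; i++) {
                        unsigned int len = seg_len[i];
                        unsigned int off = 0;

                        while (len > 0 && data_len > 0) {
                                /* clamp to the page and to what is still owed */
                                unsigned int bytes = min_u(len, PAGE_SIZE - off);

                                bytes = min_u(bytes, data_len);
                                printf("map %u bytes\n", bytes);
                                len -= bytes;
                                data_len -= bytes;
                                off = 0;
                        }
                }
                return 0; /* maps exactly 10000 of the 12288 available bytes */
        }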
        req->buffer = NULL;
-       if (blk_pc_request(req))
-               sdb->length = req->data_len;
-       else
-               sdb->length = req->nr_sectors << 9;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
+       if (blk_pc_request(req))
+               sdb->length = req->data_len;
+       else
+               sdb->length = req->nr_sectors << 9;
        return BLKPREP_OK;
}
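This hunk does not change how the buffer length is computed, only when: `sdb->length` is now assigned after `blk_rq_map_sg()` has built the table, alongside the final segment count, rather than before the mapping. Restated as a hypothetical helper (not part of the patch; kernel context and that era's `blk_pc_request()` macro assumed):

        #include <linux/blkdev.h>

        /* Hypothetical helper: the byte length the hunk assigns to sdb->length. */
        static unsigned int scsi_req_bytes(struct request *req)
        {
                if (blk_pc_request(req))
                        return req->data_len;      /* BLOCK_PC: exact byte count */
                return req->nr_sectors << 9;       /* filesystem: 512-byte sectors */
        }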
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                         request_fn_proc *request_fn)
{
        struct request_queue *q;
+       struct device *dev = shost->shost_gendev.parent;

        q = blk_init_queue(request_fn, NULL);
        if (!q)
                return NULL;

        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
+       dma_set_seg_boundary(dev, shost->dma_boundary);
+
+       blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

        if (!shost->use_clustering)
                clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
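Previously only the block layer learned the host's `dma_boundary`; the added lines also record it in the parent device's DMA parameters, and in return cap the queue's segment size at whatever the DMA layer allows for that device. Modeled on the dma-mapping helpers of that era (a sketch, not the patch's code), the two calls reduce to:

        #include <linux/device.h>

        /* Sketch of dma_set_seg_boundary(): stores the mask in dev->dma_parms,
         * which the bus or driver must have allocated beforehand. */
        static inline int sketch_set_seg_boundary(struct device *dev,
                                                  unsigned long mask)
        {
                if (!dev->dma_parms)
                        return -EIO;
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }

        /* Sketch of dma_get_max_seg_size(): per-device limit, with a 64 KiB
         * default when no DMA parameters are attached. */
        static inline unsigned int sketch_get_max_seg_size(struct device *dev)
        {
                return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
        }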
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
        unsigned long flags;

+#if 0
+       /* FIXME: currently this check eliminates all media change events
+        * for polled devices. Need to update to discriminate between AN
+        * and polled events */
        if (!test_bit(evt->evt_type, sdev->supported_events)) {
                kfree(evt);
                return;
        }
+#endif

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_add_tail(&evt->node, &sdev->event_list);
        schedule_work(&sdev->event_work);
        spin_unlock_irqrestore(&sdev->list_lock, flags);
}
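With the check compiled out, every queued event now reaches the event list regardless of `supported_events`; the FIXME records the accepted trade-off, namely that media change events from polled devices are no longer dropped. A usage sketch (hypothetical driver code) for feeding this path:

        /* Hypothetical caller: report a media change on a scsi_device.
         * sdev_evt_send_simple() allocates the scsi_event and hands it to
         * sdev_evt_send() above; GFP_ATOMIC suits interrupt context. */
        static void example_report_media_change(struct scsi_device *sdev)
        {
                sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
        }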