#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE 2
+/*
+ * The maximum number of SG segments that we will put inside a scatterlist
+ * (unless chaining is used). Should ideally fit inside a single page, to
+ * avoid a higher order allocation.
+ */
+#define SCSI_MAX_SG_SEGMENTS 128
+
struct scsi_host_sg_pool {
	size_t size;
	char *name;
	struct kmem_cache *slab;
	mempool_t *pool;
};

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
SP(8),
SP(16),
+#if (SCSI_MAX_SG_SEGMENTS > 16)
SP(32),
+#if (SCSI_MAX_SG_SEGMENTS > 32)
SP(64),
+#if (SCSI_MAX_SG_SEGMENTS > 64)
SP(128),
+#endif
+#endif
+#endif
};
#undef SP
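On a typical 64-bit configuration a struct scatterlist is 24-32 bytes, so even the largest unchained pool (128 entries) stays within a single 4KB page, as the comment above asks. As a rough sketch of how these pools come into being at init time (mirroring what scsi_init_queue() does; the helper name and the abbreviated error handling are illustrative):

	int scsi_init_sg_pools(void)	/* hypothetical helper */
	{
		int i;

		for (i = 0; i < SG_MEMPOOL_NR; i++) {
			struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
			int size = sgp->size * sizeof(struct scatterlist);

			/* one slab per sgtable size, named e.g. "sgpool-32" */
			sgp->slab = kmem_cache_create(sgp->name, size, 0,
					SLAB_HWCACHE_ALIGN, NULL);
			if (!sgp->slab)
				return -ENOMEM;

			/* keep SG_MEMPOOL_SIZE (2) objects in emergency reserve */
			sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
							     sgp->slab);
			if (!sgp->pool)
				return -ENOMEM;
		}

		return 0;
	}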
int i, err, nr_vecs = 0;
for_each_sg(sgl, sg, nsegs, i) {
- page = sg->page;
+ page = sg_page(sg);
off = sg->offset;
len = sg->length;
data_len += len;
return NULL;
}
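The sg->page to sg_page(sg) conversions in this hunk are not cosmetic: with chaining, the page pointer and the chain/end markers share a single page_link word, so the field must never be dereferenced directly. The accessor pair looks like this:

	/* read and write a scatterlist entry via accessors only */
	struct page *page = sg_page(sg);
	sg_set_page(sg, page, len, offset);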
-/*
- * The maximum number of SG segments that we will put inside a scatterlist
- * (unless chaining is used). Should ideally fit inside a single page, to
- * avoid a higher order allocation.
- */
-#define SCSI_MAX_SG_SEGMENTS 128
-
/*
 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
 * is totally arbitrary; a setting of 2048 will get you at least 8MB I/Os
 * (2048 segments x 4KB pages = 8MB).
 */
#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048
case 9 ... 16:
index = 1;
break;
+#if (SCSI_MAX_SG_SEGMENTS > 16)
case 17 ... 32:
index = 2;
break;
+#if (SCSI_MAX_SG_SEGMENTS > 32)
case 33 ... 64:
index = 3;
break;
- case 65 ... SCSI_MAX_SG_SEGMENTS:
+#if (SCSI_MAX_SG_SEGMENTS > 64)
+ case 65 ... 128:
index = 4;
break;
+#endif
+#endif
+#endif
default:
printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
BUG();
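The switch above lives in scsi_sgtable_index() and simply picks the smallest pool whose size covers nents. Since the pool sizes are 8 << index, a closed-form equivalent (illustrative only; the function name here is hypothetical) is:

	static inline unsigned int sg_pool_index(unsigned short nents)
	{
		unsigned int index;

		BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

		if (nents <= 8)
			index = 0;
		else
			index = get_count_order(nents) - 3;	/* 8 << index >= nents */

		return index;
	}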
if (unlikely(!sgl))
goto enomem;
- memset(sgl, 0, sizeof(*sgl) * sgp->size);
+ sg_init_table(sgl, sgp->size);
/*
* first loop through, set initial index and return value
*/
- if (!ret) {
- cmd->sglist_len = index;
+ if (!ret)
ret = sgl;
- }
/*
* chain previous sglist, if any. we know the previous
* sglist must be the biggest one, or we would not have
* ended up doing another loop.
*/
if (prev)
sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+ /*
+ * if we have nothing left, mark the last segment as
+ * end-of-list
+ */
+ if (!left)
+ sg_mark_end(&sgl[this - 1]);
+
/*
* don't allow subsequent mempool allocs to sleep, it would
* violate the mempool principle.
*/
gfp_mask &= ~__GFP_WAIT;
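To see what the chaining above buys, consider two fixed-size tables joined with sg_chain(): the last slot of the first table is sacrificed to hold the link, and sg_mark_end() terminates the list wherever the mapping actually stops. A self-contained sketch (sgl_a/sgl_b are illustrative names):

	struct scatterlist sgl_a[SCSI_MAX_SG_SEGMENTS];
	struct scatterlist sgl_b[SCSI_MAX_SG_SEGMENTS];

	sg_init_table(sgl_a, SCSI_MAX_SG_SEGMENTS);
	sg_init_table(sgl_b, SCSI_MAX_SG_SEGMENTS);

	/* entry 127 of sgl_a becomes a link, not a data segment */
	sg_chain(sgl_a, SCSI_MAX_SG_SEGMENTS, sgl_b);

	/* usable segments: 127 + 128; terminate at the last one used */
	sg_mark_end(&sgl_b[SCSI_MAX_SG_SEGMENTS - 1]);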
struct scatterlist *sgl = cmd->request_buffer;
struct scsi_host_sg_pool *sgp;
- BUG_ON(cmd->sglist_len >= SG_MEMPOOL_NR);
-
/*
* if this is the biggest size sglist, check if we have
* chained parts we need to free
* Restore original, will be freed below
*/
sgl = cmd->request_buffer;
- }
+ sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
+ } else
+ sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
- sgp = scsi_sg_pools + cmd->sglist_len;
mempool_free(sgl, sgp->pool);
}
* converted, so better keep it safe.
*/
#ifdef ARCH_HAS_SG_CHAIN
- blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+ if (shost->use_sg_chaining)
+ blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+ else
+ blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#else
blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#endif
}
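Whether a host gets the chained limit is the low-level driver's choice, advertised through its template. A hedged sketch of an opt-in (the example_* names are hypothetical; use_sg_chaining and ENABLE_SG_CHAINING as they exist in scsi_host_template at this point):

	static struct scsi_host_template example_sht = {
		.module			= THIS_MODULE,
		.name			= "example",
		.queuecommand		= example_queuecommand,
		.this_id		= -1,
		.sg_tablesize		= SCSI_MAX_SG_CHAIN_SEGMENTS,
		.cmd_per_lun		= 1,
		.use_clustering		= ENABLE_CLUSTERING,
		.use_sg_chaining	= ENABLE_SG_CHAINING,
	};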
EXPORT_SYMBOL(scsi_device_set_state);
+/**
+ * scsi_evt_emit - emit a single SCSI device uevent
+ * @sdev: associated SCSI device
+ * @evt: event to emit
+ *
+ * Send a single uevent (scsi_event) to the associated scsi_device.
+ */
+static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
+{
+ int idx = 0;
+ char *envp[3];
+
+ switch (evt->evt_type) {
+ case SDEV_EVT_MEDIA_CHANGE:
+ envp[idx++] = "SDEV_MEDIA_CHANGE=1";
+ break;
+
+ default:
+ /* do nothing */
+ break;
+ }
+
+ envp[idx++] = NULL;
+
+ kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
+}
+
+/**
+ * scsi_evt_thread - send a uevent for each queued scsi event
+ * @work: work struct for scsi_device
+ *
+ * Dispatch queued events to their associated scsi_device kobjects
+ * as uevents.
+ */
+void scsi_evt_thread(struct work_struct *work)
+{
+ struct scsi_device *sdev;
+ LIST_HEAD(event_list);
+
+ sdev = container_of(work, struct scsi_device, event_work);
+
+ while (1) {
+ struct scsi_event *evt;
+ struct list_head *this, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_splice_init(&sdev->event_list, &event_list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+
+ if (list_empty(&event_list))
+ break;
+
+ list_for_each_safe(this, tmp, &event_list) {
+ evt = list_entry(this, struct scsi_event, node);
+ list_del(&evt->node);
+ scsi_evt_emit(sdev, evt);
+ kfree(evt);
+ }
+ }
+}
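The loop drains the whole pending list under list_lock with a single splice, then emits the events without holding the lock; since schedule_work() may requeue more events while emission runs, the outer while (1) re-checks until the list stays empty. The work struct itself is wired up on the allocation path, roughly (as in scsi_alloc_sdev()):

	INIT_LIST_HEAD(&sdev->event_list);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);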
+
+/**
+ * sdev_evt_send - send asserted event to uevent thread
+ * @sdev: scsi_device event occurred on
+ * @evt: event to send
+ *
+ * Assert scsi device event asynchronously.
+ */
+void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
+{
+ unsigned long flags;
+
+ if (!test_bit(evt->evt_type, sdev->supported_events)) {
+ kfree(evt);
+ return;
+ }
+
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_add_tail(&evt->node, &sdev->event_list);
+ schedule_work(&sdev->event_work);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(sdev_evt_send);
+
+/**
+ * sdev_evt_alloc - allocate a new scsi event
+ * @evt_type: type of event to allocate
+ * @gfpflags: GFP flags for allocation
+ *
+ * Allocates and returns a new scsi_event.
+ */
+struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
+ gfp_t gfpflags)
+{
+ struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
+ if (!evt)
+ return NULL;
+
+ evt->evt_type = evt_type;
+ INIT_LIST_HEAD(&evt->node);
+
+ /* evt_type-specific initialization, if any */
+ switch (evt_type) {
+ case SDEV_EVT_MEDIA_CHANGE:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return evt;
+}
+EXPORT_SYMBOL_GPL(sdev_evt_alloc);
+
+/**
+ * sdev_evt_send_simple - send asserted event to uevent thread
+ * @sdev: scsi_device event occurred on
+ * @evt_type: type of event to send
+ * @gfpflags: GFP flags for allocation
+ *
+ * Assert scsi device event asynchronously, given an event type.
+ */
+void sdev_evt_send_simple(struct scsi_device *sdev,
+ enum scsi_device_event evt_type, gfp_t gfpflags)
+{
+ struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
+ if (!evt) {
+ sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
+ evt_type);
+ return;
+ }
+
+ sdev_evt_send(sdev, evt);
+}
+EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
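For a driver that has just noticed new media, the whole API boils down to one fire-and-forget call (the call site here is hypothetical):

	/* queues the event and wakes the per-device work */
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);

Userspace then sees a KOBJ_CHANGE uevent carrying SDEV_MEDIA_CHANGE=1 in its environment, which a udev rule can match with ENV{SDEV_MEDIA_CHANGE}=="1".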
+
/**
* scsi_device_quiesce - Block user issued commands.
* @sdev: scsi device to quiesce.
*offset = *offset - len_complete + sg->offset;
/* Assumption: contiguous pages can be accessed as "page + i" */
- page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
+ page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
*offset &= ~PAGE_MASK;
/* Bytes in this sg-entry from *offset to the end of the page */