M: colin@colino.net
S: Maintained
-ADVANSYS SCSI DRIVER
-P: Bob Frey
-M: linux@advansys.com
-W: http://www.advansys.com/linux.html
-L: linux-scsi@vger.kernel.org
-S: Maintained
-
AEDSP16 DRIVER
P: Riccardo Facchetti
M: fizban@tin.it
W: http://www.debian.org/~dz/i8k/
S: Maintained
+DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas)
+P: Doug Warzecha
+M: Douglas_Warzecha@dell.com
+S: Maintained
+
DEVICE-MAPPER
P: Alasdair Kergon
L: dm-devel@redhat.com
W: http://sourceforge.net/projects/emu10k1/
S: Maintained
+EMULEX LPFC FC SCSI DRIVER
+P: James Smart
+M: james.smart@emulex.com
+L: linux-scsi@vger.kernel.org
+W: http://sourceforge.net/projects/lpfcxxxx
+S: Supported
+
EPSON 1355 FRAMEBUFFER DRIVER
P: Christopher Hoover
M: ch@murgatroid.com, ch@hpl.hp.com
FILESYSTEMS (VFS and infrastructure)
P: Alexander Viro
-M: viro@parcelfarce.linux.theplanet.co.uk
+M: viro@zeniv.linux.org.uk
S: Maintained
FIRMWARE LOADER (request_firmware)
W: http://www.kernel.org/pub/linux/utils/net/hdlc/
S: Maintained
+HARDWARE MONITORING
+P: Jean Delvare
+M: khali@linux-fr.org
+L: lm-sensors@lm-sensors.org
+W: http://www.lm-sensors.nu/
+S: Maintained
+
HARMONY SOUND DRIVER
P: Kyle McMartin
M: kyle@parisc-linux.org
L: iss_storagedev@hp.com
S: Supported
+HOST AP DRIVER
+P: Jouni Malinen
+M: jkmaline@cc.hut.fi
+L: hostap@shmoo.com
+W: http://hostap.epitest.fi/
+S: Maintained
+
HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
P: Jaroslav Kysela
M: perex@suse.cz
M: wli@holomorphy.com
S: Maintained
-I2C AND SENSORS DRIVERS
+I2C SUBSYSTEM
P: Greg Kroah-Hartman
M: greg@kroah.com
P: Jean Delvare
ROCKETPORT DRIVER
P: Comtrol Corp.
-M: support@comtrol.com
W: http://www.comtrol.com
S: Maintained
W: http://www.simtec.co.uk/products/EB2410ITX/
S: Supported
+SIS 190 ETHERNET DRIVER
+P: Francois Romieu
+M: romieu@fr.zoreil.com
+L: netdev@vger.kernel.org
+S: Maintained
+
SIS 5513 IDE CONTROLLER DRIVER
P: Lionel Bouton
M: Lionel.Bouton@inet6.fr
UCLINUX (AND M68KNOMMU)
P: Greg Ungerer
M: gerg@uclinux.org
-M: gerg@snapgear.com
-P: David McCullough
-M: davidm@snapgear.com
-P: D. Jeff Dionne (created first uClinux port)
-M: jeff@uclinux.org
W: http://www.uclinux.org/
L: uclinux-dev@uclinux.org (subscribers-only)
S: Maintained
* set defaults
*/
q->nr_requests = BLKDEV_MAX_RQ;
- q->max_phys_segments = MAX_PHYS_SEGMENTS;
- q->max_hw_segments = MAX_HW_SEGMENTS;
+ blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+ blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
q->make_request_fn = mfn;
q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
rq->special = NULL;
rq->data_len = 0;
rq->data = NULL;
+ rq->nr_phys_segments = 0;
rq->sense = NULL;
rq->end_io = NULL;
rq->end_io_data = NULL;
/**
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
- * @rw: READ or WRITE data
+ * @rq: request structure to fill
* @ubuf: the user buffer
* @len: length of user data
*
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
- struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
- unsigned int len)
+ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+ unsigned int len)
{
unsigned long uaddr;
- struct request *rq;
struct bio *bio;
+ int reading;
if (len > (q->max_sectors << 9))
- return ERR_PTR(-EINVAL);
- if ((!len && ubuf) || (len && !ubuf))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
+ if (!len || !ubuf)
+ return -EINVAL;
- rq = blk_get_request(q, rw, __GFP_WAIT);
- if (!rq)
- return ERR_PTR(-ENOMEM);
+ reading = rq_data_dir(rq) == READ;
/*
* if alignment requirement is satisfied, map in user pages for
*/
uaddr = (unsigned long) ubuf;
if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
- bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+ bio = bio_map_user(q, NULL, uaddr, len, reading);
else
- bio = bio_copy_user(q, uaddr, len, rw == READ);
+ bio = bio_copy_user(q, uaddr, len, reading);
if (!IS_ERR(bio)) {
rq->bio = rq->biotail = bio;
rq->buffer = rq->data = NULL;
rq->data_len = len;
- return rq;
+ return 0;
}
/*
* bio is the err-ptr
*/
- blk_put_request(rq);
- return (struct request *) bio;
+ return PTR_ERR(bio);
}
EXPORT_SYMBOL(blk_rq_map_user);
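
For orientation, a minimal sketch of how a caller would drive the reworked interface: the caller now allocates and owns the request, maps the user buffer into it, executes it, and unmaps via the original bio. All names below (and the use of blk_execute_rq() with the at_head flag added later in this patch) are illustrative, not part of the change itself.

static int example_user_passthrough(request_queue_t *q, struct gendisk *disk,
				    unsigned char *cdb, unsigned int cdb_len,
				    void __user *ubuf, unsigned int len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	/* data direction now comes from the request, not from an rw argument */
	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out;

	bio = rq->bio;				/* keep the original bio for unmapping */

	memcpy(rq->cmd, cdb, cdb_len);		/* assumes cdb_len <= sizeof(rq->cmd) */
	rq->cmd_len = cdb_len;
	rq->flags |= REQ_BLOCK_PC;
	rq->timeout = 60 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);	/* 0 = insert at the tail */

	blk_rq_unmap_user(bio, len);
out:
	blk_put_request(rq);
	return err;
}
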
+ /**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to map data to
+ * @iov: pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ *
+ * Description:
+ * Data will be mapped directly for zero copy io, if possible. Otherwise
+ * a kernel bounce buffer is used.
+ *
+ * A matching blk_rq_unmap_user() must be issued at the end of io, while
+ * still in process context.
+ *
+ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ * before being submitted to the device, as pages mapped may be out of
+ * reach. It's the caller's responsibility to make sure this happens. The
+ * original bio must be passed back in to blk_rq_unmap_user() for proper
+ * unmapping.
+ */
+ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+ struct sg_iovec *iov, int iov_count)
+ {
+ struct bio *bio;
+
+ if (!iov || iov_count <= 0)
+ return -EINVAL;
+
+ /* we don't allow misaligned data like bio_map_user() does. If the
+ * user is using sg, they're expected to know the alignment constraints
+ * and respect them accordingly */
+ bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ rq->bio = rq->biotail = bio;
+ blk_rq_bio_prep(q, rq, bio);
+ rq->buffer = rq->data = NULL;
+ rq->data_len = bio->bi_size;
+ return 0;
+ }
+
+ EXPORT_SYMBOL(blk_rq_map_user_iov);
+
/**
* blk_rq_unmap_user - unmap a request with user data
- * @rq: request to be unmapped
- * @bio: bio for the request
+ * @bio: bio to be unmapped
* @ulen: length of user buffer
*
* Description:
- * Unmap a request previously mapped by blk_rq_map_user().
+ * Unmap a bio previously mapped by blk_rq_map_user().
*/
- int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+ int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
{
int ret = 0;
ret = bio_uncopy_user(bio);
}
- blk_put_request(rq);
- return ret;
+ return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
+ /**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+ * @kbuf: the kernel buffer
+ * @len: length of kernel data
+ * @gfp_mask: memory allocation flags
+ */
+ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+ unsigned int len, unsigned int gfp_mask)
+ {
+ struct bio *bio;
+
+ if (len > (q->max_sectors << 9))
+ return -EINVAL;
+ if (!len || !kbuf)
+ return -EINVAL;
+
+ bio = bio_map_kern(q, kbuf, len, gfp_mask);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (rq_data_dir(rq) == WRITE)
+ bio->bi_rw |= (1 << BIO_RW);
+
+ rq->bio = rq->biotail = bio;
+ blk_rq_bio_prep(q, rq, bio);
+
+ rq->buffer = rq->data = NULL;
+ rq->data_len = len;
+ return 0;
+ }
+
+ EXPORT_SYMBOL(blk_rq_map_kern);
+
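
blk_rq_map_kern() follows the same pattern for buffers that already live in kernel memory. A hedged sketch along the lines of the earlier example (names again invented); note there is no unmap step, since bio_map_kern_endio(), added in fs/bio.c below, drops the bio when the io completes.

static int example_kern_passthrough(request_queue_t *q, struct gendisk *disk,
				    void *buf, unsigned int len)
{
	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
	int err;

	if (!rq)
		return -ENOMEM;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err) {
		/* fill in rq->cmd, rq->cmd_len and rq->timeout as needed */
		rq->flags |= REQ_BLOCK_PC;
		err = blk_execute_rq(q, disk, rq, 0);
	}

	blk_put_request(rq);
	return err;
}
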
+ /**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the io scheduler queue
+ * for execution. Don't wait for completion.
+ */
+ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+ struct request *rq, int at_head,
+ void (*done)(struct request *))
+ {
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+ rq->rq_disk = bd_disk;
+ rq->flags |= REQ_NOMERGE;
+ rq->end_io = done;
+ elv_add_request(q, rq, where, 1);
+ generic_unplug_device(q);
+ }
+
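
With the _nowait variant, completion handling is left to the caller; blk_execute_rq() below is now just this plus blk_end_sync_rq(). A rough illustration of a caller-supplied handler, with invented names:

static void example_rq_done(struct request *rq)
{
	struct completion *waiting = rq->end_io_data;

	/* runs from request completion context, so only signal here */
	complete(waiting);
}

	/* submitter side, given a fully prepared pass-through request 'rq': */
	DECLARE_COMPLETION(done);

	rq->end_io_data = &done;
	blk_execute_rq_nowait(q, disk, rq, 1, example_rq_done);	/* 1 = at head */

	/* ... do other work, then reap the result ... */
	wait_for_completion(&done);
	/* rq->errors holds the outcome; the submitter still owns and frees rq */
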
/**
* blk_execute_rq - insert a request into queue for execution
* @q: queue to insert the request in
* @bd_disk: matching gendisk
* @rq: request to insert
+ * @at_head: insert request at head or tail of queue
*
* Description:
* Insert a fully prepared request at the back of the io scheduler queue
- * for execution.
+ * for execution and wait for completion.
*/
int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
- struct request *rq)
+ struct request *rq, int at_head)
{
DECLARE_COMPLETION(wait);
char sense[SCSI_SENSE_BUFFERSIZE];
int err = 0;
- rq->rq_disk = bd_disk;
-
/*
* we need an extra reference to the request, so we can look at
* it after io completion
rq->sense_len = 0;
}
- rq->flags |= REQ_NOMERGE;
rq->waiting = &wait;
- rq->end_io = blk_end_sync_rq;
- elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
- generic_unplug_device(q);
+ blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
wait_for_completion(&wait);
rq->waiting = NULL;
EXPORT_SYMBOL(blkdev_issue_flush);
+ /**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q: device queue
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ * Devices understanding the SCSI command set can use this function as
+ * a helper for issuing a cache flush. Note: the driver is required to
+ * store the error offset (in case of a flush error) in ->sector of
+ * struct request.
+ */
+ int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+ sector_t *error_sector)
+ {
+ struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ int ret;
+
+ rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+ rq->sector = 0;
+ memset(rq->cmd, 0, sizeof(rq->cmd));
+ rq->cmd[0] = 0x35;	/* SYNCHRONIZE CACHE */
+ rq->cmd_len = 12;
+ rq->data = NULL;
+ rq->data_len = 0;
+ rq->timeout = 60 * HZ;
+
+ ret = blk_execute_rq(q, disk, rq, 0);
+
+ if (ret && error_sector)
+ *error_sector = rq->sector;
+
+ blk_put_request(rq);
+ return ret;
+ }
+
+ EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
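
A driver for a device speaking the SCSI command set would typically register this helper as its queue's flush callback during setup, so that blkdev_issue_flush() can reach it; roughly (the sdev/queue naming here is an assumption):

	blk_queue_issue_flush_fn(sdev->request_queue, blkdev_scsi_issue_flush_fn);

blkdev_issue_flush() then calls through q->issue_flush_fn and, on failure, hands the offending sector back to its caller.
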
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
int rw = rq_data_dir(rq);
menu "SCSI device support"
+ config RAID_ATTRS
+ tristate "RAID Transport Class"
+ default n
+ ---help---
+ Provides the RAID transport class, which RAID controller drivers
+ use to export RAID attributes such as level and state via sysfs.
+
config SCSI
tristate "SCSI device support"
---help---
config SCSI_DECSII
tristate "DEC SII Scsi Driver"
- depends on MACH_DECSTATION && SCSI && MIPS32
+ depends on MACH_DECSTATION && SCSI && 32BIT
config BLK_DEV_3W_XXXX_RAID
tristate "3ware 5/6/7/8xxx ATA-RAID support"
If unsure, say N.
+config SCSI_SATA_MV
+ tristate "Marvell SATA support"
+ depends on SCSI_SATA && PCI && EXPERIMENTAL
+ help
+ This option enables support for the Marvell Serial ATA family.
+ Currently supports 88SX[56]0[48][01] chips.
+
+ If unsure, say N.
+
config SCSI_SATA_NV
tristate "NVIDIA SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
obj-$(CONFIG_SCSI) += scsi_mod.o
+ obj-$(CONFIG_RAID_ATTRS) += raid_class.o
+
# --- NOTE ORDERING HERE ---
# For kernel non-modular link, transport attributes need to
# be initialised before drivers
obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o
obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o
obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o
+obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o
obj-$(CONFIG_ARM) += arm/
static int init_timeout = 5;
static int max_requests = 50;
- #define IBMVSCSI_VERSION "1.5.6"
+ #define IBMVSCSI_VERSION "1.5.7"
MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
sizeof(*evt->xfer_iu) * i;
evt->xfer_iu = pool->iu_storage + i;
evt->hostdata = hostdata;
+ evt->ext_list = NULL;
+ evt->ext_list_token = 0;
}
return 0;
struct ibmvscsi_host_data *hostdata)
{
int i, in_use = 0;
- for (i = 0; i < pool->size; ++i)
+ for (i = 0; i < pool->size; ++i) {
if (atomic_read(&pool->events[i].free) != 1)
++in_use;
+ if (pool->events[i].ext_list) {
+ dma_free_coherent(hostdata->dev,
+ SG_ALL * sizeof(struct memory_descriptor),
+ pool->events[i].ext_list,
+ pool->events[i].ext_list_token);
+ }
+ }
if (in_use)
printk(KERN_WARNING
"ibmvscsi: releasing event pool with %d "
} else {
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
- srp_cmd->data_out_count = numbuf;
+ srp_cmd->data_out_count =
+ numbuf < MAX_INDIRECT_BUFS ?
+ numbuf : MAX_INDIRECT_BUFS;
} else {
srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
- srp_cmd->data_in_count = numbuf;
+ srp_cmd->data_in_count =
+ numbuf < MAX_INDIRECT_BUFS ?
+ numbuf : MAX_INDIRECT_BUFS;
}
}
}
+ static void unmap_sg_list(int num_entries,
+ struct device *dev,
+ struct memory_descriptor *md)
+ {
+ int i;
+
+ for (i = 0; i < num_entries; ++i) {
+ dma_unmap_single(dev,
+ md[i].virtual_address,
+ md[i].length, DMA_BIDIRECTIONAL);
+ }
+ }
+
/**
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
* @cmd: srp_cmd whose additional_data member will be unmapped
* @dev: device for which the memory is mapped
*
*/
- static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
+ static void unmap_cmd_data(struct srp_cmd *cmd,
+ struct srp_event_struct *evt_struct,
+ struct device *dev)
{
- int i;
-
if ((cmd->data_out_format == SRP_NO_BUFFER) &&
(cmd->data_in_format == SRP_NO_BUFFER))
return;
(struct indirect_descriptor *)cmd->additional_data;
int num_mapped = indirect->head.length /
sizeof(indirect->list[0]);
- for (i = 0; i < num_mapped; ++i) {
- struct memory_descriptor *data = &indirect->list[i];
- dma_unmap_single(dev,
- data->virtual_address,
- data->length, DMA_BIDIRECTIONAL);
+
+ if (num_mapped <= MAX_INDIRECT_BUFS) {
+ unmap_sg_list(num_mapped, dev, &indirect->list[0]);
+ return;
}
+
+ unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
}
}
+ static int map_sg_list(int num_entries,
+ struct scatterlist *sg,
+ struct memory_descriptor *md)
+ {
+ int i;
+ u64 total_length = 0;
+
+ for (i = 0; i < num_entries; ++i) {
+ struct memory_descriptor *descr = md + i;
+ struct scatterlist *sg_entry = &sg[i];
+ descr->virtual_address = sg_dma_address(sg_entry);
+ descr->length = sg_dma_len(sg_entry);
+ descr->memory_handle = 0;
+ total_length += sg_dma_len(sg_entry);
+ }
+ return total_length;
+ }
+
/**
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
* @cmd: Scsi_Cmnd with the scatterlist
* Returns 1 on success.
*/
static int map_sg_data(struct scsi_cmnd *cmd,
+ struct srp_event_struct *evt_struct,
struct srp_cmd *srp_cmd, struct device *dev)
{
- int i, sg_mapped;
+ int sg_mapped;
u64 total_length = 0;
struct scatterlist *sg = cmd->request_buffer;
struct memory_descriptor *data =
return 1;
}
- if (sg_mapped > MAX_INDIRECT_BUFS) {
+ if (sg_mapped > SG_ALL) {
printk(KERN_ERR
"ibmvscsi: More than %d mapped sg entries, got %d\n",
- MAX_INDIRECT_BUFS, sg_mapped);
+ SG_ALL, sg_mapped);
return 0;
}
indirect->head.virtual_address = 0;
indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
indirect->head.memory_handle = 0;
- for (i = 0; i < sg_mapped; ++i) {
- struct memory_descriptor *descr = &indirect->list[i];
- struct scatterlist *sg_entry = &sg[i];
- descr->virtual_address = sg_dma_address(sg_entry);
- descr->length = sg_dma_len(sg_entry);
- descr->memory_handle = 0;
- total_length += sg_dma_len(sg_entry);
+
+ if (sg_mapped <= MAX_INDIRECT_BUFS) {
+ total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
+ indirect->total_length = total_length;
+ return 1;
}
- indirect->total_length = total_length;
- return 1;
+ /* get indirect table */
+ if (!evt_struct->ext_list) {
+ evt_struct->ext_list = (struct memory_descriptor *)
+ dma_alloc_coherent(dev,
+ SG_ALL * sizeof(struct memory_descriptor),
+ &evt_struct->ext_list_token, 0);
+ if (!evt_struct->ext_list) {
+ printk(KERN_ERR
+ "ibmvscsi: Can't allocate memory for indirect table\n");
+ return 0;
+ }
+ }
+
+ total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
+
+ indirect->total_length = total_length;
+ indirect->head.virtual_address = evt_struct->ext_list_token;
+ indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
+ memcpy(indirect->list, evt_struct->ext_list,
+ MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
+
+ return 1;
}
/**
* Returns 1 on success.
*/
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
+ struct srp_event_struct *evt_struct,
struct srp_cmd *srp_cmd, struct device *dev)
{
switch (cmd->sc_data_direction) {
if (!cmd->request_buffer)
return 1;
if (cmd->use_sg)
- return map_sg_data(cmd, srp_cmd, dev);
+ return map_sg_data(cmd, evt_struct, srp_cmd, dev);
return map_single_data(cmd, srp_cmd, dev);
}
printk(KERN_WARNING
"ibmvscsi: Warning, request_limit exceeded\n");
unmap_cmd_data(&evt_struct->iu.srp.cmd,
+ evt_struct,
hostdata->dev);
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
return 0;
send_error:
- unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
+ unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
if ((cmnd = evt_struct->cmnd) != NULL) {
cmnd->result = DID_ERROR << 16;
rsp->sense_and_response_data,
rsp->sense_data_list_length);
unmap_cmd_data(&evt_struct->iu.srp.cmd,
+ evt_struct,
evt_struct->hostdata->dev);
if (rsp->doover)
{
struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct;
+ struct indirect_descriptor *indirect;
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
u16 lun = lun_from_dev(cmnd->device);
if (!evt_struct)
return SCSI_MLQUEUE_HOST_BUSY;
- init_event_struct(evt_struct,
- handle_cmd_rsp,
- VIOSRP_SRP_FORMAT,
- cmnd->timeout);
-
- evt_struct->cmnd = cmnd;
- evt_struct->cmnd_done = done;
-
/* Set up the actual SRP IU */
srp_cmd = &evt_struct->iu.srp.cmd;
memset(srp_cmd, 0x00, sizeof(*srp_cmd));
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
srp_cmd->lun = ((u64) lun) << 48;
- if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) {
+ if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
}
+ init_event_struct(evt_struct,
+ handle_cmd_rsp,
+ VIOSRP_SRP_FORMAT,
+ cmnd->timeout_per_command/HZ);
+
+ evt_struct->cmnd = cmnd;
+ evt_struct->cmnd_done = done;
+
/* Fix up dma address of the buffer itself */
- if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
- (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) {
- struct indirect_descriptor *indirect =
- (struct indirect_descriptor *)srp_cmd->additional_data;
+ indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
+ if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
+ (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
+ (indirect->head.virtual_address == 0)) {
indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
offsetof(struct srp_cmd, additional_data) +
offsetof(struct indirect_descriptor, list);
struct srp_event_struct *tmp_evt, *found_evt;
union viosrp_iu srp_rsp;
int rsp_rc;
+ unsigned long flags;
u16 lun = lun_from_dev(cmd->device);
/* First, find this command in our sent list so we can figure
* out the correct tag
*/
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
found_evt = NULL;
list_for_each_entry(tmp_evt, &hostdata->sent, list) {
if (tmp_evt->cmnd == cmd) {
}
}
- if (!found_evt)
+ if (!found_evt) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
return FAILED;
+ }
evt = get_event_struct(&hostdata->pool);
if (evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
return FAILED;
}
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ if (rsp_rc != 0) {
printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
return FAILED;
}
* The event is no longer in our list. Make sure it didn't
* complete while we were aborting
*/
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
found_evt = NULL;
list_for_each_entry(tmp_evt, &hostdata->sent, list) {
if (tmp_evt->cmnd == cmd) {
}
if (found_evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_INFO
"ibmvscsi: aborted task tag 0x%lx completed\n",
tsk_mgmt->managed_task_tag);
cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list);
- unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev);
+ unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
+ found_evt->hostdata->dev);
free_event_struct(&found_evt->hostdata->pool, found_evt);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
atomic_inc(&hostdata->request_limit);
return SUCCESS;
}
struct srp_event_struct *tmp_evt, *pos;
union viosrp_iu srp_rsp;
int rsp_rc;
+ unsigned long flags;
u16 lun = lun_from_dev(cmd->device);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
evt = get_event_struct(&hostdata->pool);
if (evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
return FAILED;
}
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ if (rsp_rc != 0) {
printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
return FAILED;
}
/* We need to find all commands for this LUN that have not yet been
* responded to, and fail them with DID_RESET
*/
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
if (tmp_evt->cmnd)
tmp_evt->cmnd->result = (DID_RESET << 16);
list_del(&tmp_evt->list);
- unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev);
+ unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
+ tmp_evt->hostdata->dev);
free_event_struct(&tmp_evt->hostdata->pool,
tmp_evt);
atomic_inc(&hostdata->request_limit);
tmp_evt->done(tmp_evt);
}
}
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
return SUCCESS;
}
if (tmp_evt->cmnd) {
tmp_evt->cmnd->result = (DID_ERROR << 16);
unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+ tmp_evt,
tmp_evt->hostdata->dev);
if (tmp_evt->cmnd_done)
tmp_evt->cmnd_done(tmp_evt->cmnd);
.cmd_per_lun = 16,
.can_queue = 1, /* Updated after SRP_LOGIN */
.this_id = -1,
- .sg_tablesize = MAX_INDIRECT_BUFS,
+ .sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ibmvscsi_attrs,
};
*/
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
{"vscsi", "IBM,v-scsi"},
- {0,}
+ { "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
+ #include <scsi/sg.h> /* for struct sg_iovec */
#define BIO_POOL_SIZE 256
return bvl;
}
-/*
- * default destructor for a bio allocated with bio_alloc_bioset()
- */
-static void bio_destructor(struct bio *bio)
+void bio_free(struct bio *bio, struct bio_set *bio_set)
{
const int pool_idx = BIO_POOL_IDX(bio);
- struct bio_set *bs = bio->bi_set;
BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
- mempool_free(bio->bi_io_vec, bs->bvec_pools[pool_idx]);
- mempool_free(bio, bs->bio_pool);
+ mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+ mempool_free(bio, bio_set->bio_pool);
+}
+
+/*
+ * default destructor for a bio allocated with bio_alloc_bioset()
+ */
+static void bio_fs_destructor(struct bio *bio)
+{
+ bio_free(bio, fs_bio_set);
}
inline void bio_init(struct bio *bio)
bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
}
bio->bi_io_vec = bvl;
- bio->bi_destructor = bio_destructor;
- bio->bi_set = bs;
}
out:
return bio;
struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+ struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+
+ if (bio)
+ bio->bi_destructor = bio_fs_destructor;
+
+ return bio;
}
void zero_fill_bio(struct bio *bio)
{
struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
- if (b)
+ if (b) {
+ b->bi_destructor = bio_fs_destructor;
__bio_clone(b, bio);
+ }
return b;
}
return ERR_PTR(ret);
}
- static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
- unsigned long uaddr, unsigned int len,
- int write_to_vm)
+ static struct bio *__bio_map_user_iov(request_queue_t *q,
+ struct block_device *bdev,
+ struct sg_iovec *iov, int iov_count,
+ int write_to_vm)
{
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int ret, offset, i;
+ int i, j;
+ int nr_pages = 0;
struct page **pages;
struct bio *bio;
+ int cur_page = 0;
+ int ret, offset;
- /*
- * transfer and buffer must be aligned to at least hardsector
- * size for now, in the future we can relax this restriction
- */
- if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ unsigned long len = iov[i].iov_len;
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+
+ nr_pages += end - start;
+ /*
+ * transfer and buffer must be aligned to at least hardsector
+ * size for now, in the future we can relax this restriction
+ */
+ if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!nr_pages)
return ERR_PTR(-EINVAL);
bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!pages)
goto out;
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, uaddr, nr_pages,
- write_to_vm, 0, pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret < nr_pages)
- goto out;
-
- bio->bi_bdev = bdev;
-
- offset = uaddr & ~PAGE_MASK;
- for (i = 0; i < nr_pages; i++) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
+ memset(pages, 0, nr_pages * sizeof(struct page *));
+
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ unsigned long len = iov[i].iov_len;
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+ const int local_nr_pages = end - start;
+ const int page_limit = cur_page + local_nr_pages;
+
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(current, current->mm, uaddr,
+ local_nr_pages,
+ write_to_vm, 0, &pages[cur_page], NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (ret < local_nr_pages)
+ goto out_unmap;
+
+ offset = uaddr & ~PAGE_MASK;
+ for (j = cur_page; j < page_limit; j++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ /*
+ * sorry...
+ */
+ if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+ break;
+
+ len -= bytes;
+ offset = 0;
+ }
+ cur_page = j;
/*
- * sorry...
+ * release the pages we didn't map into the bio, if any
*/
- if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
- break;
-
- len -= bytes;
- offset = 0;
+ while (j < page_limit)
+ page_cache_release(pages[j++]);
}
- /*
- * release the pages we didn't map into the bio, if any
- */
- while (i < nr_pages)
- page_cache_release(pages[i++]);
-
kfree(pages);
/*
if (!write_to_vm)
bio->bi_rw |= (1 << BIO_RW);
+ bio->bi_bdev = bdev;
bio->bi_flags |= (1 << BIO_USER_MAPPED);
return bio;
- out:
+
+ out_unmap:
+ for (i = 0; i < nr_pages; i++) {
+ if (!pages[i])
+ break;
+ page_cache_release(pages[i]);
+ }
+ out:
kfree(pages);
bio_put(bio);
return ERR_PTR(ret);
*/
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
+ {
+ struct sg_iovec iov;
+
+ iov.iov_base = (void __user *)uaddr;
+ iov.iov_len = len;
+
+ return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+ }
+
+ /**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space pages described by the iovec into a bio suitable
+ * for io to a block device. Returns an error pointer in case of error.
+ */
+ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+ struct sg_iovec *iov, int iov_count,
+ int write_to_vm)
{
struct bio *bio;
+ int len = 0, i;
- bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+ bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
if (IS_ERR(bio))
return bio;
*/
bio_get(bio);
+ for (i = 0; i < iov_count; i++)
+ len += iov[i].iov_len;
+
if (bio->bi_size == len)
return bio;
bio_put(bio);
}
+ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+ {
+ if (bio->bi_size)
+ return 1;
+
+ bio_put(bio);
+ return 0;
+ }
+
+
+ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+ unsigned int len, unsigned int gfp_mask)
+ {
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ int offset, i;
+ struct bio *bio;
+
+ bio = bio_alloc(gfp_mask, nr_pages);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+ offset) < bytes)
+ break;
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ bio->bi_end_io = bio_map_kern_endio;
+ return bio;
+ }
+
+ /**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+ struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+ unsigned int gfp_mask)
+ {
+ struct bio *bio;
+
+ bio = __bio_map_kern(q, data, len, gfp_mask);
+ if (IS_ERR(bio))
+ return bio;
+
+ if (bio->bi_size == len)
+ return bio;
+
+ /*
+ * Don't support partial mappings.
+ */
+ bio_put(bio);
+ return ERR_PTR(-EINVAL);
+ }
+
/*
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
* for performing direct-IO in BIOs.
EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_put);
+EXPORT_SYMBOL(bio_free);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
+ EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
void *bi_private;
bio_destructor_t *bi_destructor; /* destructor */
- struct bio_set *bi_set; /* memory pools set */
};
/*
extern struct bio *bio_alloc(unsigned int __nocast, int);
extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *);
extern void bio_put(struct bio *);
+extern void bio_free(struct bio *, struct bio_set *);
extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int);
+ struct sg_iovec;
+ extern struct bio *bio_map_user_iov(struct request_queue *,
+ struct block_device *,
+ struct sg_iovec *, int, int);
extern void bio_unmap_user(struct bio *);
+ extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
+ unsigned int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);