# HG changeset patch
# User fujita.tomonori@xxxxxxxxxxxxx
# Node ID 3749a0e2580a668bfb029fb0348e9f89660becd6
# Parent 840f33e54054270e3f4b9704111ed52bd381653b
Add SCSI target patch

This patch adds the SCSI target framework (tgt), which provides target
driver support in the Linux SCSI mid layer. It is a modified version of
the one included in the -mm tree.
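
For orientation, a target-mode LLD built on this framework follows the
same shape as the ibmvstgt driver included below: it fills in the new
target hooks of its scsi_host_template, sets up a host queue with
scsi_tgt_alloc_queue(), and hands incoming commands to the core with
scsi_tgt_queue_command(). A minimal sketch (only the scsi_tgt_* calls
and hook names come from this patch; everything prefixed my_ is
illustrative):

	static int my_transfer_data(struct scsi_cmnd *scmd,
				    void (*done)(struct scsi_cmnd *))
	{
		/* move data to/from the initiator, then complete */
		done(scmd);
		return 0;
	}

	static struct scsi_host_template my_tgt_sht = {
		.name              = "my_tgt",
		.module            = THIS_MODULE,
		.transfer_response = my_transfer_response,
		.transfer_data     = my_transfer_data,
		.tsk_mgmt_response = my_tsk_mgmt_response,
	};

	/* probe():     shost = scsi_host_alloc(&my_tgt_sht, priv_size);
	 *              scsi_tgt_alloc_queue(shost);
	 *              scsi_add_host(shost, dev);
	 * per command: scsi_tgt_queue_command(scmd, lun, tag);
	 */
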
diff -r 840f33e54054 -r 3749a0e2580a patches/linux-2.6.16.13/tgt.patch
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/linux-2.6.16.13/tgt.patch Wed Aug 02 15:03:05 2006 +0900
@@ -0,0 +1,3253 @@
+Subject: [PATCH] scsi target: add target support and IBM VIO driver
+
+---
+
+ block/ll_rw_blk.c | 40 +-
+ block/scsi_ioctl.c | 3
+ drivers/scsi/Kconfig | 30 +
+ drivers/scsi/Makefile | 5
+ drivers/scsi/hosts.c | 5
+ drivers/scsi/ibmvscsi/Makefile | 2
+ drivers/scsi/ibmvscsi/ibmvstgt.c | 943 ++++++++++++++++++++++++++++++++++++++
+ drivers/scsi/libsrp.c | 450 ++++++++++++++++++
+ drivers/scsi/scsi.c | 43 +-
+ drivers/scsi/scsi_lib.c | 33 +
+ drivers/scsi/scsi_tgt_if.c | 316 +++++++++++++
+ drivers/scsi/scsi_tgt_lib.c | 707 ++++++++++++++++++++++++++++
+ drivers/scsi/scsi_tgt_priv.h | 24 +
+ fs/bio.c | 19 -
+ include/linux/blkdev.h | 3
+ include/scsi/libsrp.h | 75 +++
+ include/scsi/scsi_cmnd.h | 8
+ include/scsi/scsi_host.h | 43 ++
+ include/scsi/scsi_tgt.h | 17 +
+ include/scsi/scsi_tgt_if.h | 91 ++++
+ 20 files changed, 2793 insertions(+), 64 deletions(-)
+ create mode 100644 drivers/scsi/ibmvscsi/ibmvstgt.c
+ create mode 100644 drivers/scsi/libsrp.c
+ create mode 100644 drivers/scsi/scsi_tgt_if.c
+ create mode 100644 drivers/scsi/scsi_tgt_lib.c
+ create mode 100644 drivers/scsi/scsi_tgt_priv.h
+ create mode 100644 include/scsi/libsrp.h
+ create mode 100644 include/scsi/scsi_tgt.h
+ create mode 100644 include/scsi/scsi_tgt_if.h
+
+f21c20da255c84fe2072df460048f379af3eeb29
+diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
+index 7eb36c5..4fe85d7 100644
+--- a/block/ll_rw_blk.c
++++ b/block/ll_rw_blk.c
+@@ -2351,19 +2351,20 @@ int blk_rq_map_user(request_queue_t *q,
+ else
+ bio = bio_copy_user(q, uaddr, len, reading);
+
+- if (!IS_ERR(bio)) {
+- rq->bio = rq->biotail = bio;
+- blk_rq_bio_prep(q, rq, bio);
++ if (IS_ERR(bio))
++ return PTR_ERR(bio);
+
+- rq->buffer = rq->data = NULL;
+- rq->data_len = len;
+- return 0;
++ if (bio->bi_size != len) {
++ bio_endio(bio, bio->bi_size, 0);
++ bio_unmap_user(bio);
++ return -EINVAL;
+ }
+
+- /*
+- * bio is the err-ptr
+- */
+- return PTR_ERR(bio);
++ rq->bio = rq->biotail = bio;
++ blk_rq_bio_prep(q, rq, bio);
++ rq->buffer = rq->data = NULL;
++ rq->data_len = len;
++ return 0;
+ }
+
+ EXPORT_SYMBOL(blk_rq_map_user);
+@@ -2389,7 +2390,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
+ * unmapping.
+ */
+ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+- struct sg_iovec *iov, int iov_count)
++ struct sg_iovec *iov, int iov_count, unsigned int len)
+ {
+ struct bio *bio;
+
+@@ -2403,6 +2404,12 @@ int blk_rq_map_user_iov(request_queue_t
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
++ if (bio->bi_size != len) {
++ bio_endio(bio, bio->bi_size, 0);
++ bio_unmap_user(bio);
++ return -EINVAL;
++ }
++
+ rq->bio = rq->biotail = bio;
+ blk_rq_bio_prep(q, rq, bio);
+ rq->buffer = rq->data = NULL;
+@@ -2826,16 +2833,12 @@ static void init_request_from_bio(struct
+
+ req->errors = 0;
+ req->hard_sector = req->sector = bio->bi_sector;
+- req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+- req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+- req->nr_phys_segments = bio_phys_segments(req->q, bio);
+- req->nr_hw_segments = bio_hw_segments(req->q, bio);
+- req->buffer = bio_data(bio); /* see ->buffer comment above */
+ req->waiting = NULL;
+- req->bio = req->biotail = bio;
+ req->ioprio = bio_prio(bio);
+ req->rq_disk = bio->bi_bdev->bd_disk;
+ req->start_time = jiffies;
++
++ blk_rq_bio_prep(req->q, req, bio);
+ }
+
+ static int __make_request(request_queue_t *q, struct bio *bio)
+@@ -3487,9 +3490,6 @@ EXPORT_SYMBOL(end_request);
+
+ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+ {
+- /* first three bits are identical in rq->flags and bio->bi_rw */
+- rq->flags |= (bio->bi_rw & 7);
+-
+ rq->nr_phys_segments = bio_phys_segments(q, bio);
+ rq->nr_hw_segments = bio_hw_segments(q, bio);
+ rq->current_nr_sectors = bio_cur_sectors(bio);
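
Note the behavioural change in blk_rq_map_user() and
blk_rq_map_user_iov() above: a bio that maps shorter than the requested
length is now unwound and rejected with -EINVAL instead of being
silently prepped as a partial request. A hypothetical caller (not part
of this patch) only needs to honor the return value:

	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
	int err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)	/* now includes -EINVAL for a short mapping */
		goto out;
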
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index b33eda2..b77e185 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -274,7 +274,8 @@ static int sg_io(struct file *file, requ
+ goto out;
+ }
+
+- ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
++ ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
++ hdr->dxfer_len);
+ kfree(iov);
+ } else if (hdr->dxfer_len)
+ ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index a480a37..82234ec 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -27,6 +27,13 @@ config SCSI
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a SCSI device.
+
++config SCSI_TGT
++ tristate "SCSI target support"
++ depends on SCSI && NET && EXPERIMENTAL
++ ---help---
++ If you want to use SCSI target mode drivers enable this option.
++ If you choose M, the module will be called scsi_tgt.
++
+ config SCSI_PROC_FS
+ bool "legacy /proc/scsi/ support"
+ depends on SCSI && PROC_FS
+@@ -900,6 +907,20 @@ config SCSI_IBMVSCSI
+ To compile this driver as a module, choose M here: the
+ module will be called ibmvscsic.
+
++config SCSI_IBMVSCSIS
++ tristate "IBM Virtual SCSI Server support"
++ depends on PPC_PSERIES && SCSI_TGT && SCSI_SRP
++ help
++ This is the SRP target driver for IBM pSeries virtual environments.
++
++ The userspace component needed to initialize the driver and
++ documentation can be found:
++
++ http://stgt.berlios.de/
++
++ To compile this driver as a module, choose M here: the
++ module will be called ibmvstgt.
++
+ config SCSI_INITIO
+ tristate "Initio 9100U(W) support"
+ depends on PCI && SCSI
+@@ -1829,6 +1850,15 @@ config ZFCP
+ called zfcp. If you want to compile it as a module, say M here
+ and read <file:Documentation/modules.txt>.
+
++config SCSI_SRP
++ tristate "SCSI RDMA Protocol helper library"
++ depends on SCSI
++ help
++ If you wish to use SRP target drivers, say Y.
++
++ To compile this driver as a module, choose M here: the
++ module will be called libsrp.
++
+ endmenu
+
+ source "drivers/scsi/pcmcia/Kconfig"
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index 81803a1..b4eb854 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -21,6 +21,7 @@ CFLAGS_seagate.o = -DARBITRATE -DPARIT
+ subdir-$(CONFIG_PCMCIA) += pcmcia
+
+ obj-$(CONFIG_SCSI) += scsi_mod.o
++obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o
+
+ obj-$(CONFIG_RAID_ATTRS) += raid_class.o
+
+@@ -120,7 +121,9 @@ obj-$(CONFIG_SCSI_FCAL) += fcal.o
+ obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
+ obj-$(CONFIG_SCSI_NSP32) += nsp32.o
+ obj-$(CONFIG_SCSI_IPR) += ipr.o
++obj-$(CONFIG_SCSI_SRP) += libsrp.o
+ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
++obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
+ obj-$(CONFIG_SCSI_SATA_AHCI) += libata.o ahci.o
+ obj-$(CONFIG_SCSI_SATA_SVW) += libata.o sata_svw.o
+ obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o
+@@ -156,6 +159,8 @@ scsi_mod-y += scsi.o hosts.o scsi_ioct
+ scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
+ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
+
++scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
++
+ sd_mod-objs := sd.o
+ sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
+ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index dfcb96f..f8cce09 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -264,6 +264,11 @@ static void scsi_host_dev_release(struct
+ if (shost->work_q)
+ destroy_workqueue(shost->work_q);
+
++ if (shost->uspace_req_q) {
++ kfree(shost->uspace_req_q->queuedata);
++ scsi_free_queue(shost->uspace_req_q);
++ }
++
+ scsi_destroy_command_freelist(shost);
+ kfree(shost->shost_data);
+
+diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
+index 4e247b6..6ac0633 100644
+--- a/drivers/scsi/ibmvscsi/Makefile
++++ b/drivers/scsi/ibmvscsi/Makefile
+@@ -3,3 +3,5 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic
+ ibmvscsic-y += ibmvscsi.o
+ ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
+ ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
++
++obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
+diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
+new file mode 100644
+index 0000000..cf1e851
+--- /dev/null
++++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
+@@ -0,0 +1,943 @@
++/*
++ * IBM eServer i/pSeries Virtual SCSI Target Driver
++ * Copyright (C) 2003-2005 Dave Boutcher (boutcher@xxxxxxxxxx) IBM Corp.
++ * Santiago Leon (santil@xxxxxxxxxx) IBM Corp.
++ * Linda Xie (lxie@xxxxxxxxxx) IBM Corp.
++ *
++ * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@xxxxxxx>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
++ * USA
++ */
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_tgt.h>
++#include <scsi/libsrp.h>
++#include <asm/hvcall.h>
++#include <asm/iommu.h>
++#include <asm/prom.h>
++#include <asm/vio.h>
++
++#include "ibmvscsi.h"
++
++#define INITIAL_SRP_LIMIT 16
++#define DEFAULT_MAX_SECTORS 512
++
++#define TGT_NAME "ibmvstgt"
++
++/*
++ * Hypervisor calls.
++ */
++#define h_copy_rdma(l, sa, sb, da, db) \
++ plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
++#define h_send_crq(ua, l, h) \
++ plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
++#define h_reg_crq(ua, tok, sz)\
++ plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
++#define h_free_crq(ua) \
++ plpar_hcall_norets(H_FREE_CRQ, ua);
++
++/* tmp - will replace with SCSI logging stuff */
++#define eprintk(fmt, args...) \
++do { \
++ printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
++} while (0)
++/* #define dprintk eprintk */
++#define dprintk(fmt, args...)
++
++struct vio_port {
++ struct vio_dev *dma_dev;
++
++ struct crq_queue crq_queue;
++ struct work_struct crq_work;
++
++ unsigned long liobn;
++ unsigned long riobn;
++};
++
++static struct workqueue_struct *vtgtd;
++
++/*
++ * These are fixed for the system and come from the Open Firmware device tree.
++ * We just store them here to save getting them every time.
++ */
++static char system_id[64] = "";
++static char partition_name[97] = "UNKNOWN";
++static unsigned int partition_number = -1;
++
++static struct vio_port *target_to_port(struct srp_target *target)
++{
++ return (struct vio_port *) target->ldata;
++}
++
++static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
++{
++ return (union viosrp_iu *) (iue->sbuf->buf);
++}
++
++static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
++{
++ struct srp_target *target = iue->target;
++ struct vio_port *vport = target_to_port(target);
++ long rc, rc1;
++ union {
++ struct viosrp_crq cooked;
++ uint64_t raw[2];
++ } crq;
++
++ /* First copy the SRP */
++ rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
++ vport->riobn, iue->remote_token);
++
++ if (rc)
++ eprintk("Error %ld transferring data\n", rc);
++
++ crq.cooked.valid = 0x80;
++ crq.cooked.format = format;
++ crq.cooked.reserved = 0x00;
++ crq.cooked.timeout = 0x00;
++ crq.cooked.IU_length = length;
++ crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;
++
++ if (rc == 0)
++ crq.cooked.status = 0x99; /* Just needs to be non-zero */
++ else
++ crq.cooked.status = 0x00;
++
++ rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
++
++ if (rc1) {
++ eprintk("%ld sending response\n", rc1);
++ return rc1;
++ }
++
++ return rc;
++}
++
++#define SRP_RSP_SENSE_DATA_LEN 18
++
++static int send_rsp(struct iu_entry *iue, unsigned char status,
++ unsigned char asc)
++{
++ union viosrp_iu *iu = vio_iu(iue);
++ uint64_t tag = iu->srp.rsp.tag;
++
++ /* If the linked bit is on and status is good */
++ if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
++ status = 0x10;
++
++ memset(iu, 0, sizeof(struct srp_rsp));
++ iu->srp.rsp.opcode = SRP_RSP;
++ iu->srp.rsp.req_lim_delta = 1;
++ iu->srp.rsp.tag = tag;
++
++ if (test_bit(V_DIOVER, &iue->flags))
++ iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
++
++ iu->srp.rsp.data_in_res_cnt = 0;
++ iu->srp.rsp.data_out_res_cnt = 0;
++
++ iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
++
++ iu->srp.rsp.resp_data_len = 0;
++ iu->srp.rsp.status = status;
++ if (status) {
++ uint8_t *sense = iu->srp.rsp.data;
++
++ if (iue->scmd) {
++ iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
++ iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
++ memcpy(sense, iue->scmd->sense_buffer,
++ SCSI_SENSE_BUFFERSIZE);
++ } else {
++ iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
++ iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
++ iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;
++
++ /* Valid bit and 'current errors' */
++ sense[0] = (0x1 << 7 | 0x70);
++ /* Sense key */
++ sense[2] = status;
++ /* Additional sense length */
++ sense[7] = 0xa; /* 10 bytes */
++ /* Additional sense code */
++ sense[12] = asc;
++ }
++ }
++
++ send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
++ VIOSRP_SRP_FORMAT);
++
++ return 0;
++}
++
++static void handle_cmd_queue(struct srp_target *target)
++{
++ struct iu_entry *iue;
++ unsigned long flags;
++
++retry:
++ spin_lock_irqsave(&target->lock, flags);
++
++ list_for_each_entry(iue, &target->cmd_queue, ilist) {
++ if (!test_and_set_bit(V_FLYING, &iue->flags)) {
++ spin_unlock_irqrestore(&target->lock, flags);
++ srp_cmd_perform(iue, (struct srp_cmd *) iue->sbuf->buf);
++ goto retry;
++ }
++ }
++
++ spin_unlock_irqrestore(&target->lock, flags);
++}
++
++static int ibmvstgt_rdma(struct iu_entry *iue, struct scatterlist *sg, int nsg,
++ struct srp_direct_buf *md, int nmd,
++ enum dma_data_direction dir, unsigned int rest)
++{
++ struct srp_target *target = iue->target;
++ struct vio_port *vport = target_to_port(target);
++ dma_addr_t token;
++ long err;
++ unsigned int done = 0;
++ int i, sidx, soff;
++
++ sidx = soff = 0;
++ token = sg_dma_address(sg + sidx);
++
++ for (i = 0; i < nmd && rest; i++) {
++ unsigned int mdone, mlen;
++
++ mlen = min(rest, md[i].len);
++ for (mdone = 0; mlen;) {
++ int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
++
++ if (dir == DMA_TO_DEVICE)
++ err = h_copy_rdma(slen,
++ vport->riobn,
++ md[i].va + mdone,
++ vport->liobn,
++ token + soff);
++ else
++ err = h_copy_rdma(slen,
++ vport->liobn,
++ token + soff,
++ vport->riobn,
++ md[i].va + mdone);
++
++ if (err != H_SUCCESS) {
++ eprintk("rdma error %d %d\n", dir, slen);
++ goto out;
++ }
++
++ mlen -= slen;
++ mdone += slen;
++ soff += slen;
++ done += slen;
++
++ if (soff == sg_dma_len(sg + sidx)) {
++ sidx++;
++ soff = 0;
++ token = sg_dma_address(sg + sidx);
++
++ if (sidx > nsg) {
++ eprintk("out of sg %p %d %d\n",
++ iue, sidx, nsg);
++ goto out;
++ }
++ }
++ };
++
++ rest -= mlen;
++ }
++out:
++
++ return 0;
++}
++
++static int ibmvstgt_transfer_data(struct scsi_cmnd *scmd,
++ void (*done)(struct scsi_cmnd *))
++{
++ struct iu_entry *iue = (struct iu_entry *) scmd->SCp.ptr;
++ int err;
++
++ err = srp_transfer_data(scmd, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma);
++ done(scmd);
++
++ return err;
++}
++
++static int ibmvstgt_cmd_done(struct scsi_cmnd *scmd,
++ void (*done)(struct scsi_cmnd *))
++{
++ unsigned long flags;
++ struct iu_entry *iue = (struct iu_entry *) scmd->SCp.ptr;
++ struct srp_target *target = iue->target;
++
++ dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
++
++ spin_lock_irqsave(&target->lock, flags);
++ list_del(&iue->ilist);
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ if (scmd->result != SAM_STAT_GOOD) {
++ eprintk("operation failed %p %d %x\n",
++ iue, scmd->result, vio_iu(iue)->srp.cmd.cdb[0]);
++ send_rsp(iue, HARDWARE_ERROR, 0x00);
++ } else
++ send_rsp(iue, NO_SENSE, 0x00);
++
++ done(scmd);
++ srp_iu_put(iue);
++ return 0;
++}
++
++int send_adapter_info(struct iu_entry *iue,
++ dma_addr_t remote_buffer, uint16_t length)
++{
++ struct srp_target *target = iue->target;
++ struct vio_port *vport = target_to_port(target);
++ struct Scsi_Host *shost = target->shost;
++ dma_addr_t data_token;
++ struct mad_adapter_info_data *info;
++ int err;
++
++ info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
++ GFP_KERNEL);
++ if (!info) {
++ eprintk("bad dma_alloc_coherent %p\n", target);
++ return 1;
++ }
++
++ /* Get remote info */
++ err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
++ vport->liobn, data_token);
++ if (err == H_SUCCESS) {
++ dprintk("Client connect: %s (%d)\n",
++ info->partition_name, info->partition_number);
++ }
++
++ memset(info, 0, sizeof(*info));
++
++ strcpy(info->srp_version, "16.a");
++ strncpy(info->partition_name, partition_name,
++ sizeof(info->partition_name));
++ info->partition_number = partition_number;
++ info->mad_version = 1;
++ info->os_type = 2;
++ info->port_max_txu[0] = shost->hostt->max_sectors << 9;
++
++ /* Send our info to remote */
++ err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
++ vport->riobn, remote_buffer);
++
++ dma_free_coherent(target->dev, sizeof(*info), info, data_token);
++
++ if (err != H_SUCCESS) {
++ eprintk("Error sending adapter info %d\n", err);
++ return 1;
++ }
++
++ return 0;
++}
++
++static void process_login(struct iu_entry *iue)
++{
++ union viosrp_iu *iu = vio_iu(iue);
++ struct srp_login_rsp *rsp = &iu->srp.login_rsp;
++ uint64_t tag = iu->srp.rsp.tag;
++
++ /* TODO handle case that requested size is wrong and
++ * buffer format is wrong
++ */
++ memset(iu, 0, sizeof(struct srp_login_rsp));
++ rsp->opcode = SRP_LOGIN_RSP;
++ rsp->req_lim_delta = INITIAL_SRP_LIMIT;
++ rsp->tag = tag;
++ rsp->max_it_iu_len = sizeof(union srp_iu);
++ rsp->max_ti_iu_len = sizeof(union srp_iu);
++ /* direct and indirect */
++ rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
++
++ send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
++}
++
++static inline void queue_cmd(struct iu_entry *iue)
++{
++ struct srp_target *target = iue->target;
++ unsigned long flags;
++
++ spin_lock_irqsave(&target->lock, flags);
++ list_add_tail(&iue->ilist, &target->cmd_queue);
++ spin_unlock_irqrestore(&target->lock, flags);
++}
++
++static int process_tsk_mgmt(struct iu_entry *iue)
++{
++ union viosrp_iu *iu = vio_iu(iue);
++ int fn;
++
++ dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
++
++ switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
++ case SRP_TSK_ABORT_TASK:
++ fn = ABORT_TASK;
++ break;
++ case SRP_TSK_ABORT_TASK_SET:
++ fn = ABORT_TASK_SET;
++ break;
++ case SRP_TSK_CLEAR_TASK_SET:
++ fn = CLEAR_TASK_SET;
++ break;
++ case SRP_TSK_LUN_RESET:
++ fn = LOGICAL_UNIT_RESET;
++ break;
++ case SRP_TSK_CLEAR_ACA:
++ fn = CLEAR_ACA;
++ break;
++ default:
++ fn = 0;
++ }
++ if (fn)
++ scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
++ iu->srp.tsk_mgmt.task_tag,
++ (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
++ iue);
++ else
++ send_rsp(iue, ILLEGAL_REQUEST, 0x20);
++
++ return !fn;
++}
++
++static int process_mad_iu(struct iu_entry *iue)
++{
++ union viosrp_iu *iu = vio_iu(iue);
++ struct viosrp_adapter_info *info;
++ struct viosrp_host_config *conf;
++
++ switch (iu->mad.empty_iu.common.type) {
++ case VIOSRP_EMPTY_IU_TYPE:
++ eprintk("%s\n", "Unsupported EMPTY MAD IU");
++ break;
++ case VIOSRP_ERROR_LOG_TYPE:
++ eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
++ iu->mad.error_log.common.status = 1;
++ send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
++ break;
++ case VIOSRP_ADAPTER_INFO_TYPE:
++ info = &iu->mad.adapter_info;
++ info->common.status = send_adapter_info(iue, info->buffer,
++ info->common.length);
++ send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
++ break;
++ case VIOSRP_HOST_CONFIG_TYPE:
++ conf = &iu->mad.host_config;
++ conf->common.status = 1;
++ send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
++ break;
++ default:
++ eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
++ }
++
++ return 1;
++}
++
++static int process_srp_iu(struct iu_entry *iue)
++{
++ union viosrp_iu *iu = vio_iu(iue);
++ int done = 1;
++ u8 opcode = iu->srp.rsp.opcode;
++
++ switch (opcode) {
++ case SRP_LOGIN_REQ:
++ process_login(iue);
++ break;
++ case SRP_TSK_MGMT:
++ done = process_tsk_mgmt(iue);
++ break;
++ case SRP_CMD:
++ queue_cmd(iue);
++ done = 0;
++ break;
++ case SRP_LOGIN_RSP:
++ case SRP_I_LOGOUT:
++ case SRP_T_LOGOUT:
++ case SRP_RSP:
++ case SRP_CRED_REQ:
++ case SRP_CRED_RSP:
++ case SRP_AER_REQ:
++ case SRP_AER_RSP:
++ eprintk("Unsupported type %u\n", opcode);
++ break;
++ default:
++ eprintk("Unknown type %u\n", opcode);
++ }
++
++ return done;
++}
++
++static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
++{
++ struct vio_port *vport = target_to_port(target);
++ struct iu_entry *iue;
++ long err, done;
++
++ iue = srp_iu_get(target);
++ if (!iue) {
++ eprintk("Error getting IU from pool, %p\n", target);
++ return;
++ }
++
++ iue->remote_token = crq->IU_data_ptr;
++
++ err = h_copy_rdma(crq->IU_length, vport->riobn,
++ iue->remote_token, vport->liobn, iue->sbuf->dma);
++
++ if (err != H_SUCCESS)
++ eprintk("%ld transferring data error %p\n", err, iue);
++
++ if (crq->format == VIOSRP_MAD_FORMAT)
++ done = process_mad_iu(iue);
++ else
++ done = process_srp_iu(iue);
++
++ if (done)
++ srp_iu_put(iue);
++}
++
++static irqreturn_t ibmvstgt_interrupt(int irq, void *data, struct pt_regs *regs)
++{
++ struct srp_target *target = (struct srp_target *) data;
++ struct vio_port *vport = target_to_port(target);
++
++ vio_disable_interrupts(vport->dma_dev);
++ queue_work(vtgtd, &vport->crq_work);
++
++ return IRQ_HANDLED;
++}
++
++static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
++{
++ int err;
++ struct vio_port *vport = target_to_port(target);
++
++ queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
++ if (!queue->msgs)
++ goto malloc_failed;
++ queue->size = PAGE_SIZE / sizeof(*queue->msgs);
++
++ queue->msg_token = dma_map_single(target->dev, queue->msgs,
++ queue->size * sizeof(*queue->msgs),
++ DMA_BIDIRECTIONAL);
++
++ if (dma_mapping_error(queue->msg_token))
++ goto map_failed;
++
++ err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
++ PAGE_SIZE);
++
++ /* If the adapter was left active for some reason (like kexec)
++ * try freeing and re-registering
++ */
++ if (err == H_RESOURCE) {
++ do {
++ err = h_free_crq(vport->dma_dev->unit_address);
++ } while (err == H_BUSY || H_IS_LONG_BUSY(err));
++
++ err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
++ PAGE_SIZE);
++ }
++
++ if (err != H_SUCCESS && err != 2) {
++ eprintk("Error 0x%x opening virtual adapter\n", err);
++ goto reg_crq_failed;
++ }
++
++ err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
++ SA_INTERRUPT, "ibmvstgt", target);
++ if (err)
++ goto req_irq_failed;
++
++ vio_enable_interrupts(vport->dma_dev);
++
++ h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
++
++ queue->cur = 0;
++ spin_lock_init(&queue->lock);
++
++ return 0;
++
++req_irq_failed:
++ do {
++ err = h_free_crq(vport->dma_dev->unit_address);
++ } while (err == H_BUSY || H_IS_LONG_BUSY(err));
++
++reg_crq_failed:
++ dma_unmap_single(target->dev, queue->msg_token,
++ queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
++map_failed:
++ free_page((unsigned long) queue->msgs);
++
++malloc_failed:
++ return -ENOMEM;
++}
++
++static void crq_queue_destroy(struct srp_target *target)
++{
++ struct vio_port *vport = target_to_port(target);
++ struct crq_queue *queue = &vport->crq_queue;
++ int err;
++
++ free_irq(vport->dma_dev->irq, target);
++ do {
++ err = h_free_crq(vport->dma_dev->unit_address);
++ } while (err == H_BUSY || H_IS_LONG_BUSY(err));
++
++ dma_unmap_single(target->dev, queue->msg_token,
++ queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
++
++ free_page((unsigned long) queue->msgs);
++}
++
++static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
++{
++ struct vio_port *vport = target_to_port(target);
++ dprintk("%x %x\n", crq->valid, crq->format);
++
++ switch (crq->valid) {
++ case 0xC0:
++ /* initialization */
++ switch (crq->format) {
++ case 0x01:
++ h_send_crq(vport->dma_dev->unit_address,
++ 0xC002000000000000, 0);
++ break;
++ case 0x02:
++ break;
++ default:
++ eprintk("Unknown format %u\n", crq->format);
++ }
++ break;
++ case 0xFF:
++ /* transport event */
++ break;
++ case 0x80:
++ /* real payload */
++ switch (crq->format) {
++ case VIOSRP_SRP_FORMAT:
++ case VIOSRP_MAD_FORMAT:
++ process_iu(crq, target);
++ break;
++ case VIOSRP_OS400_FORMAT:
++ case VIOSRP_AIX_FORMAT:
++ case VIOSRP_LINUX_FORMAT:
++ case VIOSRP_INLINE_FORMAT:
++ eprintk("Unsupported format %u\n", crq->format);
++ break;
++ default:
++ eprintk("Unknown format %u\n", crq->format);
++ }
++ break;
++ default:
++ eprintk("unknown message type 0x%02x!?\n", crq->valid);
++ }
++}
++
++static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
++{
++ struct viosrp_crq *crq;
++ unsigned long flags;
++
++ spin_lock_irqsave(&queue->lock, flags);
++ crq = &queue->msgs[queue->cur];
++ if (crq->valid & 0x80) {
++ if (++queue->cur == queue->size)
++ queue->cur = 0;
++ } else
++ crq = NULL;
++ spin_unlock_irqrestore(&queue->lock, flags);
++
++ return crq;
++}
++
++static void handle_crq(void *data)
++{
++ struct srp_target *target = (struct srp_target *) data;
++ struct vio_port *vport = target_to_port(target);
++ struct viosrp_crq *crq;
++ int done = 0;
++
++ while (!done) {
++ while ((crq = next_crq(&vport->crq_queue)) != NULL) {
++ process_crq(crq, target);
++ crq->valid = 0x00;
++ }
++
++ vio_enable_interrupts(vport->dma_dev);
++
++ crq = next_crq(&vport->crq_queue);
++ if (crq) {
++ vio_disable_interrupts(vport->dma_dev);
++ process_crq(crq, target);
++ crq->valid = 0x00;
++ } else
++ done = 1;
++ }
++
++ handle_cmd_queue(target);
++}
++
++
++static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *scmd)
++{
++ unsigned long flags;
++ struct iu_entry *iue = (struct iu_entry *) scmd->SCp.ptr;
++ struct srp_target *target = iue->target;
++
++ dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
++
++ spin_lock_irqsave(&target->lock, flags);
++ list_del(&iue->ilist);
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ srp_iu_put(iue);
++
++ return 0;
++}
++
++static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
++{
++ struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
++ union viosrp_iu *iu = vio_iu(iue);
++ unsigned char status, asc;
++
++ eprintk("%p %d\n", iue, result);
++ status = NO_SENSE;
++ asc = 0;
++
++ switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
++ case SRP_TSK_ABORT_TASK:
++ asc = 0x14;
++ if (result)
++ status = ABORTED_COMMAND;
++ break;
++ default:
++ break;
++ }
++
++ send_rsp(iue, status, asc);
++ srp_iu_put(iue);
++
++ return 0;
++}
++
++static ssize_t system_id_show(struct class_device *cdev, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
++}
++
++static ssize_t partition_number_show(struct class_device *cdev, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
++}
++
++static ssize_t unit_address_show(struct class_device *cdev, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(cdev);
++ struct srp_target *target = host_to_target(shost);
++ struct vio_port *vport = target_to_port(target);
++ return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
++}
++
++static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
++static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
++static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
++
++static struct class_device_attribute *ibmvstgt_attrs[] = {
++ &class_device_attr_system_id,
++ &class_device_attr_partition_number,
++ &class_device_attr_unit_address,
++ NULL,
++};
++
++static struct scsi_host_template ibmvstgt_sht = {
++ .name = TGT_NAME,
++ .module = THIS_MODULE,
++ .can_queue = INITIAL_SRP_LIMIT,
++ .sg_tablesize = SG_ALL,
++ .use_clustering = DISABLE_CLUSTERING,
++ .max_sectors = DEFAULT_MAX_SECTORS,
++ .transfer_response = ibmvstgt_cmd_done,
++ .transfer_data = ibmvstgt_transfer_data,
++ .eh_abort_handler = ibmvstgt_eh_abort_handler,
++ .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
++ .shost_attrs = ibmvstgt_attrs,
++ .proc_name = TGT_NAME,
++};
++
++static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
++{
++ struct Scsi_Host *shost;
++ struct srp_target *target;
++ struct vio_port *vport;
++ unsigned int *dma, dma_size;
++ int err = -ENOMEM;
++
++ vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
++ if (!vport)
++ return err;
++ shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
++ if (!shost)
++ goto free_vport;
++ err = scsi_tgt_alloc_queue(shost);
++ if (err)
++ goto put_host;
++
++ target = host_to_target(shost);
++ target->shost = shost;
++ vport->dma_dev = dev;
++ target->ldata = vport;
++ err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
++ SRP_MAX_IU_LEN);
++ if (err)
++ goto put_host;
++
++ dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
++ &dma_size);
++ if (!dma || dma_size != 40) {
++ eprintk("Couldn't get window property %d\n", dma_size);
++ err = -EIO;
++ goto free_srp_target;
++ }
++ vport->liobn = dma[0];
++ vport->riobn = dma[5];
++
++ INIT_WORK(&vport->crq_work, handle_crq, target);
++
++ err = crq_queue_create(&vport->crq_queue, target);
++ if (err)
++ goto free_srp_target;
++
++ err = scsi_add_host(shost, target->dev);
++ if (err)
++ goto destroy_queue;
++ return 0;
++
++destroy_queue:
++ crq_queue_destroy(target);
++free_srp_target:
++ srp_target_free(target);
++put_host:
++ scsi_host_put(shost);
++free_vport:
++ kfree(vport);
++ return err;
++}
++
++static int ibmvstgt_remove(struct vio_dev *dev)
++{
++ struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
++ struct Scsi_Host *shost = target->shost;
++
++ srp_target_free(target);
++ crq_queue_destroy(target);
++ scsi_remove_host(shost);
++ scsi_host_put(shost);
++ return 0;
++}
++
++static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
++ {"v-scsi-host", "IBM,v-scsi-host"},
++ {"",""}
++};
++
++MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
++
++static struct vio_driver ibmvstgt_driver = {
++ .id_table = ibmvstgt_device_table,
++ .probe = ibmvstgt_probe,
++ .remove = ibmvstgt_remove,
++ .driver = {
++ .name = "ibmvscsi",
++ .owner = THIS_MODULE,
++ }
++};
++
++static int get_system_info(void)
++{
++ struct device_node *rootdn;
++ char *id, *model, *name;
++ unsigned int *num;
++
++ rootdn = find_path_device("/");
++ if (!rootdn)
++ return -ENOENT;
++
++ model = get_property(rootdn, "model", NULL);
++ id = get_property(rootdn, "system-id", NULL);
++ if (model && id)
++ snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
++
++ name = get_property(rootdn, "ibm,partition-name", NULL);
++ if (name)
++ strncpy(partition_name, name, sizeof(partition_name));
++
++ num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
++ if (num)
++ partition_number = *num;
++
++ return 0;
++}
++
++static int ibmvstgt_init(void)
++{
++ int err = -ENOMEM;
++
++ printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
++
++ vtgtd = create_workqueue("ibmvtgtd");
++ if (!vtgtd)
++ return err;
++
++ err = get_system_info();
++ if (err)
++ goto destroy_wq;
++
++ err = vio_register_driver(&ibmvstgt_driver);
++ if (err)
++ goto destroy_wq;
++
++ return 0;
++
++destroy_wq:
++ destroy_workqueue(vtgtd);
++ return err;
++}
++
++static void ibmvstgt_exit(void)
++{
++ printk("Unregister IBM virtual SCSI driver\n");
++
++ destroy_workqueue(vtgtd);
++ vio_unregister_driver(&ibmvstgt_driver);
++}
++
++MODULE_DESCRIPTION("IBM Virtual SCSI Target");
++MODULE_AUTHOR("Dave Boutcher");
++MODULE_LICENSE("GPL");
++
++module_init(ibmvstgt_init);
++module_exit(ibmvstgt_exit);
+diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
+new file mode 100644
+index 0000000..36b5b39
+--- /dev/null
++++ b/drivers/scsi/libsrp.c
+@@ -0,0 +1,450 @@
++/*
++ * SCSI RDMA Protocol lib functions
++ *
++ * Copyright (C) 2006 FUJITA Tomonori <tomof@xxxxxxx>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++#include <linux/err.h>
++#include <linux/kfifo.h>
++#include <linux/scatterlist.h>
++#include <linux/dma-mapping.h>
++#include <linux/pci.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_tcq.h>
++#include <scsi/scsi_tgt.h>
++#include <scsi/srp.h>
++#include <scsi/libsrp.h>
++
++enum srp_task_attributes {
++ SRP_SIMPLE_TASK = 0,
++ SRP_HEAD_TASK = 1,
++ SRP_ORDERED_TASK = 2,
++ SRP_ACA_TASK = 4
++};
++
++/* tmp - will replace with SCSI logging stuff */
++#define eprintk(fmt, args...) \
++do { \
++ printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
++} while (0)
++/* #define dprintk eprintk */
++#define dprintk(fmt, args...)
++
++static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
++ struct srp_buf **ring)
++{
++ int i;
++ struct iu_entry *iue;
++
++ q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
++ if (!q->pool)
++ return -ENOMEM;
++ q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
++ if (!q->items)
++ goto free_pool;
++
++ spin_lock_init(&q->lock);
++ q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
++ GFP_KERNEL, &q->lock);
++ if (IS_ERR(q->queue))
++ goto free_item;
++
++ for (i = 0, iue = q->items; i < max; i++) {
++ __kfifo_put(q->queue, (void *) &iue, sizeof(void *));
++ iue->sbuf = ring[i];
++ iue++;
++ }
++ return 0;
++
++free_item:
++ kfree(q->items);
++free_pool:
++ kfree(q->pool);
++ return -ENOMEM;
++}
++
++static void srp_iu_pool_free(struct srp_queue *q)
++{
++ kfree(q->items);
++ kfree(q->pool);
++}
++
++static struct srp_buf ** srp_ring_alloc(struct device *dev,
++ size_t max, size_t size)
++{
++ int i;
++ struct srp_buf **ring;
++
++ ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
++ if (!ring)
++ return NULL;
++
++ for (i = 0; i < max; i++) {
++ ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
++ if (!ring[i])
++ goto out;
++ ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
++ GFP_KERNEL);
++ if (!ring[i]->buf)
++ goto out;
++ }
++ return ring;
++
++out:
++ for (i = 0; i < max && ring[i]; i++) {
++ if (ring[i]->buf)
++ dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
++ kfree(ring[i]);
++ }
++ kfree(ring);
++
++ return NULL;
++}
++
++static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
++ size_t size)
++{
++ int i;
++
++ for (i = 0; i < max; i++) {
++ dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
++ kfree(ring[i]);
++ }
++}
++
++int srp_target_alloc(struct srp_target *target, struct device *dev,
++ size_t nr, size_t iu_size)
++{
++ int err;
++
++ spin_lock_init(&target->lock);
++ INIT_LIST_HEAD(&target->cmd_queue);
++
++ target->dev = dev;
++ target->dev->driver_data = target;
++
++ target->srp_iu_size = iu_size;
++ target->rx_ring_size = nr;
++ target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
++ if (!target->rx_ring)
++ return -ENOMEM;
++ err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
++ if (err)
++ goto free_ring;
++
++ return 0;
++
++free_ring:
++ srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
++ return -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(srp_target_alloc);
++
++void srp_target_free(struct srp_target *target)
++{
++ srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
++ target->srp_iu_size);
++ srp_iu_pool_free(&target->iu_queue);
++}
++EXPORT_SYMBOL_GPL(srp_target_free);
++
++struct iu_entry *srp_iu_get(struct srp_target *target)
++{
++ struct iu_entry *iue = NULL;
++
++ kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
++ BUG_ON(!iue);
++
++ iue->target = target;
++ iue->scmd = NULL;
++ INIT_LIST_HEAD(&iue->ilist);
++ iue->flags = 0;
++ return iue;
++}
++EXPORT_SYMBOL_GPL(srp_iu_get);
++
++void srp_iu_put(struct iu_entry *iue)
++{
++ kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
++}
++EXPORT_SYMBOL_GPL(srp_iu_put);
++
++static int direct_data(struct scsi_cmnd *scmd, struct srp_direct_buf *md,
++ enum dma_data_direction dir, rdma_io_t rdma_io)
++{
++ struct iu_entry *iue = (struct iu_entry *) scmd->SCp.ptr;
++ struct srp_target *target = iue->target;
++ struct scatterlist *sg = scmd->request_buffer;
++ int nsg, err;
++
++ dprintk("%p %u %u %u %d\n", iue, scmd->request_bufflen, scmd->bufflen,
++ md->len, scmd->use_sg);
++
++ nsg = dma_map_sg(target->dev, sg, scmd->use_sg, DMA_BIDIRECTIONAL);
++ if (!nsg) {
++ printk("fail to map %p %d\n", iue, scmd->use_sg);
++ return 0;
++ }
++ err = rdma_io(iue, sg, nsg, md, 1, dir,
++ min(scmd->request_bufflen, md->len));
++
++ dma_unmap_sg(target->dev, sg, nsg, DMA_BIDIRECTIONAL);
++
++ return err;
++}
++
++static int indirect_data(struct scsi_cmnd *scmd, struct srp_cmd *cmd,
++ struct srp_indirect_buf *id,
++ enum dma_data_direction dir, rdma_io_t rdma_io)
++{
++ struct iu_entry *iue = (struct iu_entry *) scmd->SCp.ptr;
++ struct srp_target *target = iue->target;
++ struct srp_direct_buf *md;
++ struct scatterlist dummy, *sg = scmd->request_buffer;
++ dma_addr_t token = 0;
++ long err;
++ unsigned int done = 0;
++ int nmd, nsg;
++
++ nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
++
++ dprintk("%p %u %u %u %u %d %d %d\n",
++ iue, scmd->request_bufflen, scmd->bufflen,
++ id->len, scmd->offset, nmd,
++ cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
++
++ if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
++ (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
++ md = &id->desc_list[0];
++ goto rdma;
++ }
++
++ md = dma_alloc_coherent(target->dev, id->table_desc.len,
++ &token, GFP_KERNEL);
++ if (!md) {
++ eprintk("Can't get dma memory %u\n", id->table_desc.len);
++ return 0;
++ }
++
++ sg_init_one(&dummy, md, id->table_desc.len);
++ sg_dma_address(&dummy) = token;
++ err = rdma_io(iue, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
++ id->table_desc.len);
++ if (err < 0) {
++ eprintk("Error copying indirect table %ld\n", err);
++ goto free_mem;
++ }
++
++rdma:
++ nsg = dma_map_sg(target->dev, sg, scmd->use_sg, DMA_BIDIRECTIONAL);
++ if (!nsg) {
++ eprintk("fail to map %p %d\n", iue, scmd->use_sg);
++ goto free_mem;
++ }
++
++ err = rdma_io(iue, sg, nsg, md, nmd, dir,
++ min(scmd->request_bufflen, id->len));
++ dma_unmap_sg(target->dev, sg, nsg, DMA_BIDIRECTIONAL);
++
++free_mem:
++ if (token)
++ dma_free_coherent(target->dev, id->table_desc.len, md, token);
++
++ return done;
++}
++
++static int data_out_desc_size(struct srp_cmd *cmd)
++{
++ int size = 0;
++ u8 fmt = cmd->buf_fmt >> 4;
++
++ switch (fmt) {
++ case SRP_NO_DATA_DESC:
++ break;
++ case SRP_DATA_DESC_DIRECT:
++ size = sizeof(struct srp_direct_buf);
++ break;
++ case SRP_DATA_DESC_INDIRECT:
++ size = sizeof(struct srp_indirect_buf) +
++ sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
++ break;
++ default:
++ eprintk("client error. Invalid data_out_format %x\n", fmt);
++ break;
++ }
++ return size;
++}
++
++static int __srp_transfer_data(struct scsi_cmnd *scmd, struct srp_cmd *cmd,
++ enum dma_data_direction dir, rdma_io_t rdma_io)
++{
++ struct srp_direct_buf *md;
++ struct srp_indirect_buf *id;
++ int offset, err = 0;
++ u8 format;
++
++ offset = cmd->add_cdb_len * 4;
++ if (dir == DMA_FROM_DEVICE)
++ offset += data_out_desc_size(cmd);
++
++ if (dir == DMA_TO_DEVICE)
++ format = cmd->buf_fmt >> 4;
++ else
++ format = cmd->buf_fmt & ((1U << 4) - 1);
++
++ switch (format) {
++ case SRP_NO_DATA_DESC:
++ break;
++ case SRP_DATA_DESC_DIRECT:
++ md = (struct srp_direct_buf *)
++ (cmd->add_data + offset);
++ err = direct_data(scmd, md, dir, rdma_io);
++ break;
++ case SRP_DATA_DESC_INDIRECT:
++ id = (struct srp_indirect_buf *)
++ (cmd->add_data + offset);
++ err = indirect_data(scmd, cmd, id, dir, rdma_io);
++ break;
++ default:
++ eprintk("Unknown format %d %x\n", dir, format);
++ break;
++ }
++
++ return err;
++}
++
++/* TODO: this can be called multiple times for a single command. */
++int srp_transfer_data(struct scsi_cmnd *scmd, struct srp_cmd *cmd,
++ rdma_io_t rdma_io)
++{
++ struct iu_entry *iue = (struct iu_entry *) scmd->SCp.ptr;
++ enum dma_data_direction dir;
++
++ if (test_bit(V_WRITE, &iue->flags))
++ dir = DMA_TO_DEVICE;
++ else
++ dir = DMA_FROM_DEVICE;
++ __srp_transfer_data(scmd, cmd, dir, rdma_io);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(srp_transfer_data);
++
++static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
++{
++ struct srp_direct_buf *md;
++ struct srp_indirect_buf *id;
++ int len = 0, offset = cmd->add_cdb_len * 4;
++ u8 fmt;
++
++ if (dir == DMA_TO_DEVICE)
++ fmt = cmd->buf_fmt >> 4;
++ else {
++ fmt = cmd->buf_fmt & ((1U << 4) - 1);
++ offset += data_out_desc_size(cmd);
++ }
++
++ switch (fmt) {
++ case SRP_NO_DATA_DESC:
++ break;
++ case SRP_DATA_DESC_DIRECT:
++ md = (struct srp_direct_buf *) (cmd->add_data + offset);
++ len = md->len;
++ break;
++ case SRP_DATA_DESC_INDIRECT:
++ id = (struct srp_indirect_buf *) (cmd->add_data + offset);
++ len = id->len;
++ break;
++ default:
++ eprintk("invalid data format %x\n", fmt);
++ break;
++ }
++ return len;
++}
++
++static uint8_t getcontrolbyte(u8 *cdb)
++{
++ return cdb[COMMAND_SIZE(cdb[0]) - 1];
++}
++
++static inline uint8_t getlink(struct srp_cmd *cmd)
++{
++ return (getcontrolbyte(cmd->cdb) & 0x01);
++}
++
++int srp_cmd_perform(struct iu_entry *iue, struct srp_cmd *cmd)
++{
++ struct Scsi_Host *shost = iue->target->shost;
++ enum dma_data_direction data_dir;
++ struct scsi_cmnd *scmd;
++ int tag, len;
++
++ if (getlink(cmd))
++ __set_bit(V_LINKED, &iue->flags);
++
++ tag = MSG_SIMPLE_TAG;
++
++ switch (cmd->task_attr) {
++ case SRP_SIMPLE_TASK:
++ tag = MSG_SIMPLE_TAG;
++ break;
++ case SRP_ORDERED_TASK:
++ tag = MSG_ORDERED_TAG;
++ break;
++ case SRP_HEAD_TASK:
++ tag = MSG_HEAD_TAG;
++ break;
++ default:
++ eprintk("Task attribute %d not supported\n", cmd->task_attr);
++ tag = MSG_ORDERED_TAG;
++ }
++
++ switch (cmd->cdb[0]) {
++ case WRITE_6:
++ case WRITE_10:
++ case WRITE_VERIFY:
++ case WRITE_12:
++ case WRITE_VERIFY_12:
++ __set_bit(V_WRITE, &iue->flags);
++ }
++
++ if (cmd->buf_fmt >> 4)
++ data_dir = DMA_TO_DEVICE;
++ else
++ data_dir = DMA_FROM_DEVICE;
++ len = vscsis_data_length(cmd, data_dir);
++
++ dprintk("%p %x %lx %d %d %d %llx\n", iue, cmd->cdb[0],
++ cmd->lun, data_dir, len, tag, (unsigned long long) cmd->tag);
++
++ scmd = scsi_host_get_command(shost, data_dir, GFP_KERNEL);
++ BUG_ON(!scmd);
++ scmd->SCp.ptr = (char *) iue;
++ memcpy(scmd->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
++ scmd->request_buffer = 0;
++ scmd->request_bufflen = len;
++ scmd->tag = tag;
++ iue->scmd = scmd;
++ scsi_tgt_queue_command(scmd, (struct scsi_lun *) &cmd->lun, cmd->tag);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(srp_cmd_perform);
++
++MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions");
++MODULE_AUTHOR("FUJITA Tomonori");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 73994e2..0591b93 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -210,8 +210,7 @@ static struct scsi_host_cmd_pool scsi_cm
+
+ static DEFINE_MUTEX(host_cmd_pool_mutex);
+
+-static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
+- gfp_t gfp_mask)
++struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
+ {
+ struct scsi_cmnd *cmd;
+
+@@ -232,6 +231,7 @@ static struct scsi_cmnd *__scsi_get_comm
+
+ return cmd;
+ }
++EXPORT_SYMBOL_GPL(__scsi_get_command);
+
+ /*
+ * Function: scsi_get_command()
+@@ -268,9 +268,29 @@ struct scsi_cmnd *scsi_get_command(struc
+ put_device(&dev->sdev_gendev);
+
+ return cmd;
+-}
++}
+ EXPORT_SYMBOL(scsi_get_command);
+
++void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
++ struct device *dev)
++{
++ unsigned long flags;
++
++ /* changing locks here, don't need to restore the irq state */
++ spin_lock_irqsave(&shost->free_list_lock, flags);
++ if (unlikely(list_empty(&shost->free_list))) {
++ list_add(&cmd->list, &shost->free_list);
++ cmd = NULL;
++ }
++ spin_unlock_irqrestore(&shost->free_list_lock, flags);
++
++ if (likely(cmd != NULL))
++ kmem_cache_free(shost->cmd_pool->slab, cmd);
++
++ put_device(dev);
++}
++EXPORT_SYMBOL(__scsi_put_command);
++
+ /*
+ * Function: scsi_put_command()
+ *
+@@ -285,26 +305,15 @@ EXPORT_SYMBOL(scsi_get_command);
+ void scsi_put_command(struct scsi_cmnd *cmd)
+ {
+ struct scsi_device *sdev = cmd->device;
+- struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+-
++
+ /* serious error if the command hasn't come from a device list */
+ spin_lock_irqsave(&cmd->device->list_lock, flags);
+ BUG_ON(list_empty(&cmd->list));
+ list_del_init(&cmd->list);
+- spin_unlock(&cmd->device->list_lock);
+- /* changing locks here, don't need to restore the irq state */
+- spin_lock(&shost->free_list_lock);
+- if (unlikely(list_empty(&shost->free_list))) {
+- list_add(&cmd->list, &shost->free_list);
+- cmd = NULL;
+- }
+- spin_unlock_irqrestore(&shost->free_list_lock, flags);
++ spin_unlock_irqrestore(&cmd->device->list_lock, flags);
+
+- if (likely(cmd != NULL))
+- kmem_cache_free(shost->cmd_pool->slab, cmd);
+-
+- put_device(&sdev->sdev_gendev);
++ __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
+ }
+ EXPORT_SYMBOL(scsi_put_command);
+
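
Exporting __scsi_get_command()/__scsi_put_command() lets target code
allocate commands at the host level, with no scsi_device involved. A
hedged sketch of the pairing (essentially what scsi_host_get_command()
in scsi_tgt_lib.c below does; error handling elided):

	if (get_device(&shost->shost_gendev)) {
		struct scsi_cmnd *cmd = __scsi_get_command(shost, gfp_mask);
		/* ... use cmd without a scsi_device ... */
		__scsi_put_command(shost, cmd, &shost->shost_gendev);
	}
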
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index faee475..f8ac3d5 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -803,7 +803,7 @@ static struct scsi_cmnd *scsi_end_reques
+ return NULL;
+ }
+
+-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
++struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+ {
+ struct scsi_host_sg_pool *sgp;
+ struct scatterlist *sgl;
+@@ -844,7 +844,9 @@ static struct scatterlist *scsi_alloc_sg
+ return sgl;
+ }
+
+-static void scsi_free_sgtable(struct scatterlist *sgl, int index)
++EXPORT_SYMBOL(scsi_alloc_sgtable);
++
++void scsi_free_sgtable(struct scatterlist *sgl, int index)
+ {
+ struct scsi_host_sg_pool *sgp;
+
+@@ -854,6 +856,8 @@ static void scsi_free_sgtable(struct sca
+ mempool_free(sgl, sgp->pool);
+ }
+
++EXPORT_SYMBOL(scsi_free_sgtable);
++
+ /*
+ * Function: scsi_release_buffers()
+ *
+@@ -1714,29 +1718,40 @@ u64 scsi_calculate_bounce_limit(struct S
+ }
+ EXPORT_SYMBOL(scsi_calculate_bounce_limit);
+
+-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
++struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
++ request_fn_proc *request_fn)
+ {
+- struct Scsi_Host *shost = sdev->host;
+ struct request_queue *q;
+
+- q = blk_init_queue(scsi_request_fn, NULL);
++ q = blk_init_queue(request_fn, NULL);
+ if (!q)
+ return NULL;
+
+- blk_queue_prep_rq(q, scsi_prep_fn);
+-
+ blk_queue_max_hw_segments(q, shost->sg_tablesize);
+ blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
+ blk_queue_max_sectors(q, shost->max_sectors);
+ blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
+ blk_queue_segment_boundary(q, shost->dma_boundary);
+- blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+- blk_queue_softirq_done(q, scsi_softirq_done);
+
+ if (!shost->use_clustering)
+ clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ return q;
+ }
++EXPORT_SYMBOL(__scsi_alloc_queue);
++
++struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
++{
++ struct request_queue *q;
++
++ q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
++ if (!q)
++ return NULL;
++
++ blk_queue_prep_rq(q, scsi_prep_fn);
++ blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
++ blk_queue_softirq_done(q, scsi_softirq_done);
++ return q;
++}
+
+ void scsi_free_queue(struct request_queue *q)
+ {
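
Splitting scsi_alloc_queue() this way lets __scsi_alloc_queue() apply
the host's DMA and segment limits to a queue driven by an arbitrary
request_fn, so the target code can build its per-host uspace queue
without the initiator-side prep and flush hooks. Illustratively (the
real request function lives in scsi_tgt_lib.c; my_tgt_request_fn is a
placeholder):

	static void my_tgt_request_fn(struct request_queue *q)
	{
		/* pull requests off q and feed the target state machine */
	}

	q = __scsi_alloc_queue(shost, my_tgt_request_fn);
	if (!q)
		return -ENOMEM;
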
+diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
+new file mode 100644
+index 0000000..c9d15a7
+--- /dev/null
++++ b/drivers/scsi/scsi_tgt_if.c
+@@ -0,0 +1,316 @@
++/*
++ * SCSI target kernel/user interface functions
++ *
++ * Copyright (C) 2005 FUJITA Tomonori <tomof@xxxxxxx>
++ * Copyright (C) 2005 Mike Christie <michaelc@xxxxxxxxxxx>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++#include <linux/blkdev.h>
++#include <linux/file.h>
++#include <net/tcp.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_tgt.h>
++#include <scsi/scsi_tgt_if.h>
++
++#include "scsi_tgt_priv.h"
++
++struct rbuf {
++ u32 idx;
++ u32 nr_entry;
++ int entry_size;
++ char *buf;
++ int buf_size;
++ spinlock_t lock;
++};
++
++static int chrdev;
++static struct rbuf txbuf, rxbuf;
++static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
++
++static inline struct rbuf_hdr *head_rbuf_hdr(struct rbuf *rbuf, u32 idx)
++{
++ u32 offset = (idx & (rbuf->nr_entry - 1)) * rbuf->entry_size;
++ return (struct rbuf_hdr *) (rbuf->buf + offset);
++}
++
++static void rbuf_init(struct rbuf *rbuf, char *buf, int bsize, int esize)
++{
++ int i;
++
++ esize += sizeof(struct rbuf_hdr);
++ rbuf->idx = 0;
++ rbuf->entry_size = esize;
++ rbuf->buf = buf;
++ spin_lock_init(&rbuf->lock);
++
++ bsize /= esize;
++ for (i = 0; (1 << i) < bsize && (1 << (i + 1)) <= bsize; i++)
++ ;
++ rbuf->nr_entry = 1 << i;
++}
++
++static int send_event_rsp(u32 type, struct tgt_event *p)
++{
++ struct tgt_event *ev;
++ struct rbuf_hdr *hdr;
++ struct page *sp, *ep;
++ unsigned long flags;
++ int err = 0;
++
++ spin_lock_irqsave(&txbuf.lock, flags);
++
++ hdr = head_rbuf_hdr(&txbuf, txbuf.idx);
++ if (hdr->status)
++ err = 1;
++ else
++ txbuf.idx++;
++
++ spin_unlock_irqrestore(&txbuf.lock, flags);
++
++ if (err)
++ return err;
++
++ ev = (struct tgt_event *) hdr->data;
++ memcpy(ev, p, sizeof(*ev));
++ ev->type = type;
++ hdr->status = 1;
++ mb();
++
++ sp = virt_to_page(hdr);
++ ep = virt_to_page((char *) hdr->data + sizeof(*ev));
++ for (;sp <= ep; sp++)
++ flush_dcache_page(sp);
++
++ wake_up_interruptible(&tgt_poll_wait);
++
++ return 0;
++}
++
++int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
++{
++ struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
++ struct tgt_event ev;
++ int err;
++
++ memset(&ev, 0, sizeof(ev));
++ ev.k.cmd_req.host_no = shost->host_no;
++ ev.k.cmd_req.cid = cmd->request->tag;
++ ev.k.cmd_req.data_len = cmd->request_bufflen;
++ memcpy(ev.k.cmd_req.scb, cmd->cmnd, sizeof(ev.k.cmd_req.scb));
++ memcpy(ev.k.cmd_req.lun, lun, sizeof(ev.k.cmd_req.lun));
++ ev.k.cmd_req.attribute = cmd->tag;
++ ev.k.cmd_req.tag = tag;
++ ev.k.cmd_req.uaddr = (u64) (unsigned long) cmd->request_buffer;
++
++ dprintk("%p %d %u %u %x %llx\n", cmd, shost->host_no, ev.k.cmd_req.cid,
++ ev.k.cmd_req.data_len, cmd->tag,
++ (unsigned long long) ev.k.cmd_req.tag);
++
++ err = send_event_rsp(TGT_KEVENT_CMD_REQ, &ev);
++ if (err)
++ eprintk("tx buf is full, could not send\n");
++ return err;
++}
++
++int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd)
++{
++ struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
++ struct tgt_event ev;
++ int err;
++
++ memset(&ev, 0, sizeof(ev));
++ ev.k.cmd_done.host_no = shost->host_no;
++ ev.k.cmd_done.cid = cmd->request->tag;
++ ev.k.cmd_done.result = cmd->result;
++
++ dprintk("%p %d %u %u %x %llx\n", cmd, shost->host_no, ev.k.cmd_req.cid,
++ ev.k.cmd_req.data_len, cmd->tag,
++ (unsigned long long) ev.k.cmd_req.tag);
++
++ err = send_event_rsp(TGT_KEVENT_CMD_DONE, &ev);
++ if (err)
++ eprintk("tx buf is full, could not send\n");
++ return err;
++}
++
++int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
++ struct scsi_lun *scsilun, void *data)
++{
++ struct tgt_event ev;
++ int err;
++
++ memset(&ev, 0, sizeof(ev));
++ ev.k.tsk_mgmt_req.host_no = host_no;
++ ev.k.tsk_mgmt_req.function = function;
++ ev.k.tsk_mgmt_req.tag = tag;
++ memcpy(ev.k.tsk_mgmt_req.lun, scsilun, sizeof(ev.k.tsk_mgmt_req.lun));
++ ev.k.tsk_mgmt_req.mid = (u64) (unsigned long) data;
++
++ dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long)
tag,
++ (unsigned long long) ev.k.tsk_mgmt_req.mid);
++
++ err = send_event_rsp(TGT_KEVENT_TSK_MGMT_REQ, &ev);
++ if (err)
++ eprintk("tx buf is full, could not send\n");
++ return err;
++}
++
++static int event_recv_msg(struct tgt_event *ev)
++{
++ int err = 0;
++
++ switch (ev->type) {
++ case TGT_UEVENT_CMD_RSP:
++ err = scsi_tgt_kspace_exec(ev->u.cmd_rsp.host_no,
++ ev->u.cmd_rsp.cid,
++ ev->u.cmd_rsp.result,
++ ev->u.cmd_rsp.len,
++ ev->u.cmd_rsp.uaddr,
++ ev->u.cmd_rsp.rw);
++ break;
++ case TGT_UEVENT_TSK_MGMT_RSP:
++ err = scsi_tgt_kspace_tsk_mgmt(ev->u.tsk_mgmt_rsp.host_no,
++ ev->u.tsk_mgmt_rsp.mid,
++ ev->u.tsk_mgmt_rsp.result);
++ break;
++ default:
++ eprintk("unknown type %d\n", ev->type);
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
++static ssize_t tgt_write(struct file *file, const char __user * buffer,
++ size_t count, loff_t * ppos)
++{
++ struct rbuf_hdr *hdr;
++ struct tgt_event *ev;
++ struct page *sp, *ep;
++
++retry:
++ hdr = head_rbuf_hdr(&rxbuf, rxbuf.idx);
++
++ sp = virt_to_page(hdr);
++ ep = virt_to_page((char *) hdr->data + sizeof(*ev));
++ for (;sp <= ep; sp++)
++ flush_dcache_page(sp);
++
++ if (!hdr->status)
++ return count;
++
++ rxbuf.idx++;
++ ev = (struct tgt_event *) hdr->data;
++ event_recv_msg(ev);
++ hdr->status = 0;
++
++ goto retry;
++}
++
++static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
++{
++ struct rbuf_hdr *hdr;
++ unsigned long flags;
++ unsigned int mask = 0;
++
++ poll_wait(file, &tgt_poll_wait, wait);
++
++ spin_lock_irqsave(&txbuf.lock, flags);
++
++ hdr = head_rbuf_hdr(&txbuf, txbuf.idx - 1);
++ if (hdr->status)
++ mask |= POLLIN | POLLRDNORM;
++
++ spin_unlock_irqrestore(&txbuf.lock, flags);
++
++ return mask;
++}
++
++static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ unsigned long size, addr;
++ struct page *page;
++ int err, i;
++
++ if (vma->vm_pgoff) {
++ eprintk("bug\n");
++ return -EINVAL;
++ }
++
++ size = vma->vm_end - vma->vm_start;
++ if (size != TGT_RINGBUF_SIZE * 2) {
++ eprintk("%lu\n", size);
++ return -EINVAL;
++ }
++ addr = vma->vm_start;
++ page = virt_to_page(txbuf.buf);
++ for (i = 0; i < size >> PAGE_SHIFT; i++) {
++ err = vm_insert_page(vma, addr, page);
++ if (err) {
++ eprintk("%d %d %lu\n", err, i, addr);
++ return -EINVAL;
++ }
++ addr += PAGE_SIZE;
++ page++;
++ }
++
++ return 0;
++}
++
++static struct file_operations tgt_fops = {
++ .owner = THIS_MODULE,
++ .poll = tgt_poll,
++ .write = tgt_write,
++ .mmap = tgt_mmap,
++};
++
++void __exit scsi_tgt_if_exit(void)
++{
++ int order = long_log2(TGT_RINGBUF_SIZE * 2);
++
++ unregister_chrdev(chrdev, "tgt");
++ free_pages((unsigned long) txbuf.buf, order);
++}
++
++int __init scsi_tgt_if_init(void)
++{
++ u32 bsize = TGT_RINGBUF_SIZE;
++ int order;
++ char *buf;
++
++ chrdev = register_chrdev(0, "tgt", &tgt_fops);
++ if (chrdev < 0)
++ return chrdev;
++
++ order = long_log2((bsize * 2) >> PAGE_SHIFT);
++ buf = (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
++ order);
++ if (!buf)
++ goto free_dev;
++ rbuf_init(&txbuf, buf, bsize, sizeof(struct tgt_event));
++ rbuf_init(&rxbuf, buf + bsize, bsize, sizeof(struct tgt_event));
++
++ return 0;
++
++free_dev:
++ unregister_chrdev(chrdev, "tgt");
++
++ return -ENOMEM;
++}
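For illustration, a minimal userspace consumer of the interface above might
look like the sketch below. The /dev/tgt path is an assumption (the patch
registers the chrdev but creates no device node), and the per-slot walk of
struct tgt_event entries is elided:

    /*
     * Hypothetical userspace consumer of the "tgt" chrdev registered above.
     * The /dev/tgt node name is assumed, not created by this patch.
     */
    #include <fcntl.h>
    #include <poll.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define TGT_RINGBUF_SIZE (1UL << 16)

    int main(void)
    {
            struct pollfd pfd;
            char *tx_ring, *rx_ring;
            int fd = open("/dev/tgt", O_RDWR);

            if (fd < 0)
                    return 1;

            /* tgt_mmap() accepts exactly two rings' worth of pages:
             * kernel->user (tx) first, user->kernel (rx) second. */
            tx_ring = mmap(NULL, TGT_RINGBUF_SIZE * 2, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
            if (tx_ring == MAP_FAILED)
                    return 1;
            rx_ring = tx_ring + TGT_RINGBUF_SIZE;
            (void) rx_ring;         /* responses are placed in this half */

            pfd.fd = fd;
            pfd.events = POLLIN;
            for (;;) {
                    /* tgt_poll() reports POLLIN when a tx slot has status set */
                    poll(&pfd, 1, -1);
                    /* ... consume struct tgt_event entries from tx_ring ... */
                    /* any write(2) makes tgt_write() drain the rx ring */
                    write(fd, "", 1);
            }
    }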
+diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
+new file mode 100644
+index 0000000..447bf88
+--- /dev/null
++++ b/drivers/scsi/scsi_tgt_lib.c
+@@ -0,0 +1,707 @@
++/*
++ * SCSI target lib functions
++ *
++ * Copyright (C) 2005 Mike Christie <michaelc@xxxxxxxxxxx>
++ * Copyright (C) 2005 FUJITA Tomonori <tomof@xxxxxxx>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++#include <linux/blkdev.h>
++#include <linux/hash.h>
++#include <linux/module.h>
++#include <linux/pagemap.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_tgt.h>
++#include <../drivers/md/dm-bio-list.h>
++
++#include "scsi_tgt_priv.h"
++
++static struct workqueue_struct *scsi_tgtd;
++static kmem_cache_t *scsi_tgt_cmd_cache;
++
++/*
++ * TODO: this struct will be killed when the block layer supports large bios
++ * and James's work struct code is in
++ */
++struct scsi_tgt_cmd {
++ /* TODO replace work with James b's code */
++ struct work_struct work;
++ /* TODO replace the lists with a large bio */
++ struct bio_list xfer_done_list;
++ struct bio_list xfer_list;
++
++ struct list_head hash_list;
++ struct request *rq;
++
++ void *buffer;
++ unsigned bufflen;
++};
++
++#define TGT_HASH_ORDER 4
++#define cmd_hashfn(cid) hash_long((cid), TGT_HASH_ORDER)
++
++struct scsi_tgt_queuedata {
++ struct Scsi_Host *shost;
++ struct list_head cmd_hash[1 << TGT_HASH_ORDER];
++ spinlock_t cmd_hash_lock;
++};
++
++/*
++ * Function: scsi_host_get_command()
++ *
++ * Purpose: Allocate and setup a scsi command block and blk request
++ *
++ * Arguments: shost - scsi host
++ * data_dir - dma data dir
++ * gfp_mask- allocator flags
++ *
++ * Returns: The allocated scsi command structure.
++ *
++ * This should be called by target LLDs to get a command.
++ */
++struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
++ enum dma_data_direction data_dir,
++ gfp_t gfp_mask)
++{
++ int write = (data_dir == DMA_TO_DEVICE);
++ struct request *rq;
++ struct scsi_cmnd *cmd;
++ struct scsi_tgt_cmd *tcmd;
++
++ /* Bail if we can't get a reference to the device */
++ if (!get_device(&shost->shost_gendev))
++ return NULL;
++
++ tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
++ if (!tcmd)
++ goto put_dev;
++
++ rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
++ if (!rq)
++ goto free_tcmd;
++
++ cmd = __scsi_get_command(shost, gfp_mask);
++ if (!cmd)
++ goto release_rq;
++
++ memset(cmd, 0, sizeof(*cmd));
++ cmd->sc_data_direction = data_dir;
++ cmd->jiffies_at_alloc = jiffies;
++ cmd->request = rq;
++
++ rq->special = cmd;
++ rq->flags |= REQ_SPECIAL | REQ_BLOCK_PC;
++ rq->end_io_data = tcmd;
++
++ bio_list_init(&tcmd->xfer_list);
++ bio_list_init(&tcmd->xfer_done_list);
++ tcmd->rq = rq;
++
++ return cmd;
++
++release_rq:
++ blk_put_request(rq);
++free_tcmd:
++ kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
++put_dev:
++ put_device(&shost->shost_gendev);
++ return NULL;
++
++}
++EXPORT_SYMBOL_GPL(scsi_host_get_command);
++
++/*
++ * Function: scsi_host_put_command()
++ *
++ * Purpose: Free a scsi command block
++ *
++ * Arguments: shost - scsi host
++ * cmd - command block to free
++ *
++ * Returns: Nothing.
++ *
++ * Notes: The command must not belong to any lists.
++ */
++static void scsi_host_put_command(struct Scsi_Host *shost,
++ struct scsi_cmnd *cmd)
++{
++ struct request_queue *q = shost->uspace_req_q;
++ struct request *rq = cmd->request;
++ struct scsi_tgt_cmd *tcmd = rq->end_io_data;
++ unsigned long flags;
++
++ kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ __blk_put_request(q, rq);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ __scsi_put_command(shost, cmd, &shost->shost_gendev);
++}
++
++static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
++{
++ struct bio *bio;
++
++ /* must call bio_endio in case bio was bounced */
++ while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
++ bio_endio(bio, bio->bi_size, 0);
++ bio_unmap_user(bio);
++ }
++
++ while ((bio = bio_list_pop(&tcmd->xfer_list))) {
++ bio_endio(bio, bio->bi_size, 0);
++ bio_unmap_user(bio);
++ }
++}
++
++static void cmd_hashlist_del(struct scsi_cmnd *cmd)
++{
++ struct request_queue *q = cmd->request->q;
++ struct scsi_tgt_queuedata *qdata = q->queuedata;
++ unsigned long flags;
++ struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
++
++ spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
++ list_del(&tcmd->hash_list);
++ spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
++}
++
++static void scsi_tgt_cmd_destroy(void *data)
++{
++ struct scsi_cmnd *cmd = data;
++ struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
++
++ dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
++ rq_data_dir(cmd->request));
++
++ cmd_hashlist_del(cmd);
++
++ /*
++ * We must set rq->flags here because bio_map_user and
++ * blk_rq_bio_prep ruined it.
++ */
++ if (cmd->sc_data_direction == DMA_TO_DEVICE)
++ cmd->request->flags |= 1;
++ else
++ cmd->request->flags &= ~1UL;
++
++ scsi_unmap_user_pages(tcmd);
++ scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
++}
++
++static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd)
++{
++ struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
++ unsigned long flags;
++ struct list_head *head;
++ static u32 tag = 0;
++
++ spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
++ rq->tag = tag++;
++ head = &qdata->cmd_hash[cmd_hashfn(rq->tag)];
++ list_add(&tcmd->hash_list, head);
++ spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
++}
++
++/*
++ * scsi_tgt_alloc_queue - setup queue used for message passing
++ * shost: scsi host
++ *
++ * This should be called by the LLD after host allocation.
++ * The queue is released when the host is released.
++ */
++int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
++{
++ struct scsi_tgt_queuedata *queuedata;
++ struct request_queue *q;
++ int err, i;
++
++ /*
++ * Do we need to send a netlink event or should uspace
++ * just respond to the hotplug event?
++ */
++ q = __scsi_alloc_queue(shost, NULL);
++ if (!q)
++ return -ENOMEM;
++
++ queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
++ if (!queuedata) {
++ err = -ENOMEM;
++ goto cleanup_queue;
++ }
++ queuedata->shost = shost;
++ q->queuedata = queuedata;
++
++ /*
++ * this is a silly hack. We should probably just queue as many
++ * commands as we receive to userspace. uspace can then make
++ * sure we do not overload the HBA
++ */
++ q->nr_requests = shost->hostt->can_queue;
++ /*
++ * We currently only support software LLDs so this does
++ * not matter for now. Do we need this for the cards we support?
++ * If so we should make it a host template value.
++ */
++ blk_queue_dma_alignment(q, 0);
++ shost->uspace_req_q = q;
++
++ for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
++ INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
++ spin_lock_init(&queuedata->cmd_hash_lock);
++
++ return 0;
++
++cleanup_queue:
++ blk_cleanup_queue(q);
++ return err;
++}
++EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
++
++struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
++{
++ struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
++ return queue->shost;
++}
++EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
++
++/*
++ * scsi_tgt_queue_command - queue command for userspace processing
++ * @cmd: scsi command
++ * @scsilun: scsi lun
++ * @tag: unique value to identify this command for tmf
++ */
++int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
++ u64 tag)
++{
++ struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
++ int err;
++
++ init_scsi_tgt_cmd(cmd->request, tcmd);
++ err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
++ if (err)
++ cmd_hashlist_del(cmd);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
++
++/*
++ * This is normally run from an interrupt handler and the unmap
++ * needs process context, so we must queue the work
++ */
++static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
++{
++ struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
++
++ dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
++
++ scsi_tgt_uspace_send_status(cmd);
++ INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy, cmd);
++ queue_work(scsi_tgtd, &tcmd->work);
++}
++
++static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
++{
++ struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
++ int err;
++
++ dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
++
++ err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
++ switch (err) {
++ case SCSI_MLQUEUE_HOST_BUSY:
++ case SCSI_MLQUEUE_DEVICE_BUSY:
++ return -EAGAIN;
++ }
++
++ return 0;
++}
++
++static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
++{
++ int err;
++
++ err = __scsi_tgt_transfer_response(cmd);
++ if (!err)
++ return;
++
++ cmd->result = DID_BUS_BUSY << 16;
++ err = scsi_tgt_uspace_send_status(cmd);
++ if (err <= 0)
++ /* the eh will have to pick this up */
++ printk(KERN_ERR "Could not send cmd %p status\n", cmd);
++}
++
++static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
++{
++ struct request *rq = cmd->request;
++ struct scsi_tgt_cmd *tcmd = rq->end_io_data;
++ int count;
++
++ cmd->use_sg = rq->nr_phys_segments;
++ cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
++ if (!cmd->request_buffer)
++ return -ENOMEM;
++
++ cmd->request_bufflen = rq->data_len;
++
++ dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
++ rq_data_dir(rq));
++ count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
++ if (likely(count <= cmd->use_sg)) {
++ cmd->use_sg = count;
++ return 0;
++ }
++
++ eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
++ scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
++ return -EINVAL;
++}
++
++/* TODO: test this crap and replace bio_map_user with new interface maybe */
++static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
++ int rw)
++{
++ struct request_queue *q = cmd->request->q;
++ struct request *rq = cmd->request;
++ void *uaddr = tcmd->buffer;
++ unsigned int len = tcmd->bufflen;
++ struct bio *bio;
++ int err;
++
++ while (len > 0) {
++ dprintk("%lx %u\n", (unsigned long) uaddr, len);
++ bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
++ if (IS_ERR(bio)) {
++ err = PTR_ERR(bio);
++ dprintk("fail to map %lx %u %d %x\n",
++ (unsigned long) uaddr, len, err, cmd->cmnd[0]);
++ goto unmap_bios;
++ }
++
++ uaddr += bio->bi_size;
++ len -= bio->bi_size;
++
++ /*
++ * The first bio is added and merged. We could probably
++ * try to add others using scsi_merge_bio() but for now
++ * we keep it simple. The first bio should be pretty large
++ * (either hitting the 1 MB bio pages limit or a queue limit)
++ * already but for really large IO we may want to try and
++ * merge these.
++ */
++ if (!rq->bio) {
++ blk_rq_bio_prep(q, rq, bio);
++ rq->data_len = bio->bi_size;
++ } else
++ /* put list of bios to transfer in next go around */
++ bio_list_add(&tcmd->xfer_list, bio);
++ }
++
++ cmd->offset = 0;
++ err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
++ if (err)
++ goto unmap_bios;
++
++ return 0;
++
++unmap_bios:
++ if (rq->bio) {
++ bio_unmap_user(rq->bio);
++ while ((bio = bio_list_pop(&tcmd->xfer_list)))
++ bio_unmap_user(bio);
++ }
++
++ return err;
++}
++
++static int scsi_tgt_transfer_data(struct scsi_cmnd *);
++
++static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
++{
++ struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
++ struct bio *bio;
++ int err;
++
++ /* should we free resources here on error? */
++ if (cmd->result) {
++send_uspace_err:
++ err = scsi_tgt_uspace_send_status(cmd);
++ if (err <= 0)
++ /* the tgt uspace eh will have to pick this up */
++ printk(KERN_ERR "Could not send cmd %p status\n", cmd);
++ return;
++ }
++
++ dprintk("cmd %p request_bufflen %u bufflen %u\n",
++ cmd, cmd->request_bufflen, tcmd->bufflen);
++
++ scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
++ bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
++
++ tcmd->buffer += cmd->request_bufflen;
++ cmd->offset += cmd->request_bufflen;
++
++ if (!tcmd->xfer_list.head) {
++ scsi_tgt_transfer_response(cmd);
++ return;
++ }
++
++ dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
++ cmd, cmd->request_bufflen, tcmd->bufflen);
++
++ bio = bio_list_pop(&tcmd->xfer_list);
++ BUG_ON(!bio);
++
++ blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
++ cmd->request->data_len = bio->bi_size;
++ err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
++ if (err) {
++ cmd->result = DID_ERROR << 16;
++ goto send_uspace_err;
++ }
++
++ if (scsi_tgt_transfer_data(cmd)) {
++ cmd->result = DID_NO_CONNECT << 16;
++ goto send_uspace_err;
++ }
++}
++
++static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
++{
++ int err;
++ struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
++
++ err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
++ switch (err) {
++ case SCSI_MLQUEUE_HOST_BUSY:
++ case SCSI_MLQUEUE_DEVICE_BUSY:
++ return -EAGAIN;
++ default:
++ return 0;
++ }
++}
++
++static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
++ unsigned len)
++{
++ char __user *p = (char __user *) uaddr;
++
++ if (copy_from_user(cmd->sense_buffer, p,
++ min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
++ printk(KERN_ERR "Could not copy the sense buffer\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++static int scsi_tgt_abort_cmd(struct Scsi_Host *host, struct scsi_cmnd *cmd)
++{
++ int err;
++
++ err = host->hostt->eh_abort_handler(cmd);
++ if (err)
++ eprintk("fail to abort %p\n", cmd);
++
++ scsi_tgt_cmd_destroy(cmd);
++ return err;
++}
++
++static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u32 cid)
++{
++ struct scsi_tgt_queuedata *qdata = q->queuedata;
++ struct request *rq = NULL;
++ struct list_head *head;
++ struct scsi_tgt_cmd *tcmd;
++ unsigned long flags;
++
++ head = &qdata->cmd_hash[cmd_hashfn(cid)];
++ spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
++ list_for_each_entry(tcmd, head, hash_list) {
++ if (tcmd->rq->tag == cid) {
++ rq = tcmd->rq;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
++
++ return rq;
++}
++
++int scsi_tgt_kspace_exec(int host_no, u32 cid, int result, u32 len,
++ unsigned long uaddr, u8 rw)
++{
++ struct Scsi_Host *shost;
++ struct scsi_cmnd *cmd;
++ struct request *rq;
++ struct scsi_tgt_cmd *tcmd;
++ int err = 0;
++
++ dprintk("%d %u %d %u %lx %u\n", host_no, cid, result,
++ len, uaddr, rw);
++
++ /* TODO: replace with a O(1) alg */
++ shost = scsi_host_lookup(host_no);
++ if (IS_ERR(shost)) {
++ printk(KERN_ERR "Could not find host no %d\n", host_no);
++ return -EINVAL;
++ }
++
++ if (!shost->uspace_req_q) {
++ printk(KERN_ERR "Not target scsi host %d\n", host_no);
++ goto done;
++ }
++
++ rq = tgt_cmd_hash_lookup(shost->uspace_req_q, cid);
++ if (!rq) {
++ printk(KERN_ERR "Could not find cid %u\n", cid);
++ err = -EINVAL;
++ goto done;
++ }
++ cmd = rq->special;
++
++ dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
++ result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
++
++ if (result == TASK_ABORTED) {
++ scsi_tgt_abort_cmd(shost, cmd);
++ goto done;
++ }
++ /*
++ * store the userspace values here, the working values are
++ * in the request_* values
++ */
++ tcmd = cmd->request->end_io_data;
++ tcmd->buffer = (void *)uaddr;
++ tcmd->bufflen = len;
++ cmd->result = result;
++
++ if (!tcmd->bufflen || cmd->request_buffer) {
++ err = __scsi_tgt_transfer_response(cmd);
++ goto done;
++ }
++
++ /*
++ * TODO: Do we need to handle the case where the request does
++ * not align with the LLD's limits?
++ */
++ err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
++ if (err) {
++ eprintk("%p %d\n", cmd, err);
++ err = -EAGAIN;
++ goto done;
++ }
++
++ /* userspace failure */
++ if (cmd->result) {
++ if (status_byte(cmd->result) == CHECK_CONDITION)
++ scsi_tgt_copy_sense(cmd, uaddr, len);
++ err = __scsi_tgt_transfer_response(cmd);
++ goto done;
++ }
++ /* ask the target LLD to transfer the data to the buffer */
++ err = scsi_tgt_transfer_data(cmd);
++
++done:
++ scsi_host_put(shost);
++ return err;
++}
++
++int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
++ struct scsi_lun *scsilun, void *data)
++{
++ int err;
++
++ /* TODO: need to retry if this fails. */
++ err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
++ tag, scsilun, data);
++ if (err < 0)
++ eprintk("The task management request lost!\n");
++ return err;
++}
++EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
++
++int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
++{
++ struct Scsi_Host *shost;
++ int err = -EINVAL;
++
++ dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
++
++ shost = scsi_host_lookup(host_no);
++ if (IS_ERR(shost)) {
++ printk(KERN_ERR "Could not find host no %d\n", host_no);
++ return -EINVAL;
++ }
++
++ if (!shost->uspace_req_q) {
++ printk(KERN_ERR "Not target scsi host %d\n", host_no);
++ goto done;
++ }
++
++ err = shost->hostt->tsk_mgmt_response(mid, result);
++done:
++ scsi_host_put(shost);
++
++ return err;
++}
++
++static int __init scsi_tgt_init(void)
++{
++ int err;
++
++ scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
++ sizeof(struct scsi_tgt_cmd),
++ 0, 0, NULL, NULL);
++ if (!scsi_tgt_cmd_cache)
++ return -ENOMEM;
++
++ scsi_tgtd = create_workqueue("scsi_tgtd");
++ if (!scsi_tgtd) {
++ err = -ENOMEM;
++ goto free_kmemcache;
++ }
++
++ err = scsi_tgt_if_init();
++ if (err)
++ goto destroy_wq;
++
++ return 0;
++
++destroy_wq:
++ destroy_workqueue(scsi_tgtd);
++free_kmemcache:
++ kmem_cache_destroy(scsi_tgt_cmd_cache);
++ return err;
++}
++
++static void __exit scsi_tgt_exit(void)
++{
++ destroy_workqueue(scsi_tgtd);
++ scsi_tgt_if_exit();
++ kmem_cache_destroy(scsi_tgt_cmd_cache);
++}
++
++module_init(scsi_tgt_init);
++module_exit(scsi_tgt_exit);
++
++MODULE_DESCRIPTION("SCSI target core");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
+new file mode 100644
+index 0000000..bd16a2c
+--- /dev/null
++++ b/drivers/scsi/scsi_tgt_priv.h
+@@ -0,0 +1,24 @@
++struct scsi_cmnd;
++struct scsi_lun;
++struct Scsi_Host;
++struct task_struct;
++
++/* tmp - will replace with SCSI logging stuff */
++#define eprintk(fmt, args...) \
++do { \
++ printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
++} while (0)
++
++#define dprintk eprintk
++
++extern void scsi_tgt_if_exit(void);
++extern int scsi_tgt_if_init(void);
++
++extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
++ u64 tag);
++extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd);
++extern int scsi_tgt_kspace_exec(int host_no, u32 cid, int result, u32 len,
++ unsigned long uaddr, u8 rw);
++extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
++ struct scsi_lun *scsilun, void *data);
++extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
+diff --git a/fs/bio.c b/fs/bio.c
+index 6a0b9ad..ade77bf 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -621,10 +621,9 @@ static struct bio *__bio_map_user_iov(re
+
+ nr_pages += end - start;
+ /*
+- * transfer and buffer must be aligned to at least hardsector
+- * size for now, in the future we can relax this restriction
++ * buffer must be aligned to at least hardsector size for now
+ */
+- if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
++ if (uaddr & queue_dma_alignment(q))
+ return ERR_PTR(-EINVAL);
+ }
+
+@@ -750,7 +749,6 @@ struct bio *bio_map_user_iov(request_que
+ int write_to_vm)
+ {
+ struct bio *bio;
+- int len = 0, i;
+
+ bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
+
+@@ -765,18 +763,7 @@ struct bio *bio_map_user_iov(request_que
+ */
+ bio_get(bio);
+
+- for (i = 0; i < iov_count; i++)
+- len += iov[i].iov_len;
+-
+- if (bio->bi_size == len)
+- return bio;
+-
+- /*
+- * don't support partial mappings
+- */
+- bio_endio(bio, bio->bi_size, 0);
+- bio_unmap_user(bio);
+- return ERR_PTR(-EINVAL);
++ return bio;
+ }
+
+ static void __bio_unmap_user(struct bio *bio)
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 59e1259..cec7ccf 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -624,7 +624,8 @@ extern void blk_queue_activity_fn(reques
+ extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
+ extern int blk_rq_unmap_user(struct bio *, unsigned int);
+ extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
+-extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
++extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
++ struct sg_iovec *, int, unsigned int);
+ extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+ struct request *, int);
+ extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
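With the widened prototype, callers of blk_rq_map_user_iov() now pass the
expected total length so partial mappings can be rejected in one place. A
sketch of an updated caller follows; the helper name is invented (the sg
ioctl path in block/scsi_ioctl.c is adjusted along these lines by this
patch):

    /*
     * Sketch of a caller updated for the new fifth parameter.
     */
    static int map_iov_to_rq(request_queue_t *q, struct request *rq,
                             struct sg_iovec *iov, int iov_count)
    {
            unsigned int len = 0;
            int i;

            /* the caller totals the iovec so the block layer can reject
             * partial mappings (the check moved out of fs/bio.c) */
            for (i = 0; i < iov_count; i++)
                    len += iov[i].iov_len;

            return blk_rq_map_user_iov(q, rq, iov, iov_count, len);
    }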
+diff --git a/include/scsi/libsrp.h b/include/scsi/libsrp.h
+new file mode 100644
+index 0000000..9dd10ff
+--- /dev/null
++++ b/include/scsi/libsrp.h
+@@ -0,0 +1,75 @@
++#ifndef __LIBSRP_H__
++#define __LIBSRP_H__
++
++#include <linux/list.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/srp.h>
++
++enum iue_flags {
++ V_DIOVER,
++ V_WRITE,
++ V_LINKED,
++ V_FLYING,
++};
++
++struct srp_buf {
++ dma_addr_t dma;
++ void *buf;
++};
++
++struct srp_queue {
++ void *pool;
++ void *items;
++ struct kfifo *queue;
++ spinlock_t lock;
++};
++
++struct srp_target {
++ struct Scsi_Host *shost;
++ struct device *dev;
++
++ spinlock_t lock;
++ struct list_head cmd_queue;
++
++ size_t srp_iu_size;
++ struct srp_queue iu_queue;
++ size_t rx_ring_size;
++ struct srp_buf **rx_ring;
++
++ /* IB needs tx_ring too */
++
++ void *ldata;
++};
++
++struct iu_entry {
++ struct srp_target *target;
++ struct scsi_cmnd *scmd;
++
++ struct list_head ilist;
++ dma_addr_t remote_token;
++ unsigned long flags;
++
++ struct srp_buf *sbuf;
++};
++
++typedef int (rdma_io_t) (struct iu_entry *, struct scatterlist *, int,
++ struct srp_direct_buf *, int,
++ enum dma_data_direction, unsigned int);
++
++static inline struct srp_target *host_to_target(struct Scsi_Host *host)
++{
++ return (struct srp_target *) host->hostdata;
++}
++
++extern int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
++extern void srp_target_free(struct srp_target *);
++
++extern struct iu_entry *srp_iu_get(struct srp_target *);
++extern void srp_iu_put(struct iu_entry *);
++
++extern int srp_cmd_perform(struct iu_entry *iue, struct srp_cmd *cmd);
++extern int srp_transfer_data(struct scsi_cmnd *scmd, struct srp_cmd *cmd,
++ rdma_io_t rdma_io);
++
++#endif
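The rdma_io_t hook above is the transport-specific data mover that
srp_transfer_data() calls back into. A do-nothing skeleton showing its
expected shape; my_rdma_io is an invented name, not part of the patch:

    /*
     * Skeleton only. A real transport (e.g. the VIO driver in this
     * patch) moves data between the scatterlist and the remote buffers
     * described by md[].
     */
    static int my_rdma_io(struct iu_entry *iue, struct scatterlist *sg,
                          int nsg, struct srp_direct_buf *md, int nmd,
                          enum dma_data_direction dir, unsigned int rest)
    {
            /* map sg, issue RDMA for up to rest bytes, return bytes
             * moved or a negative errno */
            return 0;
    }

    /* from the LLD's transfer_data hook:
     *      err = srp_transfer_data(scmd, srp_cmd, my_rdma_io);
     */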
+diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
+index 1ace1b9..d69dbf5 100644
+--- a/include/scsi/scsi_cmnd.h
++++ b/include/scsi/scsi_cmnd.h
+@@ -8,6 +8,7 @@
+
+ struct request;
+ struct scatterlist;
++struct Scsi_Host;
+ struct scsi_device;
+ struct scsi_request;
+
+@@ -84,6 +85,8 @@ struct scsi_cmnd {
+ unsigned short sglist_len; /* size of malloc'd scatter-gather list */
+ unsigned bufflen; /* Size of data buffer */
+ void *buffer; /* Data buffer */
++ /* offset in cmd we are at (for multi-transfer tgt cmds) */
++ unsigned offset;
+
+ unsigned underflow; /* Return error if less than
+ this amount is transferred */
+@@ -148,8 +151,13 @@ struct scsi_cmnd {
+
+
+ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
++extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
+ extern void scsi_put_command(struct scsi_cmnd *);
++extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
++ struct device *);
+ extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
+ extern void scsi_finish_command(struct scsi_cmnd *cmd);
++extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
++extern void scsi_free_sgtable(struct scatterlist *, int);
+
+ #endif /* _SCSI_SCSI_CMND_H */
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index de6ce54..353abe5 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -7,6 +7,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/mutex.h>
+
++struct request_queue;
+ struct block_device;
+ struct completion;
+ struct module;
+@@ -123,6 +124,39 @@ struct scsi_host_template {
+ void (*done)(struct scsi_cmnd *));
+
+ /*
++ * The transfer functions are used to queue a scsi command to
++ * the LLD. When the driver is finished processing the command
++ * the done callback is invoked.
++ *
++ * return values: see queuecommand
++ *
++ * If the LLD accepts the cmd, it should set the result to an
++ * appropriate value when completed before calling the done function.
++ *
++ * STATUS: REQUIRED FOR TARGET DRIVERS
++ */
++ /* TODO: rename */
++ int (* transfer_response)(struct scsi_cmnd *,
++ void (*done)(struct scsi_cmnd *));
++ /*
++ * This is called to inform the LLD to transfer cmd->request_bufflen
++ * bytes of the cmd at cmd->offset in the cmd. The cmd->use_sg
++ * specifies the number of scatterlist entries in the command
++ * and cmd->request_buffer contains the scatterlist.
++ *
++ * If the command cannot be processed in one transfer_data call
++ * because a scatterlist within the LLD's limits cannot be
++ * created then transfer_data will be called multiple times.
++ * It is initially called from process context, and later
++ * calls are from interrupt context.
++ */
++ int (* transfer_data)(struct scsi_cmnd *,
++ void (*done)(struct scsi_cmnd *));
++
++ /* Used as callback for the completion of task management request. */
++ int (* tsk_mgmt_response)(u64 mid, int result);
++
++ /*
+ * This is an error handling strategy routine. You don't need to
+ * define one of these if you don't want to - there is a default
+ * routine that is present that should work in most cases. For those
+@@ -557,6 +591,12 @@ struct Scsi_Host {
+ */
+ unsigned int max_host_blocked;
+
++ /*
++ * q used for scsi_tgt msgs, async events or any other requests that
++ * need to be processed in userspace
++ */
++ struct request_queue *uspace_req_q;
++
+ /* legacy crap */
+ unsigned long base;
+ unsigned long io_port;
+@@ -659,6 +699,9 @@ extern void scsi_unblock_requests(struct
+ extern void scsi_block_requests(struct Scsi_Host *);
+
+ struct class_container;
++
++extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
++ void (*) (struct request_queue *));
+ /*
+ * These two functions are used to allocate and free a pseudo device
+ * which will connect to the host adapter itself rather than any
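With the new template fields, a target driver wires its hooks roughly as in
the sketch below. The my_* handler names are invented; only the three tgt
hooks are new in this patch, the rest is the usual template boilerplate:

    static struct scsi_host_template my_tgt_template = {
            .name                  = "my_tgt",
            .module                = THIS_MODULE,
            .can_queue             = 64,
            .sg_tablesize          = SG_ALL,
            .transfer_response     = my_transfer_response,
            .transfer_data         = my_transfer_data,
            .tsk_mgmt_response     = my_tsk_mgmt_response,
    };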
+diff --git a/include/scsi/scsi_tgt.h b/include/scsi/scsi_tgt.h
+new file mode 100644
+index 0000000..61e8ee9
+--- /dev/null
++++ b/include/scsi/scsi_tgt.h
+@@ -0,0 +1,17 @@
++/*
++ * SCSI target definitions
++ */
++
++#include <linux/dma-mapping.h>
++
++struct Scsi_Host;
++struct scsi_cmnd;
++struct scsi_lun;
++
++extern struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *);
++extern int scsi_tgt_alloc_queue(struct Scsi_Host *);
++extern int scsi_tgt_queue_command(struct scsi_cmnd *, struct scsi_lun *, u64);
++extern int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *, int, u64, struct scsi_lun *,
++ void *);
++extern struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *,
++ enum dma_data_direction, gfp_t);
+diff --git a/include/scsi/scsi_tgt_if.h b/include/scsi/scsi_tgt_if.h
+new file mode 100644
+index 0000000..0cc1b9b
+--- /dev/null
++++ b/include/scsi/scsi_tgt_if.h
+@@ -0,0 +1,91 @@
++/*
++ * SCSI target kernel/user interface
++ *
++ * Copyright (C) 2005 FUJITA Tomonori <tomof@xxxxxxx>
++ * Copyright (C) 2005 Mike Christie <michaelc@xxxxxxxxxxx>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++#ifndef __SCSI_TARGET_IF_H
++#define __SCSI_TARGET_IF_H
++
++enum tgt_event_type {
++ /* user -> kernel */
++ TGT_UEVENT_CMD_RSP,
++ TGT_UEVENT_TSK_MGMT_RSP,
++
++ /* kernel -> user */
++ TGT_KEVENT_CMD_REQ,
++ TGT_KEVENT_CMD_DONE,
++ TGT_KEVENT_TSK_MGMT_REQ,
++};
++
++struct tgt_event {
++ uint32_t type;
++ /* user-> kernel */
++ union {
++ struct {
++ int host_no;
++ uint32_t cid;
++ uint32_t len;
++ int result;
++ uint64_t uaddr;
++ uint8_t rw;
++ } cmd_rsp;
++ struct {
++ int host_no;
++ uint64_t mid;
++ int result;
++ } tsk_mgmt_rsp;
++ } u;
++
++ /* kernel -> user */
++ union {
++ struct {
++ int host_no;
++ uint32_t cid;
++ uint32_t data_len;
++ uint8_t scb[16];
++ uint8_t lun[8];
++ int attribute;
++ uint64_t tag;
++ uint64_t uaddr;
++ } cmd_req;
++ struct {
++ int host_no;
++ uint32_t cid;
++ int result;
++ } cmd_done;
++ struct {
++ int host_no;
++ int function;
++ uint64_t tag;
++ uint8_t lun[8];
++ uint64_t mid;
++ } tsk_mgmt_req;
++ } k;
++
++} __attribute__ ((aligned (sizeof(uint64_t))));
++
++#define TGT_RINGBUF_SIZE (1UL << 16)
++
++struct rbuf_hdr {
++ uint32_t status;
++ uint32_t len;
++ uint64_t data[0];
++} __attribute__ ((aligned (sizeof(uint64_t))));
++
++#endif
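On the userspace side, answering a TGT_KEVENT_CMD_REQ means filling the
cmd_rsp leg of the union above and queueing it in the rx ring. A sketch
follows; the helper name is invented, and placing the event in an rx-ring
slot and kicking write(2) is elided (see tgt_write() earlier in the patch):

    /*
     * Invented helper; fills the user->kernel response for a command.
     */
    #include <stdint.h>
    #include <string.h>

    static void fill_cmd_rsp(struct tgt_event *ev, int host_no,
                             uint32_t cid, void *buf, uint32_t len,
                             int result, uint8_t rw)
    {
            memset(ev, 0, sizeof(*ev));
            ev->type = TGT_UEVENT_CMD_RSP;
            ev->u.cmd_rsp.host_no = host_no;
            ev->u.cmd_rsp.cid = cid;
            ev->u.cmd_rsp.uaddr = (unsigned long) buf;
            ev->u.cmd_rsp.len = len;
            ev->u.cmd_rsp.result = result;
            ev->u.cmd_rsp.rw = rw;
    }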
+--
+1.1.5