author     James Bottomley <jejb@mulgrave.(none)>   2005-10-28 11:41:41 -0500
committer  James Bottomley <jejb@mulgrave.(none)>   2005-10-28 11:41:41 -0500
commit     38a9a621aba953ddb8051547e98c10ec3c741312 (patch)
tree       53fc96a2902d2c5bf2a82d97d99b78570c76291e /drivers
parent     27d1097d39509494706eaa2620ef3b1e780a3224 (diff)
parent     e75d51761debffbc5e556e02c8ceda4f8c1a3e83 (diff)
download   blackbird-op-linux-38a9a621aba953ddb8051547e98c10ec3c741312.tar.gz
           blackbird-op-linux-38a9a621aba953ddb8051547e98c10ec3c741312.zip
Merge HEAD from ../scsi-misc-2.6-old
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/Kconfig                |   26
-rw-r--r--  drivers/scsi/Makefile               |    1
-rw-r--r--  drivers/scsi/aacraid/aachba.c       |   57
-rw-r--r--  drivers/scsi/aacraid/aacraid.h      |    2
-rw-r--r--  drivers/scsi/aacraid/commctrl.c     |    6
-rw-r--r--  drivers/scsi/aacraid/linit.c        |   10
-rw-r--r--  drivers/scsi/dc395x.c               |   13
-rw-r--r--  drivers/scsi/iscsi_tcp.c            | 3642
-rw-r--r--  drivers/scsi/iscsi_tcp.h            |  322
-rw-r--r--  drivers/scsi/ncr53c8xx.c            |   22
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 1380
-rw-r--r--  drivers/scsi/sym53c8xx_defs.h       |   13
-rw-r--r--  drivers/scsi/tmscsim.c              |    8
13 files changed, 5206 insertions(+), 296 deletions(-)
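For orientation before the hunks themselves: a minimal sketch (not part of this commit) of the kernel configuration needed to build the new initiator as a module. The option names, dependencies and selects come from the CONFIG_ISCSI_TCP Kconfig hunk below; the y/m values are illustrative assumptions.

    # Illustrative .config fragment, not from this commit.
    # ISCSI_TCP depends on SCSI && INET and itself selects SCSI_ISCSI_ATTRS,
    # CRYPTO, CRYPTO_MD5 and CRYPTO_CRC32C, per the Kconfig hunk below.
    CONFIG_SCSI=m
    CONFIG_INET=y
    CONFIG_ISCSI_TCP=m

With these set, the new Makefile rule builds the driver as iscsi_tcp; per its Kconfig help text, the userspace component needed to initialize it is found at http://linux-iscsi.sf.net.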
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c9f162bd6ed..78c33180ebed 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -229,7 +229,7 @@ config SCSI_FC_ATTRS
 
 config SCSI_ISCSI_ATTRS
 	tristate "iSCSI Transport Attributes"
-	depends on SCSI
+	depends on SCSI && NET
 	help
 	  If you wish to export transport-specific information about
 	  each attached iSCSI device to sysfs, say Y.
@@ -247,6 +247,30 @@ endmenu
 menu "SCSI low-level drivers"
 	depends on SCSI!=n
 
+config ISCSI_TCP
+	tristate "iSCSI Initiator over TCP/IP"
+	depends on SCSI && INET
+	select CRYPTO
+	select CRYPTO_MD5
+	select CRYPTO_CRC32C
+	select SCSI_ISCSI_ATTRS
+	help
+	 The iSCSI Driver provides a host with the ability to access storage
+	 through an IP network. The driver uses the iSCSI protocol to transport
+	 SCSI requests and responses over a TCP/IP network between the host
+	 (the "initiator") and "targets". Architecturally, the iSCSI driver
+	 combines with the host's TCP/IP stack, network drivers, and Network
+	 Interface Card (NIC) to provide the same functions as a SCSI or a
+	 Fibre Channel (FC) adapter driver with a Host Bus Adapter (HBA).
+
+	 To compile this driver as a module, choose M here: the
+	 module will be called iscsi_tcp.
+
+	 The userspace component needed to initialize the driver, documentation,
+	 and sample configuration files can be found here:
+
+	 http://linux-iscsi.sf.net
+
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
 	depends on SGI_IP22 && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2d4439826c08..8dfb9884afe0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_SCSI_FC_ATTRS) 	+= scsi_transport_fc.o
 obj-$(CONFIG_SCSI_ISCSI_ATTRS)	+= scsi_transport_iscsi.o
 obj-$(CONFIG_SCSI_SAS_ATTRS)	+= scsi_transport_sas.o
 
+obj-$(CONFIG_ISCSI_TCP)		+= iscsi_tcp.o
 obj-$(CONFIG_SCSI_AMIGA7XX)	+= amiga7xx.o	53c7xx.o
 obj-$(CONFIG_A3000_SCSI)	+= a3000.o	wd33c93.o
 obj-$(CONFIG_A2091_SCSI)	+= a2091.o	wd33c93.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 93416f760e5a..a913196459d5 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -608,17 +608,43 @@ static char *container_types[] = {
  *	files instead of in OS dependant driver source.
*/ -static void setinqstr(int devtype, void *data, int tindex) +static void setinqstr(struct aac_dev *dev, void *data, int tindex) { struct scsi_inq *str; - struct aac_driver_ident *mp; - mp = aac_get_driver_ident(devtype); - str = (struct scsi_inq *)(data); /* cast data to scsi inq block */ - - inqstrcpy (mp->vname, str->vid); - inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */ + memset(str, ' ', sizeof(*str)); + + if (dev->supplement_adapter_info.AdapterTypeText[0]) { + char * cp = dev->supplement_adapter_info.AdapterTypeText; + int c = sizeof(str->vid); + while (*cp && *cp != ' ' && --c) + ++cp; + c = *cp; + *cp = '\0'; + inqstrcpy (dev->supplement_adapter_info.AdapterTypeText, + str->vid); + *cp = c; + while (*cp && *cp != ' ') + ++cp; + while (*cp == ' ') + ++cp; + /* last six chars reserved for vol type */ + c = 0; + if (strlen(cp) > sizeof(str->pid)) { + c = cp[sizeof(str->pid)]; + cp[sizeof(str->pid)] = '\0'; + } + inqstrcpy (cp, str->pid); + if (c) + cp[sizeof(str->pid)] = c; + } else { + struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype); + + inqstrcpy (mp->vname, str->vid); + /* last six chars reserved for vol type */ + inqstrcpy (mp->model, str->pid); + } if (tindex < (sizeof(container_types)/sizeof(char *))){ char *findit = str->pid; @@ -627,7 +653,9 @@ static void setinqstr(int devtype, void *data, int tindex) /* RAID is superfluous in the context of a RAID device */ if (memcmp(findit-4, "RAID", 4) == 0) *(findit -= 4) = ' '; - inqstrcpy (container_types[tindex], findit + 1); + if (((findit - str->pid) + strlen(container_types[tindex])) + < (sizeof(str->pid) + sizeof(str->prl))) + inqstrcpy (container_types[tindex], findit + 1); } inqstrcpy ("V1.0", str->prl); } @@ -822,12 +850,12 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->dac_support = (dacmode!=0); } if(dev->dac_support != 0) { - if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) && - !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) { + if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && + !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", dev->name, dev->id); - } else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) && - !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) { + } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && + !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", dev->name, dev->id); dev->dac_support = 0; @@ -1438,7 +1466,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) struct Scsi_Host *host = scsicmd->device->host; struct aac_dev *dev = (struct aac_dev *)host->hostdata; struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; - int cardtype = dev->cardtype; int ret; /* @@ -1542,14 +1569,14 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) * see: <vendor>.c i.e. 
aac.c */ if (scsicmd->device->id == host->this_id) { - setinqstr(cardtype, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *))); + setinqstr(dev, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *))); inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->scsi_done(scsicmd); return 0; } - setinqstr(cardtype, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); + setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); return aac_get_container_name(scsicmd, cid); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d54b1cc88d0d..2ebe402bc31a 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1560,7 +1560,7 @@ struct fib_ioctl struct revision { - __le32 compat; + u32 compat; __le32 version; __le32 build; }; diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 71f1cad9b5f0..ef623bd965f5 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -408,7 +408,7 @@ static int check_revision(struct aac_dev *dev, void __user *arg) char *driver_version = aac_driver_version; u32 version; - response.compat = cpu_to_le32(1); + response.compat = 1; version = (simple_strtol(driver_version, &driver_version, 10) << 24) | 0x00000400; version += simple_strtol(driver_version + 1, &driver_version, 10) << 16; @@ -574,7 +574,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -ENOMEM; goto cleanup; } - sg_user[i] = (void __user *)usg->sg[i].addr; + sg_user[i] = (void __user *)(long)usg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; @@ -624,7 +624,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -ENOMEM; goto cleanup; } - sg_user[i] = (void __user *)upsg->sg[i].addr; + sg_user[i] = (void __user *)(long)upsg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a1f9ceef0ac9..c235d0c0e7a7 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -752,8 +752,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, if (error) goto out; - if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) || - pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFULL)) + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || + pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) goto out; /* * If the quirk31 bit is set, the adapter needs adapter @@ -797,9 +797,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, * address space. 
*/ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) - if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL)) - goto out_free_fibs; - + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) + goto out_deinit; + aac->maximum_num_channels = aac_drivers[index].channels; error = aac_get_adapter_info(aac); if (error < 0) diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 600ba1202864..c44af5795b10 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -976,6 +976,16 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) } } +static inline void pio_trigger(void) +{ + static int feedback_requested; + + if (!feedback_requested) { + feedback_requested = 1; + printk(KERN_WARNING "%s: Please, contact <linux-scsi@vger.kernel.org> " + "to help improve support for your system.\n", __FILE__); + } +} /* Prepare SRB for being sent to Device DCB w/ command *cmd */ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, @@ -2320,6 +2330,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, CFG2_WIDEFIFO); while (DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) != 0x40) { u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + pio_trigger(); *(srb->virt_addr)++ = byte; if (debug_enabled(DBG_PIO)) printk(" %02x", byte); @@ -2331,6 +2342,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, /* Read the last byte ... */ if (srb->total_xfer_length > 0) { u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); + pio_trigger(); *(srb->virt_addr)++ = byte; srb->total_xfer_length--; if (debug_enabled(DBG_PIO)) @@ -2507,6 +2519,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, if (debug_enabled(DBG_PIO)) printk(" %02x", (unsigned char) *(srb->virt_addr)); + pio_trigger(); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *(srb->virt_addr)++); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c new file mode 100644 index 000000000000..4fea3e4edaa7 --- /dev/null +++ b/drivers/scsi/iscsi_tcp.c @@ -0,0 +1,3642 @@ +/* + * iSCSI Initiator over TCP/IP Data-Path + * + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 Mike Christie + * maintained by open-iscsi@googlegroups.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. 
+ * + * Credits: + * Christoph Hellwig + * FUJITA Tomonori + * Arne Redlich + * Zhenyu Wang + */ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/inet.h> +#include <linux/blkdev.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/kfifo.h> +#include <linux/scatterlist.h> +#include <net/tcp.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_request.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi.h> +#include <scsi/scsi_transport_iscsi.h> + +#include "iscsi_tcp.h" + +MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " + "Alex Aizman <itn780@yahoo.com>"); +MODULE_DESCRIPTION("iSCSI/TCP data-path"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0:4.409"); +/* #define DEBUG_TCP */ +/* #define DEBUG_SCSI */ +#define DEBUG_ASSERT + +#ifdef DEBUG_TCP +#define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt) +#else +#define debug_tcp(fmt...) +#endif + +#ifdef DEBUG_SCSI +#define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt) +#else +#define debug_scsi(fmt...) +#endif + +#ifndef DEBUG_ASSERT +#ifdef BUG_ON +#undef BUG_ON +#endif +#define BUG_ON(expr) +#endif + +#define INVALID_SN_DELTA 0xffff + +static unsigned int iscsi_max_lun = 512; +module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); + +/* global data */ +static kmem_cache_t *taskcache; + +static inline void +iscsi_buf_init_virt(struct iscsi_buf *ibuf, char *vbuf, int size) +{ + sg_init_one(&ibuf->sg, (u8 *)vbuf, size); + ibuf->sent = 0; +} + +static inline void +iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size) +{ + ibuf->sg.page = (void*)vbuf; + ibuf->sg.offset = (unsigned int)-1; + ibuf->sg.length = size; + ibuf->sent = 0; +} + +static inline void* +iscsi_buf_iov_base(struct iscsi_buf *ibuf) +{ + return (char*)ibuf->sg.page + ibuf->sent; +} + +static inline void +iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg) +{ + /* + * Fastpath: sg element fits into single page + */ + if (sg->length + sg->offset <= PAGE_SIZE && page_count(sg->page) >= 2) { + ibuf->sg.page = sg->page; + ibuf->sg.offset = sg->offset; + ibuf->sg.length = sg->length; + } else + iscsi_buf_init_iov(ibuf, page_address(sg->page), sg->length); + ibuf->sent = 0; +} + +static inline int +iscsi_buf_left(struct iscsi_buf *ibuf) +{ + int rc; + + rc = ibuf->sg.length - ibuf->sent; + BUG_ON(rc < 0); + return rc; +} + +static inline void +iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, + u8* crc) +{ + crypto_digest_digest(conn->tx_tfm, &buf->sg, 1, crc); + buf->sg.length += sizeof(uint32_t); +} + +static void +iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) +{ + struct iscsi_session *session = conn->session; + unsigned long flags; + + spin_lock_irqsave(&session->lock, flags); + if (session->conn_cnt == 1 || session->leadconn == conn) + session->state = ISCSI_STATE_FAILED; + spin_unlock_irqrestore(&session->lock, flags); + set_bit(SUSPEND_BIT, &conn->suspend_tx); + set_bit(SUSPEND_BIT, &conn->suspend_rx); + iscsi_conn_error(iscsi_handle(conn), err); +} + +static inline int +iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) +{ + uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn); + uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn); + + if (max_cmdsn < exp_cmdsn -1 && + max_cmdsn > exp_cmdsn - INVALID_SN_DELTA) + return ISCSI_ERR_MAX_CMDSN; + if (max_cmdsn > session->max_cmdsn || + max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA) + session->max_cmdsn = 
max_cmdsn; + if (exp_cmdsn > session->exp_cmdsn || + exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA) + session->exp_cmdsn = exp_cmdsn; + + return 0; +} + +static inline int +iscsi_hdr_extract(struct iscsi_conn *conn) +{ + struct sk_buff *skb = conn->in.skb; + + if (conn->in.copy >= conn->hdr_size && + conn->in_progress == IN_PROGRESS_WAIT_HEADER) { + /* + * Zero-copy PDU Header: using connection context + * to store header pointer. + */ + if (skb_shinfo(skb)->frag_list == NULL && + !skb_shinfo(skb)->nr_frags) + conn->in.hdr = (struct iscsi_hdr *) + ((char*)skb->data + conn->in.offset); + else { + /* ignoring return code since we checked + * in.copy before */ + skb_copy_bits(skb, conn->in.offset, + &conn->hdr, conn->hdr_size); + conn->in.hdr = &conn->hdr; + } + conn->in.offset += conn->hdr_size; + conn->in.copy -= conn->hdr_size; + } else { + int hdr_remains; + int copylen; + + /* + * PDU header scattered across SKB's, + * copying it... This'll happen quite rarely. + */ + + if (conn->in_progress == IN_PROGRESS_WAIT_HEADER) + conn->in.hdr_offset = 0; + + hdr_remains = conn->hdr_size - conn->in.hdr_offset; + BUG_ON(hdr_remains <= 0); + + copylen = min(conn->in.copy, hdr_remains); + skb_copy_bits(skb, conn->in.offset, + (char*)&conn->hdr + conn->in.hdr_offset, copylen); + + debug_tcp("PDU gather offset %d bytes %d in.offset %d " + "in.copy %d\n", conn->in.hdr_offset, copylen, + conn->in.offset, conn->in.copy); + + conn->in.offset += copylen; + conn->in.copy -= copylen; + if (copylen < hdr_remains) { + conn->in_progress = IN_PROGRESS_HEADER_GATHER; + conn->in.hdr_offset += copylen; + return -EAGAIN; + } + conn->in.hdr = &conn->hdr; + conn->discontiguous_hdr_cnt++; + conn->in_progress = IN_PROGRESS_WAIT_HEADER; + } + + return 0; +} + +static inline void +iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct scsi_cmnd *sc = ctask->sc; + struct iscsi_session *session = conn->session; + + spin_lock(&session->lock); + if (unlikely(!sc)) { + spin_unlock(&session->lock); + return; + } + if (sc->sc_data_direction == DMA_TO_DEVICE) { + struct iscsi_data_task *dtask, *n; + /* WRITE: cleanup Data-Out's if any */ + spin_lock(&conn->lock); + list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { + list_del(&dtask->item); + mempool_free(dtask, ctask->datapool); + } + spin_unlock(&conn->lock); + } + ctask->xmstate = XMSTATE_IDLE; + ctask->r2t = NULL; + ctask->sc = NULL; + __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); + spin_unlock(&session->lock); +} + +/** + * iscsi_cmd_rsp - SCSI Command Response processing + * @conn: iscsi connection + * @ctask: scsi command task + **/ +static int +iscsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + int rc; + struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)conn->in.hdr; + struct iscsi_session *session = conn->session; + struct scsi_cmnd *sc = ctask->sc; + + rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr); + if (rc) { + sc->result = (DID_ERROR << 16); + goto out; + } + + conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; + + sc->result = (DID_OK << 16) | rhdr->cmd_status; + + if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { + sc->result = (DID_ERROR << 16); + goto out; + } + + if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION && conn->senselen) { + int sensecopy = min(conn->senselen, SCSI_SENSE_BUFFERSIZE); + + memcpy(sc->sense_buffer, conn->data + 2, sensecopy); + debug_scsi("copied %d bytes of sense\n", sensecopy); + } + + if (sc->sc_data_direction == 
DMA_TO_DEVICE) + goto out; + + if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) { + int res_count = be32_to_cpu(rhdr->residual_count); + + if (res_count > 0 && res_count <= sc->request_bufflen) + sc->resid = res_count; + else + sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; + } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW) + sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; + else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW) + sc->resid = be32_to_cpu(rhdr->residual_count); + +out: + debug_scsi("done [sc %lx res %d itt 0x%x]\n", + (long)sc, sc->result, ctask->itt); + conn->scsirsp_pdus_cnt++; + iscsi_ctask_cleanup(conn, ctask); + sc->scsi_done(sc); + return rc; +} + +/** + * iscsi_data_rsp - SCSI Data-In Response processing + * @conn: iscsi connection + * @ctask: scsi command task + **/ +static int +iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + int rc; + struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)conn->in.hdr; + struct iscsi_session *session = conn->session; + int datasn = be32_to_cpu(rhdr->datasn); + + rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr); + if (rc) + return rc; + /* + * setup Data-In byte counter (gets decremented..) + */ + ctask->data_count = conn->in.datalen; + + if (conn->in.datalen == 0) + return 0; + + if (ctask->datasn != datasn) + return ISCSI_ERR_DATASN; + + ctask->datasn++; + + ctask->data_offset = be32_to_cpu(rhdr->offset); + if (ctask->data_offset + conn->in.datalen > ctask->total_length) + return ISCSI_ERR_DATA_OFFSET; + + if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) { + struct scsi_cmnd *sc = ctask->sc; + + conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; + if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) { + int res_count = be32_to_cpu(rhdr->residual_count); + + if (res_count > 0 && + res_count <= sc->request_bufflen) { + sc->resid = res_count; + sc->result = (DID_OK << 16) | rhdr->cmd_status; + } else + sc->result = (DID_BAD_TARGET << 16) | + rhdr->cmd_status; + } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW) + sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; + else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW) { + sc->resid = be32_to_cpu(rhdr->residual_count); + sc->result = (DID_OK << 16) | rhdr->cmd_status; + } else + sc->result = (DID_OK << 16) | rhdr->cmd_status; + } + + conn->datain_pdus_cnt++; + return 0; +} + +/** + * iscsi_solicit_data_init - initialize first Data-Out + * @conn: iscsi connection + * @ctask: scsi command task + * @r2t: R2T info + * + * Notes: + * Initialize first Data-Out within this R2T sequence and finds + * proper data_offset within this SCSI command. + * + * This function is called with connection lock taken. 
+ **/ +static void +iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, + struct iscsi_r2t_info *r2t) +{ + struct iscsi_data *hdr; + struct iscsi_data_task *dtask; + struct scsi_cmnd *sc = ctask->sc; + + dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC); + BUG_ON(!dtask); + hdr = &dtask->hdr; + memset(hdr, 0, sizeof(struct iscsi_data)); + hdr->ttt = r2t->ttt; + hdr->datasn = cpu_to_be32(r2t->solicit_datasn); + r2t->solicit_datasn++; + hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; + memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun)); + hdr->itt = ctask->hdr.itt; + hdr->exp_statsn = r2t->exp_statsn; + hdr->offset = cpu_to_be32(r2t->data_offset); + if (r2t->data_length > conn->max_xmit_dlength) { + hton24(hdr->dlength, conn->max_xmit_dlength); + r2t->data_count = conn->max_xmit_dlength; + hdr->flags = 0; + } else { + hton24(hdr->dlength, r2t->data_length); + r2t->data_count = r2t->data_length; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + } + conn->dataout_pdus_cnt++; + + r2t->sent = 0; + + iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr, + sizeof(struct iscsi_hdr)); + + r2t->dtask = dtask; + + if (sc->use_sg) { + int i, sg_count = 0; + struct scatterlist *sg = sc->request_buffer; + + r2t->sg = NULL; + for (i = 0; i < sc->use_sg; i++, sg += 1) { + /* FIXME: prefetch ? */ + if (sg_count + sg->length > r2t->data_offset) { + int page_offset; + + /* sg page found! */ + + /* offset within this page */ + page_offset = r2t->data_offset - sg_count; + + /* fill in this buffer */ + iscsi_buf_init_sg(&r2t->sendbuf, sg); + r2t->sendbuf.sg.offset += page_offset; + r2t->sendbuf.sg.length -= page_offset; + + /* xmit logic will continue with next one */ + r2t->sg = sg + 1; + break; + } + sg_count += sg->length; + } + BUG_ON(r2t->sg == NULL); + } else + iscsi_buf_init_iov(&ctask->sendbuf, + (char*)sc->request_buffer + r2t->data_offset, + r2t->data_count); + + list_add(&dtask->item, &ctask->dataqueue); +} + +/** + * iscsi_r2t_rsp - iSCSI R2T Response processing + * @conn: iscsi connection + * @ctask: scsi command task + **/ +static int +iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct iscsi_r2t_info *r2t; + struct iscsi_session *session = conn->session; + struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)conn->in.hdr; + int r2tsn = be32_to_cpu(rhdr->r2tsn); + int rc; + + if (conn->in.ahslen) + return ISCSI_ERR_AHSLEN; + + if (conn->in.datalen) + return ISCSI_ERR_DATALEN; + + if (ctask->exp_r2tsn && ctask->exp_r2tsn != r2tsn) + return ISCSI_ERR_R2TSN; + + rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr); + if (rc) + return rc; + + /* FIXME: use R2TSN to detect missing R2T */ + + /* fill-in new R2T associated with the task */ + spin_lock(&session->lock); + if (!ctask->sc || ctask->mtask || + session->state != ISCSI_STATE_LOGGED_IN) { + printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " + "recovery...\n", ctask->itt); + spin_unlock(&session->lock); + return 0; + } + rc = __kfifo_get(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); + BUG_ON(!rc); + + r2t->exp_statsn = rhdr->statsn; + r2t->data_length = be32_to_cpu(rhdr->data_length); + if (r2t->data_length == 0 || + r2t->data_length > session->max_burst) { + spin_unlock(&session->lock); + return ISCSI_ERR_DATALEN; + } + + r2t->data_offset = be32_to_cpu(rhdr->data_offset); + if (r2t->data_offset + r2t->data_length > ctask->total_length) { + spin_unlock(&session->lock); + return ISCSI_ERR_DATALEN; + } + + r2t->ttt = rhdr->ttt; /* no flip */ + r2t->solicit_datasn = 0; + + 
iscsi_solicit_data_init(conn, ctask, r2t); + + ctask->exp_r2tsn = r2tsn + 1; + ctask->xmstate |= XMSTATE_SOL_HDR; + __kfifo_put(ctask->r2tqueue, (void*)&r2t, sizeof(void*)); + __kfifo_put(conn->writequeue, (void*)&ctask, sizeof(void*)); + + schedule_work(&conn->xmitwork); + conn->r2t_pdus_cnt++; + spin_unlock(&session->lock); + + return 0; +} + +static int +iscsi_hdr_recv(struct iscsi_conn *conn) +{ + int rc = 0; + struct iscsi_hdr *hdr; + struct iscsi_cmd_task *ctask; + struct iscsi_session *session = conn->session; + uint32_t cdgst, rdgst = 0; + + hdr = conn->in.hdr; + + /* verify PDU length */ + conn->in.datalen = ntoh24(hdr->dlength); + if (conn->in.datalen > conn->max_recv_dlength) { + printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n", + conn->in.datalen, conn->max_recv_dlength); + return ISCSI_ERR_DATALEN; + } + conn->data_copied = 0; + + /* read AHS */ + conn->in.ahslen = hdr->hlength * 4; + conn->in.offset += conn->in.ahslen; + conn->in.copy -= conn->in.ahslen; + if (conn->in.copy < 0) { + printk(KERN_ERR "iscsi_tcp: can't handle AHS with length " + "%d bytes\n", conn->in.ahslen); + return ISCSI_ERR_AHSLEN; + } + + /* calculate read padding */ + conn->in.padding = conn->in.datalen & (ISCSI_PAD_LEN-1); + if (conn->in.padding) { + conn->in.padding = ISCSI_PAD_LEN - conn->in.padding; + debug_scsi("read padding %d bytes\n", conn->in.padding); + } + + if (conn->hdrdgst_en) { + struct scatterlist sg; + + sg_init_one(&sg, (u8 *)hdr, + sizeof(struct iscsi_hdr) + conn->in.ahslen); + crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst); + rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + + conn->in.ahslen); + } + + /* save opcode for later */ + conn->in.opcode = hdr->opcode; + + /* verify itt (itt encoding: age+cid+itt) */ + if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) { + if ((hdr->itt & AGE_MASK) != + (session->age << AGE_SHIFT)) { + printk(KERN_ERR "iscsi_tcp: received itt %x expected " + "session age (%x)\n", hdr->itt, + session->age & AGE_MASK); + return ISCSI_ERR_BAD_ITT; + } + + if ((hdr->itt & CID_MASK) != (conn->id << CID_SHIFT)) { + printk(KERN_ERR "iscsi_tcp: received itt %x, expected " + "CID (%x)\n", hdr->itt, conn->id); + return ISCSI_ERR_BAD_ITT; + } + conn->in.itt = hdr->itt & ITT_MASK; + } else + conn->in.itt = hdr->itt; + + debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n", + hdr->opcode, conn->in.offset, conn->in.copy, + conn->in.ahslen, conn->in.datalen); + + if (conn->in.itt < session->cmds_max) { + if (conn->hdrdgst_en && cdgst != rdgst) { + printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error " + "recv 0x%x calc 0x%x\n", conn->in.itt, rdgst, + cdgst); + return ISCSI_ERR_HDR_DGST; + } + + ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt]; + + if (!ctask->sc) { + printk(KERN_INFO "iscsi_tcp: dropping ctask with " + "itt 0x%x\n", ctask->itt); + conn->in.datalen = 0; /* force drop */ + return 0; + } + + if (ctask->sc->SCp.phase != session->age) { + printk(KERN_ERR "iscsi_tcp: ctask's session age %d, " + "expected %d\n", ctask->sc->SCp.phase, + session->age); + return ISCSI_ERR_SESSION_FAILED; + } + + conn->in.ctask = ctask; + + debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n", + hdr->opcode, conn->id, (long)ctask->sc, + ctask->itt, conn->in.datalen); + + switch(conn->in.opcode) { + case ISCSI_OP_SCSI_CMD_RSP: + BUG_ON((void*)ctask != ctask->sc->SCp.ptr); + if (ctask->hdr.flags & ISCSI_FLAG_CMD_WRITE) + rc = iscsi_cmd_rsp(conn, ctask); + else if (!conn->in.datalen) + rc = iscsi_cmd_rsp(conn, ctask); + else + /* + * got sense 
or response data; copying PDU + * Header to the connection's header + * placeholder + */ + memcpy(&conn->hdr, hdr, + sizeof(struct iscsi_hdr)); + break; + case ISCSI_OP_SCSI_DATA_IN: + BUG_ON((void*)ctask != ctask->sc->SCp.ptr); + /* save flags for non-exceptional status */ + conn->in.flags = hdr->flags; + /* save cmd_status for sense data */ + conn->in.cmd_status = + ((struct iscsi_data_rsp*)hdr)->cmd_status; + rc = iscsi_data_rsp(conn, ctask); + break; + case ISCSI_OP_R2T: + BUG_ON((void*)ctask != ctask->sc->SCp.ptr); + if (ctask->hdr.flags & ISCSI_FLAG_CMD_WRITE && + ctask->sc->sc_data_direction == DMA_TO_DEVICE) + rc = iscsi_r2t_rsp(conn, ctask); + else + rc = ISCSI_ERR_PROTO; + break; + default: + rc = ISCSI_ERR_BAD_OPCODE; + break; + } + } else if (conn->in.itt >= ISCSI_MGMT_ITT_OFFSET && + conn->in.itt < ISCSI_MGMT_ITT_OFFSET + + session->mgmtpool_max) { + struct iscsi_mgmt_task *mtask = (struct iscsi_mgmt_task *) + session->mgmt_cmds[conn->in.itt - + ISCSI_MGMT_ITT_OFFSET]; + + debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n", + conn->in.opcode, conn->id, mtask->itt, + conn->in.datalen); + + switch(conn->in.opcode) { + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_TEXT_RSP: + case ISCSI_OP_LOGOUT_RSP: + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (rc) + break; + + if (!conn->in.datalen) { + rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, + NULL, 0); + if (conn->login_mtask != mtask) { + spin_lock(&session->lock); + __kfifo_put(session->mgmtpool.queue, + (void*)&mtask, sizeof(void*)); + spin_unlock(&session->lock); + } + } + break; + case ISCSI_OP_SCSI_TMFUNC_RSP: + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (rc) + break; + + if (conn->in.datalen || conn->in.ahslen) { + rc = ISCSI_ERR_PROTO; + break; + } + conn->tmfrsp_pdus_cnt++; + spin_lock(&session->lock); + if (conn->tmabort_state == TMABORT_INITIAL) { + __kfifo_put(session->mgmtpool.queue, + (void*)&mtask, sizeof(void*)); + conn->tmabort_state = + ((struct iscsi_tm_rsp *)hdr)-> + response == ISCSI_TMF_RSP_COMPLETE ? 
+ TMABORT_SUCCESS:TMABORT_FAILED; + /* unblock eh_abort() */ + wake_up(&conn->ehwait); + } + spin_unlock(&session->lock); + break; + case ISCSI_OP_NOOP_IN: + if (hdr->ttt != ISCSI_RESERVED_TAG) { + rc = ISCSI_ERR_PROTO; + break; + } + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (rc) + break; + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + + if (!conn->in.datalen) { + struct iscsi_mgmt_task *mtask; + + rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, + NULL, 0); + mtask = (struct iscsi_mgmt_task *) + session->mgmt_cmds[conn->in.itt - + ISCSI_MGMT_ITT_OFFSET]; + if (conn->login_mtask != mtask) { + spin_lock(&session->lock); + __kfifo_put(session->mgmtpool.queue, + (void*)&mtask, sizeof(void*)); + spin_unlock(&session->lock); + } + } + break; + default: + rc = ISCSI_ERR_BAD_OPCODE; + break; + } + } else if (conn->in.itt == ISCSI_RESERVED_TAG) { + switch(conn->in.opcode) { + case ISCSI_OP_NOOP_IN: + if (!conn->in.datalen) { + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) + rc = iscsi_recv_pdu(iscsi_handle(conn), + hdr, NULL, 0); + } else + rc = ISCSI_ERR_PROTO; + break; + case ISCSI_OP_REJECT: + /* we need sth like iscsi_reject_rsp()*/ + case ISCSI_OP_ASYNC_EVENT: + /* we need sth like iscsi_async_event_rsp() */ + rc = ISCSI_ERR_BAD_OPCODE; + break; + default: + rc = ISCSI_ERR_BAD_OPCODE; + break; + } + } else + rc = ISCSI_ERR_BAD_ITT; + + return rc; +} + +/** + * iscsi_ctask_copy - copy skb bits to the destanation cmd task + * @conn: iscsi connection + * @ctask: scsi command task + * @buf: buffer to copy to + * @buf_size: size of buffer + * @offset: offset within the buffer + * + * Notes: + * The function calls skb_copy_bits() and updates per-connection and + * per-cmd byte counters. 
+ * + * Read counters (in bytes): + * + * conn->in.offset offset within in progress SKB + * conn->in.copy left to copy from in progress SKB + * including padding + * conn->in.copied copied already from in progress SKB + * conn->data_copied copied already from in progress buffer + * ctask->sent total bytes sent up to the MidLayer + * ctask->data_count left to copy from in progress Data-In + * buf_left left to copy from in progress buffer + **/ +static inline int +iscsi_ctask_copy(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, + void *buf, int buf_size, int offset) +{ + int buf_left = buf_size - (conn->data_copied + offset); + int size = min(conn->in.copy, buf_left); + int rc; + + size = min(size, ctask->data_count); + + debug_tcp("ctask_copy %d bytes at offset %d copied %d\n", + size, conn->in.offset, conn->in.copied); + + BUG_ON(size <= 0); + BUG_ON(ctask->sent + size > ctask->total_length); + + rc = skb_copy_bits(conn->in.skb, conn->in.offset, + (char*)buf + (offset + conn->data_copied), size); + /* must fit into skb->len */ + BUG_ON(rc); + + conn->in.offset += size; + conn->in.copy -= size; + conn->in.copied += size; + conn->data_copied += size; + ctask->sent += size; + ctask->data_count -= size; + + BUG_ON(conn->in.copy < 0); + BUG_ON(ctask->data_count < 0); + + if (buf_size != (conn->data_copied + offset)) { + if (!ctask->data_count) { + BUG_ON(buf_size - conn->data_copied < 0); + /* done with this PDU */ + return buf_size - conn->data_copied; + } + return -EAGAIN; + } + + /* done with this buffer or with both - PDU and buffer */ + conn->data_copied = 0; + return 0; +} + +/** + * iscsi_tcp_copy - copy skb bits to the destanation buffer + * @conn: iscsi connection + * @buf: buffer to copy to + * @buf_size: number of bytes to copy + * + * Notes: + * The function calls skb_copy_bits() and updates per-connection + * byte counters. 
+ **/ +static inline int +iscsi_tcp_copy(struct iscsi_conn *conn, void *buf, int buf_size) +{ + int buf_left = buf_size - conn->data_copied; + int size = min(conn->in.copy, buf_left); + int rc; + + debug_tcp("tcp_copy %d bytes at offset %d copied %d\n", + size, conn->in.offset, conn->data_copied); + BUG_ON(size <= 0); + + rc = skb_copy_bits(conn->in.skb, conn->in.offset, + (char*)buf + conn->data_copied, size); + BUG_ON(rc); + + conn->in.offset += size; + conn->in.copy -= size; + conn->in.copied += size; + conn->data_copied += size; + + if (buf_size != conn->data_copied) + return -EAGAIN; + + return 0; +} + +static inline void +partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg, + int offset, int length) +{ + struct scatterlist temp; + + memcpy(&temp, sg, sizeof(struct scatterlist)); + temp.offset = offset; + temp.length = length; + crypto_digest_update(conn->data_rx_tfm, &temp, 1); +} + +static int iscsi_scsi_data_in(struct iscsi_conn *conn) +{ + struct iscsi_cmd_task *ctask = conn->in.ctask; + struct scsi_cmnd *sc = ctask->sc; + struct scatterlist tmp, *sg; + int i, offset, rc = 0; + + BUG_ON((void*)ctask != sc->SCp.ptr); + + /* + * copying Data-In into the Scsi_Cmnd + */ + if (!sc->use_sg) { + i = ctask->data_count; + rc = iscsi_ctask_copy(conn, ctask, sc->request_buffer, + sc->request_bufflen, ctask->data_offset); + if (rc == -EAGAIN) + return rc; + if (conn->datadgst_en) { + sg_init_one(&tmp, sc->request_buffer, i); + crypto_digest_update(conn->data_rx_tfm, &tmp, 1); + } + rc = 0; + goto done; + } + + offset = ctask->data_offset; + sg = sc->request_buffer; + + if (ctask->data_offset) + for (i = 0; i < ctask->sg_count; i++) + offset -= sg[i].length; + /* we've passed through partial sg*/ + if (offset < 0) + offset = 0; + + for (i = ctask->sg_count; i < sc->use_sg; i++) { + char *dest; + + dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0); + rc = iscsi_ctask_copy(conn, ctask, dest + sg[i].offset, + sg[i].length, offset); + kunmap_atomic(dest, KM_SOFTIRQ0); + if (rc == -EAGAIN) + /* continue with the next SKB/PDU */ + return rc; + if (!rc) { + if (conn->datadgst_en) { + if (!offset) + crypto_digest_update(conn->data_rx_tfm, + &sg[i], 1); + else + partial_sg_digest_update(conn, &sg[i], + sg[i].offset + offset, + sg[i].length - offset); + } + offset = 0; + ctask->sg_count++; + } + + if (!ctask->data_count) { + if (rc && conn->datadgst_en) + /* + * data-in is complete, but buffer not... + */ + partial_sg_digest_update(conn, &sg[i], + sg[i].offset, sg[i].length-rc); + rc = 0; + break; + } + + if (!conn->in.copy) + return -EAGAIN; + } + BUG_ON(ctask->data_count); + +done: + /* check for non-exceptional status */ + if (conn->in.flags & ISCSI_FLAG_DATA_STATUS) { + debug_scsi("done [sc %lx res %d itt 0x%x]\n", + (long)sc, sc->result, ctask->itt); + conn->scsirsp_pdus_cnt++; + iscsi_ctask_cleanup(conn, ctask); + sc->scsi_done(sc); + } + + return rc; +} + +static int +iscsi_data_recv(struct iscsi_conn *conn) +{ + struct iscsi_session *session = conn->session; + int rc = 0; + + switch(conn->in.opcode) { + case ISCSI_OP_SCSI_DATA_IN: + rc = iscsi_scsi_data_in(conn); + break; + case ISCSI_OP_SCSI_CMD_RSP: { + /* + * SCSI Sense Data: + * copying the entire Data Segment. 
+ */ + if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) { + rc = -EAGAIN; + goto exit; + } + + /* + * check for sense + */ + conn->in.hdr = &conn->hdr; + conn->senselen = (conn->data[0] << 8) | conn->data[1]; + rc = iscsi_cmd_rsp(conn, conn->in.ctask); + } + break; + case ISCSI_OP_TEXT_RSP: + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_NOOP_IN: { + struct iscsi_mgmt_task *mtask = NULL; + + if (conn->in.itt != ISCSI_RESERVED_TAG) + mtask = (struct iscsi_mgmt_task *) + session->mgmt_cmds[conn->in.itt - + ISCSI_MGMT_ITT_OFFSET]; + + /* + * Collect data segment to the connection's data + * placeholder + */ + if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) { + rc = -EAGAIN; + goto exit; + } + + rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr, + conn->data, conn->in.datalen); + + if (mtask && conn->login_mtask != mtask) { + spin_lock(&session->lock); + __kfifo_put(session->mgmtpool.queue, (void*)&mtask, + sizeof(void*)); + spin_unlock(&session->lock); + } + } + break; + default: + BUG_ON(1); + } +exit: + return rc; +} + +/** + * iscsi_tcp_data_recv - TCP receive in sendfile fashion + * @rd_desc: read descriptor + * @skb: socket buffer + * @offset: offset in skb + * @len: skb->len - offset + **/ +static int +iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, + unsigned int offset, size_t len) +{ + int rc; + struct iscsi_conn *conn = rd_desc->arg.data; + int processed; + char pad[ISCSI_PAD_LEN]; + struct scatterlist sg; + + /* + * Save current SKB and its offset in the corresponding + * connection context. + */ + conn->in.copy = skb->len - offset; + conn->in.offset = offset; + conn->in.skb = skb; + conn->in.len = conn->in.copy; + BUG_ON(conn->in.copy <= 0); + debug_tcp("in %d bytes\n", conn->in.copy); + +more: + conn->in.copied = 0; + rc = 0; + + if (unlikely(conn->suspend_rx)) { + debug_tcp("conn %d Rx suspended!\n", conn->id); + return 0; + } + + if (conn->in_progress == IN_PROGRESS_WAIT_HEADER || + conn->in_progress == IN_PROGRESS_HEADER_GATHER) { + rc = iscsi_hdr_extract(conn); + if (rc) { + if (rc == -EAGAIN) + goto nomore; + else { + iscsi_conn_failure(conn, rc); + return 0; + } + } + + /* + * Verify and process incoming PDU header. + */ + rc = iscsi_hdr_recv(conn); + if (!rc && conn->in.datalen) { + if (conn->datadgst_en && + conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) { + BUG_ON(!conn->data_rx_tfm); + crypto_digest_init(conn->data_rx_tfm); + } + conn->in_progress = IN_PROGRESS_DATA_RECV; + } else if (rc) { + iscsi_conn_failure(conn, rc); + return 0; + } + } + + if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) { + debug_tcp("extra data_recv offset %d copy %d\n", + conn->in.offset, conn->in.copy); + if (conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) { + uint32_t recv_digest; + skb_copy_bits(conn->in.skb, conn->in.offset, + &recv_digest, 4); + conn->in.offset += 4; + conn->in.copy -= 4; + if (recv_digest != conn->in.datadgst) { + debug_tcp("iscsi_tcp: data digest error!" + "0x%x != 0x%x\n", recv_digest, + conn->in.datadgst); + iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); + return 0; + } else { + debug_tcp("iscsi_tcp: data digest match!" 
+ "0x%x == 0x%x\n", recv_digest, + conn->in.datadgst); + conn->in_progress = IN_PROGRESS_WAIT_HEADER; + } + } + } + + if (conn->in_progress == IN_PROGRESS_DATA_RECV && conn->in.copy) { + + debug_tcp("data_recv offset %d copy %d\n", + conn->in.offset, conn->in.copy); + + rc = iscsi_data_recv(conn); + if (rc) { + if (rc == -EAGAIN) { + rd_desc->count = conn->in.datalen - + conn->in.ctask->data_count; + goto again; + } + iscsi_conn_failure(conn, rc); + return 0; + } + conn->in.copy -= conn->in.padding; + conn->in.offset += conn->in.padding; + if (conn->datadgst_en && + conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) { + if (conn->in.padding) { + debug_tcp("padding -> %d\n", conn->in.padding); + memset(pad, 0, conn->in.padding); + sg_init_one(&sg, pad, conn->in.padding); + crypto_digest_update(conn->data_rx_tfm, &sg, 1); + } + crypto_digest_final(conn->data_rx_tfm, + (u8 *) & conn->in.datadgst); + debug_tcp("rx digest 0x%x\n", conn->in.datadgst); + conn->in_progress = IN_PROGRESS_DDIGEST_RECV; + } else + conn->in_progress = IN_PROGRESS_WAIT_HEADER; + } + + debug_tcp("f, processed %d from out of %d padding %d\n", + conn->in.offset - offset, (int)len, conn->in.padding); + BUG_ON(conn->in.offset - offset > len); + + if (conn->in.offset - offset != len) { + debug_tcp("continue to process %d bytes\n", + (int)len - (conn->in.offset - offset)); + goto more; + } + +nomore: + processed = conn->in.offset - offset; + BUG_ON(processed == 0); + return processed; + +again: + processed = conn->in.offset - offset; + debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n", + processed, (int)len, (int)rd_desc->count); + BUG_ON(processed == 0); + BUG_ON(processed > len); + + conn->rxdata_octets += processed; + return processed; +} + +static void +iscsi_tcp_data_ready(struct sock *sk, int flag) +{ + struct iscsi_conn *conn = sk->sk_user_data; + read_descriptor_t rd_desc; + + read_lock(&sk->sk_callback_lock); + + /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */ + rd_desc.arg.data = conn; + rd_desc.count = 0; + tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv); + + read_unlock(&sk->sk_callback_lock); +} + +static void +iscsi_tcp_state_change(struct sock *sk) +{ + struct iscsi_conn *conn; + struct iscsi_session *session; + void (*old_state_change)(struct sock *); + + read_lock(&sk->sk_callback_lock); + + conn = (struct iscsi_conn*)sk->sk_user_data; + session = conn->session; + + if (sk->sk_state == TCP_CLOSE_WAIT || + sk->sk_state == TCP_CLOSE) { + debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n"); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + } + + old_state_change = conn->old_state_change; + + read_unlock(&sk->sk_callback_lock); + + old_state_change(sk); +} + +/** + * iscsi_write_space - Called when more output buffer space is available + * @sk: socket space is available for + **/ +static void +iscsi_write_space(struct sock *sk) +{ + struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; + conn->old_write_space(sk); + debug_tcp("iscsi_write_space: cid %d\n", conn->id); + clear_bit(SUSPEND_BIT, &conn->suspend_tx); + schedule_work(&conn->xmitwork); +} + +static void +iscsi_conn_set_callbacks(struct iscsi_conn *conn) +{ + struct sock *sk = conn->sock->sk; + + /* assign new callbacks */ + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = conn; + conn->old_data_ready = sk->sk_data_ready; + conn->old_state_change = sk->sk_state_change; + conn->old_write_space = sk->sk_write_space; + sk->sk_data_ready = iscsi_tcp_data_ready; + sk->sk_state_change = iscsi_tcp_state_change; + 
sk->sk_write_space = iscsi_write_space; + write_unlock_bh(&sk->sk_callback_lock); +} + +static void +iscsi_conn_restore_callbacks(struct iscsi_conn *conn) +{ + struct sock *sk = conn->sock->sk; + + /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = NULL; + sk->sk_data_ready = conn->old_data_ready; + sk->sk_state_change = conn->old_state_change; + sk->sk_write_space = conn->old_write_space; + sk->sk_no_check = 0; + write_unlock_bh(&sk->sk_callback_lock); +} + +/** + * iscsi_send - generic send routine + * @sk: kernel's socket + * @buf: buffer to write from + * @size: actual size to write + * @flags: socket's flags + * + * Notes: + * depending on buffer will use tcp_sendpage() or tcp_sendmsg(). + * buf->sg.offset == -1 tells us that buffer is non S/G and forces + * to use tcp_sendmsg(). + */ +static inline int +iscsi_send(struct socket *sk, struct iscsi_buf *buf, int size, int flags) +{ + int res; + + if ((int)buf->sg.offset >= 0) { + int offset = buf->sg.offset + buf->sent; + + /* tcp_sendpage */ + res = sk->ops->sendpage(sk, buf->sg.page, offset, size, flags); + } else { + struct msghdr msg; + + buf->iov.iov_base = iscsi_buf_iov_base(buf); + buf->iov.iov_len = size; + + memset(&msg, 0, sizeof(struct msghdr)); + + /* tcp_sendmsg */ + res = kernel_sendmsg(sk, &msg, &buf->iov, 1, size); + } + + return res; +} + +/** + * iscsi_sendhdr - send PDU Header via tcp_sendpage() + * @conn: iscsi connection + * @buf: buffer to write from + * @datalen: lenght of data to be sent after the header + * + * Notes: + * (Tx, Fast Path) + **/ +static inline int +iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen) +{ + struct socket *sk = conn->sock; + int flags = 0; /* MSG_DONTWAIT; */ + int res, size; + + size = buf->sg.length - buf->sent; + BUG_ON(buf->sent + size > buf->sg.length); + if (buf->sent + size != buf->sg.length || datalen) + flags |= MSG_MORE; + + res = iscsi_send(sk, buf, size, flags); + debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res); + if (res >= 0) { + conn->txdata_octets += res; + buf->sent += res; + if (size != res) + return -EAGAIN; + return 0; + } else if (res == -EAGAIN) { + conn->sendpage_failures_cnt++; + set_bit(SUSPEND_BIT, &conn->suspend_tx); + } else if (res == -EPIPE) + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + + return res; +} + +/** + * iscsi_sendpage - send one page of iSCSI Data-Out. 
+ * @conn: iscsi connection + * @buf: buffer to write from + * @count: remaining data + * @sent: number of bytes sent + * + * Notes: + * (Tx, Fast Path) + **/ +static inline int +iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf, + int *count, int *sent) +{ + struct socket *sk = conn->sock; + int flags = 0; /* MSG_DONTWAIT; */ + int res, size; + + size = buf->sg.length - buf->sent; + BUG_ON(buf->sent + size > buf->sg.length); + if (size > *count) + size = *count; + if (buf->sent + size != buf->sg.length || *count != size) + flags |= MSG_MORE; + + res = iscsi_send(sk, buf, size, flags); + debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n", + size, buf->sent, *count, *sent, res); + if (res >= 0) { + conn->txdata_octets += res; + buf->sent += res; + *count -= res; + *sent += res; + if (size != res) + return -EAGAIN; + return 0; + } else if (res == -EAGAIN) { + conn->sendpage_failures_cnt++; + set_bit(SUSPEND_BIT, &conn->suspend_tx); + } else if (res == -EPIPE) + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + + return res; +} + +static inline void +iscsi_data_digest_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + BUG_ON(!conn->data_tx_tfm); + crypto_digest_init(conn->data_tx_tfm); + ctask->digest_count = 4; +} + +static inline void +iscsi_buf_data_digest_update(struct iscsi_conn *conn, struct iscsi_buf *buf) +{ + struct scatterlist sg; + + if (buf->sg.offset != -1) + crypto_digest_update(conn->data_tx_tfm, &buf->sg, 1); + else { + sg_init_one(&sg, (char *)buf->sg.page, buf->sg.length); + crypto_digest_update(conn->data_tx_tfm, &sg, 1); + } +} + +static inline int +iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, + struct iscsi_buf *buf, uint32_t *digest, int final) +{ + int rc = 0; + int sent = 0; + + if (final) + crypto_digest_final(conn->data_tx_tfm, (u8*)digest); + + iscsi_buf_init_virt(buf, (char*)digest, 4); + rc = iscsi_sendpage(conn, buf, &ctask->digest_count, &sent); + if (rc) { + ctask->datadigest = *digest; + ctask->xmstate |= XMSTATE_DATA_DIGEST; + } else + ctask->digest_count = 4; + return rc; +} + +/** + * iscsi_solicit_data_cont - initialize next Data-Out + * @conn: iscsi connection + * @ctask: scsi command task + * @r2t: R2T info + * @left: bytes left to transfer + * + * Notes: + * Initialize next Data-Out within this R2T sequence and continue + * to process next Scatter-Gather element(if any) of this SCSI command. + * + * Called under connection lock. 
+ **/ +static void +iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, + struct iscsi_r2t_info *r2t, int left) +{ + struct iscsi_data *hdr; + struct iscsi_data_task *dtask; + struct scsi_cmnd *sc = ctask->sc; + int new_offset; + + dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC); + BUG_ON(!dtask); + hdr = &dtask->hdr; + memset(hdr, 0, sizeof(struct iscsi_data)); + hdr->ttt = r2t->ttt; + hdr->datasn = cpu_to_be32(r2t->solicit_datasn); + r2t->solicit_datasn++; + hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; + memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun)); + hdr->itt = ctask->hdr.itt; + hdr->exp_statsn = r2t->exp_statsn; + new_offset = r2t->data_offset + r2t->sent; + hdr->offset = cpu_to_be32(new_offset); + if (left > conn->max_xmit_dlength) { + hton24(hdr->dlength, conn->max_xmit_dlength); + r2t->data_count = conn->max_xmit_dlength; + } else { + hton24(hdr->dlength, left); + r2t->data_count = left; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + } + conn->dataout_pdus_cnt++; + + iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr, + sizeof(struct iscsi_hdr)); + + r2t->dtask = dtask; + + if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) { + BUG_ON(ctask->bad_sg == r2t->sg); + iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); + r2t->sg += 1; + } else + iscsi_buf_init_iov(&ctask->sendbuf, + (char*)sc->request_buffer + new_offset, + r2t->data_count); + + list_add(&dtask->item, &ctask->dataqueue); +} + +static void +iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct iscsi_data *hdr; + struct iscsi_data_task *dtask; + + dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC); + BUG_ON(!dtask); + hdr = &dtask->hdr; + memset(hdr, 0, sizeof(struct iscsi_data)); + hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); + hdr->datasn = cpu_to_be32(ctask->unsol_datasn); + ctask->unsol_datasn++; + hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; + memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun)); + hdr->itt = ctask->hdr.itt; + hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); + hdr->offset = cpu_to_be32(ctask->total_length - + ctask->r2t_data_count - + ctask->unsol_count); + if (ctask->unsol_count > conn->max_xmit_dlength) { + hton24(hdr->dlength, conn->max_xmit_dlength); + ctask->data_count = conn->max_xmit_dlength; + hdr->flags = 0; + } else { + hton24(hdr->dlength, ctask->unsol_count); + ctask->data_count = ctask->unsol_count; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + } + + iscsi_buf_init_virt(&ctask->headbuf, (char*)hdr, + sizeof(struct iscsi_hdr)); + + list_add(&dtask->item, &ctask->dataqueue); + + ctask->dtask = dtask; +} + +/** + * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands + * @conn: iscsi connection + * @ctask: scsi command task + * @sc: scsi command + **/ +static void +iscsi_cmd_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, + struct scsi_cmnd *sc) +{ + struct iscsi_session *session = conn->session; + + BUG_ON(__kfifo_len(ctask->r2tqueue)); + + ctask->sc = sc; + ctask->conn = conn; + ctask->hdr.opcode = ISCSI_OP_SCSI_CMD; + ctask->hdr.flags = ISCSI_ATTR_SIMPLE; + int_to_scsilun(sc->device->lun, (struct scsi_lun *)ctask->hdr.lun); + ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) | + (session->age << AGE_SHIFT); + ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen); + ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++; + ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn); + memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len); + memset(&ctask->hdr.cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len); + + ctask->mtask = 
NULL; + ctask->sent = 0; + ctask->sg_count = 0; + + ctask->total_length = sc->request_bufflen; + + if (sc->sc_data_direction == DMA_TO_DEVICE) { + ctask->exp_r2tsn = 0; + ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE; + BUG_ON(ctask->total_length == 0); + if (sc->use_sg) { + struct scatterlist *sg = sc->request_buffer; + + iscsi_buf_init_sg(&ctask->sendbuf, + &sg[ctask->sg_count++]); + ctask->sg = sg; + ctask->bad_sg = sg + sc->use_sg; + } else { + iscsi_buf_init_iov(&ctask->sendbuf, sc->request_buffer, + sc->request_bufflen); + } + + /* + * Write counters: + * + * imm_count bytes to be sent right after + * SCSI PDU Header + * + * unsol_count bytes(as Data-Out) to be sent + * without R2T ack right after + * immediate data + * + * r2t_data_count bytes to be sent via R2T ack's + * + * pad_count bytes to be sent as zero-padding + */ + ctask->imm_count = 0; + ctask->unsol_count = 0; + ctask->unsol_datasn = 0; + ctask->xmstate = XMSTATE_W_HDR; + /* calculate write padding */ + ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1); + if (ctask->pad_count) { + ctask->pad_count = ISCSI_PAD_LEN - ctask->pad_count; + debug_scsi("write padding %d bytes\n", + ctask->pad_count); + ctask->xmstate |= XMSTATE_W_PAD; + } + if (session->imm_data_en) { + if (ctask->total_length >= session->first_burst) + ctask->imm_count = min(session->first_burst, + conn->max_xmit_dlength); + else + ctask->imm_count = min(ctask->total_length, + conn->max_xmit_dlength); + hton24(ctask->hdr.dlength, ctask->imm_count); + ctask->xmstate |= XMSTATE_IMM_DATA; + } else + zero_data(ctask->hdr.dlength); + + if (!session->initial_r2t_en) + ctask->unsol_count = min(session->first_burst, + ctask->total_length) - ctask->imm_count; + if (!ctask->unsol_count) + /* No unsolicit Data-Out's */ + ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL; + else + ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; + + ctask->r2t_data_count = ctask->total_length - + ctask->imm_count - + ctask->unsol_count; + + debug_scsi("cmd [itt %x total %d imm %d imm_data %d " + "r2t_data %d]\n", + ctask->itt, ctask->total_length, ctask->imm_count, + ctask->unsol_count, ctask->r2t_data_count); + } else { + ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL; + if (sc->sc_data_direction == DMA_FROM_DEVICE) + ctask->hdr.flags |= ISCSI_FLAG_CMD_READ; + ctask->datasn = 0; + ctask->xmstate = XMSTATE_R_HDR; + zero_data(ctask->hdr.dlength); + } + + iscsi_buf_init_virt(&ctask->headbuf, (char*)&ctask->hdr, + sizeof(struct iscsi_hdr)); + conn->scsicmd_pdus_cnt++; +} + +/** + * iscsi_mtask_xmit - xmit management(immediate) task + * @conn: iscsi connection + * @mtask: task management task + * + * Notes: + * The function can return -EAGAIN in which case caller must + * call it again later, or recover. '0' return code means successful + * xmit. 
+ * + * Management xmit state machine consists of two states: + * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress + * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress + **/ +static int +iscsi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) +{ + + debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", + conn->id, mtask->xmstate, mtask->itt); + + if (mtask->xmstate & XMSTATE_IMM_HDR) { + mtask->xmstate &= ~XMSTATE_IMM_HDR; + if (mtask->data_count) + mtask->xmstate |= XMSTATE_IMM_DATA; + if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE && + conn->stop_stage != STOP_CONN_RECOVER && + conn->hdrdgst_en) + iscsi_hdr_digest(conn, &mtask->headbuf, + (u8*)mtask->hdrext); + if (iscsi_sendhdr(conn, &mtask->headbuf, mtask->data_count)) { + mtask->xmstate |= XMSTATE_IMM_HDR; + if (mtask->data_count) + mtask->xmstate &= ~XMSTATE_IMM_DATA; + return -EAGAIN; + } + } + + if (mtask->xmstate & XMSTATE_IMM_DATA) { + BUG_ON(!mtask->data_count); + mtask->xmstate &= ~XMSTATE_IMM_DATA; + /* FIXME: implement. + * Virtual buffer could be spreaded across multiple pages... + */ + do { + if (iscsi_sendpage(conn, &mtask->sendbuf, + &mtask->data_count, &mtask->sent)) { + mtask->xmstate |= XMSTATE_IMM_DATA; + return -EAGAIN; + } + } while (mtask->data_count); + } + + BUG_ON(mtask->xmstate != XMSTATE_IDLE); + return 0; +} + +static inline int +handle_xmstate_r_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + ctask->xmstate &= ~XMSTATE_R_HDR; + if (conn->hdrdgst_en) + iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext); + if (!iscsi_sendhdr(conn, &ctask->headbuf, 0)) { + BUG_ON(ctask->xmstate != XMSTATE_IDLE); + return 0; /* wait for Data-In */ + } + ctask->xmstate |= XMSTATE_R_HDR; + return -EAGAIN; +} + +static inline int +handle_xmstate_w_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + ctask->xmstate &= ~XMSTATE_W_HDR; + if (conn->hdrdgst_en) + iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext); + if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->imm_count)) { + ctask->xmstate |= XMSTATE_W_HDR; + return -EAGAIN; + } + return 0; +} + +static inline int +handle_xmstate_data_digest(struct iscsi_conn *conn, + struct iscsi_cmd_task *ctask) +{ + ctask->xmstate &= ~XMSTATE_DATA_DIGEST; + debug_tcp("resent data digest 0x%x\n", ctask->datadigest); + if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf, + &ctask->datadigest, 0)) { + ctask->xmstate |= XMSTATE_DATA_DIGEST; + debug_tcp("resent data digest 0x%x fail!\n", + ctask->datadigest); + return -EAGAIN; + } + return 0; +} + +static inline int +handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + BUG_ON(!ctask->imm_count); + ctask->xmstate &= ~XMSTATE_IMM_DATA; + + if (conn->datadgst_en) { + iscsi_data_digest_init(conn, ctask); + ctask->immdigest = 0; + } + + for (;;) { + if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->imm_count, + &ctask->sent)) { + ctask->xmstate |= XMSTATE_IMM_DATA; + if (conn->datadgst_en) { + crypto_digest_final(conn->data_tx_tfm, + (u8*)&ctask->immdigest); + debug_tcp("tx imm sendpage fail 0x%x\n", + ctask->datadigest); + } + return -EAGAIN; + } + if (conn->datadgst_en) + iscsi_buf_data_digest_update(conn, &ctask->sendbuf); + + if (!ctask->imm_count) + break; + iscsi_buf_init_sg(&ctask->sendbuf, + &ctask->sg[ctask->sg_count++]); + } + + if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) { + if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf, + &ctask->immdigest, 1)) { + debug_tcp("sending imm digest 0x%x fail!\n", + ctask->immdigest); + return 
-EAGAIN; + } + debug_tcp("sending imm digest 0x%x\n", ctask->immdigest); + } + + return 0; +} + +static inline int +handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct iscsi_data_task *dtask; + + ctask->xmstate |= XMSTATE_UNS_DATA; + if (ctask->xmstate & XMSTATE_UNS_INIT) { + iscsi_unsolicit_data_init(conn, ctask); + BUG_ON(!ctask->dtask); + dtask = ctask->dtask; + if (conn->hdrdgst_en) + iscsi_hdr_digest(conn, &ctask->headbuf, + (u8*)dtask->hdrext); + ctask->xmstate &= ~XMSTATE_UNS_INIT; + } + if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->data_count)) { + ctask->xmstate &= ~XMSTATE_UNS_DATA; + ctask->xmstate |= XMSTATE_UNS_HDR; + return -EAGAIN; + } + + debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", + ctask->itt, ctask->unsol_count, ctask->sent); + return 0; +} + +static inline int +handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct iscsi_data_task *dtask = ctask->dtask; + + BUG_ON(!ctask->data_count); + ctask->xmstate &= ~XMSTATE_UNS_DATA; + + if (conn->datadgst_en) { + iscsi_data_digest_init(conn, ctask); + dtask->digest = 0; + } + + for (;;) { + int start = ctask->sent; + + if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->data_count, + &ctask->sent)) { + ctask->unsol_count -= ctask->sent - start; + ctask->xmstate |= XMSTATE_UNS_DATA; + /* will continue with this ctask later.. */ + if (conn->datadgst_en) { + crypto_digest_final(conn->data_tx_tfm, + (u8 *)&dtask->digest); + debug_tcp("tx uns data fail 0x%x\n", + dtask->digest); + } + return -EAGAIN; + } + + BUG_ON(ctask->sent > ctask->total_length); + ctask->unsol_count -= ctask->sent - start; + + /* + * XXX:we may run here with un-initial sendbuf. + * so pass it + */ + if (conn->datadgst_en && ctask->sent - start > 0) + iscsi_buf_data_digest_update(conn, &ctask->sendbuf); + + if (!ctask->data_count) + break; + iscsi_buf_init_sg(&ctask->sendbuf, + &ctask->sg[ctask->sg_count++]); + } + BUG_ON(ctask->unsol_count < 0); + + /* + * Done with the Data-Out. Next, check if we need + * to send another unsolicited Data-Out. + */ + if (ctask->unsol_count) { + if (conn->datadgst_en) { + if (iscsi_digest_final_send(conn, ctask, + &dtask->digestbuf, + &dtask->digest, 1)) { + debug_tcp("send uns digest 0x%x fail\n", + dtask->digest); + return -EAGAIN; + } + debug_tcp("sending uns digest 0x%x, more uns\n", + dtask->digest); + } + ctask->xmstate |= XMSTATE_UNS_INIT; + return 1; + } + + if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) { + if (iscsi_digest_final_send(conn, ctask, + &dtask->digestbuf, + &dtask->digest, 1)) { + debug_tcp("send last uns digest 0x%x fail\n", + dtask->digest); + return -EAGAIN; + } + debug_tcp("sending uns digest 0x%x\n",dtask->digest); + } + + return 0; +} + +static inline int +handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct iscsi_session *session = conn->session; + struct iscsi_r2t_info *r2t = ctask->r2t; + struct iscsi_data_task *dtask = r2t->dtask; + int left; + + ctask->xmstate &= ~XMSTATE_SOL_DATA; + ctask->dtask = dtask; + + if (conn->datadgst_en) { + iscsi_data_digest_init(conn, ctask); + dtask->digest = 0; + } +solicit_again: + /* + * send Data-Out whitnin this R2T sequence. + */ + if (!r2t->data_count) + goto data_out_done; + + if (iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent)) { + ctask->xmstate |= XMSTATE_SOL_DATA; + /* will continue with this ctask later.. 
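Continuing that example, the unsolicited part driven by handle_xmstate_uns_hdr()/handle_xmstate_uns_data() above would leave as a short burst of Data-Out PDUs. The trace below assumes each Data-Out is capped at max_xmit_dlength and is illustrative rather than captured driver output:

	/*
	 * 57344 unsolicited bytes, max_xmit_dlength = 8192:
	 *   DataSN 0..5 : six Data-Outs of 8192 bytes         = 49152
	 *   DataSN 6    : final Data-Out, 8192 bytes, F bit set (57344 total)
	 * After the F-bit PDU the task falls back to R2T-solicited transfer
	 * (XMSTATE_SOL_HDR / XMSTATE_SOL_DATA), handled just below.
	 */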
*/ + if (conn->datadgst_en) { + crypto_digest_final(conn->data_tx_tfm, + (u8 *)&dtask->digest); + debug_tcp("r2t data send fail 0x%x\n", dtask->digest); + } + return -EAGAIN; + } + + BUG_ON(r2t->data_count < 0); + if (conn->datadgst_en) + iscsi_buf_data_digest_update(conn, &r2t->sendbuf); + + if (r2t->data_count) { + BUG_ON(ctask->sc->use_sg == 0); + if (!iscsi_buf_left(&r2t->sendbuf)) { + BUG_ON(ctask->bad_sg == r2t->sg); + iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); + r2t->sg += 1; + } + goto solicit_again; + } + +data_out_done: + /* + * Done with this Data-Out. Next, check if we have + * to send another Data-Out for this R2T. + */ + BUG_ON(r2t->data_length - r2t->sent < 0); + left = r2t->data_length - r2t->sent; + if (left) { + if (conn->datadgst_en) { + if (iscsi_digest_final_send(conn, ctask, + &dtask->digestbuf, + &dtask->digest, 1)) { + debug_tcp("send r2t data digest 0x%x" + "fail\n", dtask->digest); + return -EAGAIN; + } + debug_tcp("r2t data send digest 0x%x\n", + dtask->digest); + } + iscsi_solicit_data_cont(conn, ctask, r2t, left); + ctask->xmstate |= XMSTATE_SOL_DATA; + ctask->xmstate &= ~XMSTATE_SOL_HDR; + return 1; + } + + /* + * Done with this R2T. Check if there are more + * outstanding R2Ts ready to be processed. + */ + BUG_ON(ctask->r2t_data_count - r2t->data_length < 0); + if (conn->datadgst_en) { + if (iscsi_digest_final_send(conn, ctask, &dtask->digestbuf, + &dtask->digest, 1)) { + debug_tcp("send last r2t data digest 0x%x" + "fail\n", dtask->digest); + return -EAGAIN; + } + debug_tcp("r2t done dout digest 0x%x\n", dtask->digest); + } + + ctask->r2t_data_count -= r2t->data_length; + ctask->r2t = NULL; + spin_lock_bh(&session->lock); + __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); + spin_unlock_bh(&session->lock); + if (__kfifo_get(ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { + ctask->r2t = r2t; + ctask->xmstate |= XMSTATE_SOL_DATA; + ctask->xmstate &= ~XMSTATE_SOL_HDR; + return 1; + } + + return 0; +} + +static inline int +handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct iscsi_data_task *dtask = ctask->dtask; + int sent; + + ctask->xmstate &= ~XMSTATE_W_PAD; + iscsi_buf_init_virt(&ctask->sendbuf, (char*)&ctask->pad, + ctask->pad_count); + if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->pad_count, &sent)) { + ctask->xmstate |= XMSTATE_W_PAD; + return -EAGAIN; + } + + if (conn->datadgst_en) { + iscsi_buf_data_digest_update(conn, &ctask->sendbuf); + /* imm data? 
*/ + if (!dtask) { + if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf, + &ctask->immdigest, 1)) { + debug_tcp("send padding digest 0x%x" + "fail!\n", ctask->immdigest); + return -EAGAIN; + } + debug_tcp("done with padding, digest 0x%x\n", + ctask->datadigest); + } else { + if (iscsi_digest_final_send(conn, ctask, + &dtask->digestbuf, + &dtask->digest, 1)) { + debug_tcp("send padding digest 0x%x" + "fail\n", dtask->digest); + return -EAGAIN; + } + debug_tcp("done with padding, digest 0x%x\n", + dtask->digest); + } + } + + return 0; +} + +static int +iscsi_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + int rc = 0; + + debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n", + conn->id, ctask->xmstate, ctask->itt); + + /* + * serialize with TMF AbortTask + */ + if (ctask->mtask) + return rc; + + if (ctask->xmstate & XMSTATE_R_HDR) { + rc = handle_xmstate_r_hdr(conn, ctask); + return rc; + } + + if (ctask->xmstate & XMSTATE_W_HDR) { + rc = handle_xmstate_w_hdr(conn, ctask); + if (rc) + return rc; + } + + /* XXX: for data digest xmit recover */ + if (ctask->xmstate & XMSTATE_DATA_DIGEST) { + rc = handle_xmstate_data_digest(conn, ctask); + if (rc) + return rc; + } + + if (ctask->xmstate & XMSTATE_IMM_DATA) { + rc = handle_xmstate_imm_data(conn, ctask); + if (rc) + return rc; + } + + if (ctask->xmstate & XMSTATE_UNS_HDR) { + BUG_ON(!ctask->unsol_count); + ctask->xmstate &= ~XMSTATE_UNS_HDR; +unsolicit_head_again: + rc = handle_xmstate_uns_hdr(conn, ctask); + if (rc) + return rc; + } + + if (ctask->xmstate & XMSTATE_UNS_DATA) { + rc = handle_xmstate_uns_data(conn, ctask); + if (rc == 1) + goto unsolicit_head_again; + else if (rc) + return rc; + goto done; + } + + if (ctask->xmstate & XMSTATE_SOL_HDR) { + struct iscsi_r2t_info *r2t; + + ctask->xmstate &= ~XMSTATE_SOL_HDR; + ctask->xmstate |= XMSTATE_SOL_DATA; + if (!ctask->r2t) + __kfifo_get(ctask->r2tqueue, (void*)&ctask->r2t, + sizeof(void*)); +solicit_head_again: + r2t = ctask->r2t; + if (conn->hdrdgst_en) + iscsi_hdr_digest(conn, &r2t->headbuf, + (u8*)r2t->dtask->hdrext); + if (iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count)) { + ctask->xmstate &= ~XMSTATE_SOL_DATA; + ctask->xmstate |= XMSTATE_SOL_HDR; + return -EAGAIN; + } + + debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n", + r2t->solicit_datasn - 1, ctask->itt, r2t->data_count, + r2t->sent); + } + + if (ctask->xmstate & XMSTATE_SOL_DATA) { + rc = handle_xmstate_sol_data(conn, ctask); + if (rc == 1) + goto solicit_head_again; + if (rc) + return rc; + } + +done: + /* + * Last thing to check is whether we need to send write + * padding. Note that we check for xmstate equality, not just the bit. + */ + if (ctask->xmstate == XMSTATE_W_PAD) + rc = handle_xmstate_w_pad(conn, ctask); + + return rc; +} + +/** + * iscsi_data_xmit - xmit any command into the scheduled connection + * @conn: iscsi connection + * + * Notes: + * The function can return -EAGAIN in which case the caller must + * re-schedule it again later or recover. '0' return code means + * successful xmit. + **/ +static int +iscsi_data_xmit(struct iscsi_conn *conn) +{ + if (unlikely(conn->suspend_tx)) { + debug_tcp("conn %d Tx suspended!\n", conn->id); + return 0; + } + + /* + * Transmit in the following order: + * + * 1) un-finished xmit (ctask or mtask) + * 2) immediate control PDUs + * 3) write data + * 4) SCSI commands + * 5) non-immediate control PDUs + * + * No need to lock around __kfifo_get as long as + * there's one producer and one consumer. 
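Every handle_xmstate_*() helper above follows the same resumable-send shape: clear the state bit, try to push bytes, and put the bit back on a short write so the next pass resumes exactly where this one stopped. A minimal sketch of that pattern (illustrative, reusing the driver's own helpers):

	static inline int example_resumable_step(struct iscsi_conn *conn,
						 struct iscsi_cmd_task *ctask,
						 int bit)
	{
		ctask->xmstate &= ~bit;		/* assume this step completes      */
		if (iscsi_sendhdr(conn, &ctask->headbuf, 0)) {
			ctask->xmstate |= bit;	/* short write: remember the step  */
			return -EAGAIN;		/* xmitwork will re-enter later    */
		}
		return 0;			/* move on to the next state bit   */
	}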
+ */ + + BUG_ON(conn->ctask && conn->mtask); + + if (conn->ctask) { + if (iscsi_ctask_xmit(conn, conn->ctask)) + goto again; + /* done with this in-progress ctask */ + conn->ctask = NULL; + } + if (conn->mtask) { + if (iscsi_mtask_xmit(conn, conn->mtask)) + goto again; + /* done with this in-progress mtask */ + conn->mtask = NULL; + } + + /* process immediate first */ + if (unlikely(__kfifo_len(conn->immqueue))) { + struct iscsi_session *session = conn->session; + while (__kfifo_get(conn->immqueue, (void*)&conn->mtask, + sizeof(void*))) { + if (iscsi_mtask_xmit(conn, conn->mtask)) + goto again; + + if (conn->mtask->hdr.itt == + cpu_to_be32(ISCSI_RESERVED_TAG)) { + spin_lock_bh(&session->lock); + __kfifo_put(session->mgmtpool.queue, + (void*)&conn->mtask, sizeof(void*)); + spin_unlock_bh(&session->lock); + } + } + /* done with this mtask */ + conn->mtask = NULL; + } + + /* process write queue */ + while (__kfifo_get(conn->writequeue, (void*)&conn->ctask, + sizeof(void*))) { + if (iscsi_ctask_xmit(conn, conn->ctask)) + goto again; + } + + /* process command queue */ + while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, + sizeof(void*))) { + if (iscsi_ctask_xmit(conn, conn->ctask)) + goto again; + } + /* done with this ctask */ + conn->ctask = NULL; + + /* process the rest control plane PDUs, if any */ + if (unlikely(__kfifo_len(conn->mgmtqueue))) { + struct iscsi_session *session = conn->session; + + while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask, + sizeof(void*))) { + if (iscsi_mtask_xmit(conn, conn->mtask)) + goto again; + + if (conn->mtask->hdr.itt == + cpu_to_be32(ISCSI_RESERVED_TAG)) { + spin_lock_bh(&session->lock); + __kfifo_put(session->mgmtpool.queue, + (void*)&conn->mtask, + sizeof(void*)); + spin_unlock_bh(&session->lock); + } + } + /* done with this mtask */ + conn->mtask = NULL; + } + + return 0; + +again: + if (unlikely(conn->suspend_tx)) + return 0; + + return -EAGAIN; +} + +static void +iscsi_xmitworker(void *data) +{ + struct iscsi_conn *conn = data; + + /* + * serialize Xmit worker on a per-connection basis. 
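One subtlety in iscsi_queuecommand() below is the command-window test "(int)(session->max_cmdsn - session->cmdsn) < 0": it uses 32-bit serial-number arithmetic, so it keeps working when CmdSN wraps. A small illustration with invented values:

	static inline int example_window_open(u32 cmdsn, u32 max_cmdsn)
	{
		/* cmdsn 0xfffffffe, max_cmdsn 0x3: 0x3 - 0xfffffffe = 5, open
		 * cmdsn 10,         max_cmdsn 9  : 9 - 10 = 0xffffffff -> -1, closed */
		return (int)(max_cmdsn - cmdsn) >= 0;
	}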
+ */ + down(&conn->xmitsema); + if (iscsi_data_xmit(conn)) + schedule_work(&conn->xmitwork); + up(&conn->xmitsema); +} + +#define FAILURE_BAD_HOST 1 +#define FAILURE_SESSION_FAILED 2 +#define FAILURE_SESSION_FREED 3 +#define FAILURE_WINDOW_CLOSED 4 +#define FAILURE_SESSION_TERMINATE 5 + +static int +iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) +{ + struct Scsi_Host *host; + int reason = 0; + struct iscsi_session *session; + struct iscsi_conn *conn = NULL; + struct iscsi_cmd_task *ctask = NULL; + + sc->scsi_done = done; + sc->result = 0; + + host = sc->device->host; + session = iscsi_hostdata(host->hostdata); + BUG_ON(host != session->host); + + spin_lock(&session->lock); + + if (session->state != ISCSI_STATE_LOGGED_IN) { + if (session->state == ISCSI_STATE_FAILED) { + reason = FAILURE_SESSION_FAILED; + goto reject; + } else if (session->state == ISCSI_STATE_TERMINATE) { + reason = FAILURE_SESSION_TERMINATE; + goto fault; + } + reason = FAILURE_SESSION_FREED; + goto fault; + } + + /* + * Check for iSCSI window and take care of CmdSN wrap-around + */ + if ((int)(session->max_cmdsn - session->cmdsn) < 0) { + reason = FAILURE_WINDOW_CLOSED; + goto reject; + } + + conn = session->leadconn; + + __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); + BUG_ON(ctask->sc); + + sc->SCp.phase = session->age; + sc->SCp.ptr = (char*)ctask; + iscsi_cmd_init(conn, ctask, sc); + + __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); + debug_scsi( + "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", + sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", + conn->id, (long)sc, ctask->itt, sc->request_bufflen, + session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); + spin_unlock(&session->lock); + + if (!in_interrupt() && !down_trylock(&conn->xmitsema)) { + spin_unlock_irq(host->host_lock); + if (iscsi_data_xmit(conn)) + schedule_work(&conn->xmitwork); + up(&conn->xmitsema); + spin_lock_irq(host->host_lock); + } else + schedule_work(&conn->xmitwork); + + return 0; + +reject: + spin_unlock(&session->lock); + debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); + return SCSI_MLQUEUE_HOST_BUSY; + +fault: + spin_unlock(&session->lock); + printk(KERN_ERR "iscsi_tcp: cmd 0x%x is not queued (%d)\n", + sc->cmnd[0], reason); + sc->sense_buffer[0] = 0x70; + sc->sense_buffer[2] = NOT_READY; + sc->sense_buffer[7] = 0x6; + sc->sense_buffer[12] = 0x08; + sc->sense_buffer[13] = 0x00; + sc->result = (DID_NO_CONNECT << 16); + sc->resid = sc->request_bufflen; + sc->scsi_done(sc); + return 0; +} + +static int +iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size) +{ + int i; + + *items = kmalloc(max * sizeof(void*), GFP_KERNEL); + if (*items == NULL) + return -ENOMEM; + + q->max = max; + q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL); + if (q->pool == NULL) { + kfree(*items); + return -ENOMEM; + } + + q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), + GFP_KERNEL, NULL); + if (q->queue == ERR_PTR(-ENOMEM)) { + kfree(q->pool); + kfree(*items); + return -ENOMEM; + } + + for (i = 0; i < max; i++) { + q->pool[i] = kmalloc(item_size, GFP_KERNEL); + if (q->pool[i] == NULL) { + int j; + + for (j = 0; j < i; j++) + kfree(q->pool[j]); + + kfifo_free(q->queue); + kfree(q->pool); + kfree(*items); + return -ENOMEM; + } + memset(q->pool[i], 0, item_size); + (*items)[i] = q->pool[i]; + __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*)); + } + return 0; +} + +static void +iscsi_pool_free(struct iscsi_queue *q, void 
**items) +{ + int i; + + for (i = 0; i < q->max; i++) + kfree(items[i]); + kfree(q->pool); + kfree(items); +} + +static iscsi_connh_t +iscsi_conn_create(iscsi_sessionh_t sessionh, uint32_t conn_idx) +{ + struct iscsi_session *session = iscsi_ptr(sessionh); + struct iscsi_conn *conn = NULL; + + conn = kmalloc(sizeof(struct iscsi_conn), GFP_KERNEL); + if (conn == NULL) + goto conn_alloc_fail; + memset(conn, 0, sizeof(struct iscsi_conn)); + + conn->c_stage = ISCSI_CONN_INITIAL_STAGE; + conn->in_progress = IN_PROGRESS_WAIT_HEADER; + conn->id = conn_idx; + conn->exp_statsn = 0; + conn->tmabort_state = TMABORT_INITIAL; + + /* initial operational parameters */ + conn->hdr_size = sizeof(struct iscsi_hdr); + conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; + conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; + + spin_lock_init(&conn->lock); + + /* initialize general xmit PDU commands queue */ + conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), + GFP_KERNEL, NULL); + if (conn->xmitqueue == ERR_PTR(-ENOMEM)) + goto xmitqueue_alloc_fail; + + /* initialize write response PDU commands queue */ + conn->writequeue = kfifo_alloc(session->cmds_max * sizeof(void*), + GFP_KERNEL, NULL); + if (conn->writequeue == ERR_PTR(-ENOMEM)) + goto writequeue_alloc_fail; + + /* initialize general immediate & non-immediate PDU commands queue */ + conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), + GFP_KERNEL, NULL); + if (conn->immqueue == ERR_PTR(-ENOMEM)) + goto immqueue_alloc_fail; + + conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), + GFP_KERNEL, NULL); + if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) + goto mgmtqueue_alloc_fail; + + INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); + + /* allocate login_mtask used for the login/text sequences */ + spin_lock_bh(&session->lock); + if (!__kfifo_get(session->mgmtpool.queue, + (void*)&conn->login_mtask, + sizeof(void*))) { + spin_unlock_bh(&session->lock); + goto login_mtask_alloc_fail; + } + spin_unlock_bh(&session->lock); + + /* allocate initial PDU receive place holder */ + if (conn->data_size <= PAGE_SIZE) + conn->data = kmalloc(conn->data_size, GFP_KERNEL); + else + conn->data = (void*)__get_free_pages(GFP_KERNEL, + get_order(conn->data_size)); + if (!conn->data) + goto max_recv_dlenght_alloc_fail; + + init_timer(&conn->tmabort_timer); + init_MUTEX(&conn->xmitsema); + init_waitqueue_head(&conn->ehwait); + + return iscsi_handle(conn); + +max_recv_dlenght_alloc_fail: + spin_lock_bh(&session->lock); + __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, + sizeof(void*)); + spin_unlock_bh(&session->lock); +login_mtask_alloc_fail: + kfifo_free(conn->mgmtqueue); +mgmtqueue_alloc_fail: + kfifo_free(conn->immqueue); +immqueue_alloc_fail: + kfifo_free(conn->writequeue); +writequeue_alloc_fail: + kfifo_free(conn->xmitqueue); +xmitqueue_alloc_fail: + kfree(conn); +conn_alloc_fail: + return iscsi_handle(NULL); +} + +static void +iscsi_conn_destroy(iscsi_connh_t connh) +{ + struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_session *session = conn->session; + + down(&conn->xmitsema); + set_bit(SUSPEND_BIT, &conn->suspend_tx); + if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) { + struct sock *sk = conn->sock->sk; + + /* + * conn_start() has never been called! + * need to cleanup the socket. 
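The cmdpool/mgmtpool helpers above (iscsi_pool_init()/iscsi_pool_free()) amount to a preallocated array fronted by a kfifo of free pointers; elements are borrowed and returned with plain kfifo operations. A usage sketch under that assumption (hypothetical pool, illustrative only):

	static int example_pool_usage(void)
	{
		struct iscsi_queue pool;
		struct iscsi_cmd_task **cmds, *ctask;

		if (iscsi_pool_init(&pool, 128, (void***)&cmds,
				    sizeof(struct iscsi_cmd_task)))
			return -ENOMEM;

		/* borrow a preallocated task ... */
		__kfifo_get(pool.queue, (void*)&ctask, sizeof(void*));
		/* ... and return it when the command completes */
		__kfifo_put(pool.queue, (void*)&ctask, sizeof(void*));

		iscsi_pool_free(&pool, (void**)cmds);
		return 0;
	}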
+ */ + write_lock_bh(&sk->sk_callback_lock); + set_bit(SUSPEND_BIT, &conn->suspend_rx); + write_unlock_bh(&sk->sk_callback_lock); + + sock_hold(conn->sock->sk); + iscsi_conn_restore_callbacks(conn); + sock_put(conn->sock->sk); + sock_release(conn->sock); + conn->sock = NULL; + } + + spin_lock_bh(&session->lock); + conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; + if (session->leadconn == conn) { + /* + * leading connection? then give up on recovery. + */ + session->state = ISCSI_STATE_TERMINATE; + wake_up(&conn->ehwait); + } + spin_unlock_bh(&session->lock); + + up(&conn->xmitsema); + + /* + * Block until all in-progress commands for this connection + * time out or fail. + */ + for (;;) { + spin_lock_bh(&conn->lock); + if (!session->host->host_busy) { /* OK for ERL == 0 */ + spin_unlock_bh(&conn->lock); + break; + } + spin_unlock_bh(&conn->lock); + msleep_interruptible(500); + printk("conn_destroy(): host_busy %d host_failed %d\n", + session->host->host_busy, session->host->host_failed); + /* + * force eh_abort() to unblock + */ + wake_up(&conn->ehwait); + } + + /* now free crypto */ + if (conn->hdrdgst_en || conn->datadgst_en) { + if (conn->tx_tfm) + crypto_free_tfm(conn->tx_tfm); + if (conn->rx_tfm) + crypto_free_tfm(conn->rx_tfm); + if (conn->data_tx_tfm) + crypto_free_tfm(conn->data_tx_tfm); + if (conn->data_rx_tfm) + crypto_free_tfm(conn->data_rx_tfm); + } + + /* free conn->data, size = MaxRecvDataSegmentLength */ + if (conn->data_size <= PAGE_SIZE) + kfree(conn->data); + else + free_pages((unsigned long)conn->data, + get_order(conn->data_size)); + + spin_lock_bh(&session->lock); + __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, + sizeof(void*)); + list_del(&conn->item); + if (list_empty(&session->connections)) + session->leadconn = NULL; + if (session->leadconn && session->leadconn == conn) + session->leadconn = container_of(session->connections.next, + struct iscsi_conn, item); + + if (session->leadconn == NULL) + /* none connections exits.. 
reset sequencing */ + session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; + spin_unlock_bh(&session->lock); + + kfifo_free(conn->xmitqueue); + kfifo_free(conn->writequeue); + kfifo_free(conn->immqueue); + kfifo_free(conn->mgmtqueue); + kfree(conn); +} + +static int +iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, + uint32_t transport_fd, int is_leading) +{ + struct iscsi_session *session = iscsi_ptr(sessionh); + struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh); + struct sock *sk; + struct socket *sock; + int err; + + /* lookup for existing socket */ + sock = sockfd_lookup(transport_fd, &err); + if (!sock) { + printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err); + return -EEXIST; + } + + /* lookup for existing connection */ + spin_lock_bh(&session->lock); + list_for_each_entry(tmp, &session->connections, item) { + if (tmp == conn) { + if (conn->c_stage != ISCSI_CONN_STOPPED || + conn->stop_stage == STOP_CONN_TERM) { + printk(KERN_ERR "iscsi_tcp: can't bind " + "non-stopped connection (%d:%d)\n", + conn->c_stage, conn->stop_stage); + spin_unlock_bh(&session->lock); + return -EIO; + } + break; + } + } + if (tmp != conn) { + /* bind new iSCSI connection to session */ + conn->session = session; + + list_add(&conn->item, &session->connections); + } + spin_unlock_bh(&session->lock); + + if (conn->stop_stage != STOP_CONN_SUSPEND) { + /* bind iSCSI connection and socket */ + conn->sock = sock; + + /* setup Socket parameters */ + sk = sock->sk; + sk->sk_reuse = 1; + sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ + sk->sk_allocation = GFP_ATOMIC; + + /* FIXME: disable Nagle's algorithm */ + + /* + * Intercept TCP callbacks for sendfile like receive + * processing. + */ + iscsi_conn_set_callbacks(conn); + + /* + * set receive state machine into initial state + */ + conn->in_progress = IN_PROGRESS_WAIT_HEADER; + } + + if (is_leading) + session->leadconn = conn; + + /* + * Unblock xmitworker(), Login Phase will pass through. + */ + clear_bit(SUSPEND_BIT, &conn->suspend_rx); + clear_bit(SUSPEND_BIT, &conn->suspend_tx); + + return 0; +} + +static int +iscsi_conn_start(iscsi_connh_t connh) +{ + struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_session *session = conn->session; + struct sock *sk; + + /* FF phase warming up... */ + + if (session == NULL) { + printk(KERN_ERR "iscsi_tcp: can't start unbound connection\n"); + return -EPERM; + } + + sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + spin_lock_bh(&session->lock); + conn->c_stage = ISCSI_CONN_STARTED; + session->state = ISCSI_STATE_LOGGED_IN; + + switch(conn->stop_stage) { + case STOP_CONN_RECOVER: + /* + * unblock eh_abort() if it is blocked. 
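For context on the transport_fd that iscsi_conn_bind() above resolves with sockfd_lookup(): the TCP connection itself is made by the userspace initiator and only the file descriptor is handed to the kernel. A rough userspace-side sketch (illustrative, not part of this patch; the hand-off to the kernel goes through the iSCSI control interface):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <unistd.h>

	int example_open_iscsi_tcp(const struct sockaddr_in *target) /* usually :3260 */
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		if (connect(fd, (const struct sockaddr *)target, sizeof(*target)) < 0) {
			close(fd);
			return -1;
		}
		/* this fd is what eventually reaches iscsi_conn_bind()
		 * as 'transport_fd' */
		return fd;
	}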
re-try all + * commands after successful recovery + */ + session->conn_cnt++; + conn->stop_stage = 0; + conn->tmabort_state = TMABORT_INITIAL; + session->age++; + wake_up(&conn->ehwait); + break; + case STOP_CONN_TERM: + session->conn_cnt++; + conn->stop_stage = 0; + break; + case STOP_CONN_SUSPEND: + conn->stop_stage = 0; + clear_bit(SUSPEND_BIT, &conn->suspend_rx); + clear_bit(SUSPEND_BIT, &conn->suspend_tx); + break; + default: + break; + } + spin_unlock_bh(&session->lock); + write_unlock_bh(&sk->sk_callback_lock); + + return 0; +} + +static void +iscsi_conn_stop(iscsi_connh_t connh, int flag) +{ + struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_session *session = conn->session; + struct sock *sk; + unsigned long flags; + + BUG_ON(!conn->sock); + sk = conn->sock->sk; + write_lock_bh(&sk->sk_callback_lock); + set_bit(SUSPEND_BIT, &conn->suspend_rx); + write_unlock_bh(&sk->sk_callback_lock); + + down(&conn->xmitsema); + + spin_lock_irqsave(session->host->host_lock, flags); + spin_lock(&session->lock); + conn->stop_stage = flag; + conn->c_stage = ISCSI_CONN_STOPPED; + set_bit(SUSPEND_BIT, &conn->suspend_tx); + + if (flag != STOP_CONN_SUSPEND) + session->conn_cnt--; + + if (session->conn_cnt == 0 || session->leadconn == conn) + session->state = ISCSI_STATE_FAILED; + + spin_unlock(&session->lock); + spin_unlock_irqrestore(session->host->host_lock, flags); + + if (flag == STOP_CONN_TERM || flag == STOP_CONN_RECOVER) { + struct iscsi_cmd_task *ctask; + struct iscsi_mgmt_task *mtask; + + /* + * Socket must go now. + */ + sock_hold(conn->sock->sk); + iscsi_conn_restore_callbacks(conn); + sock_put(conn->sock->sk); + + /* + * flush xmit queues. + */ + spin_lock_bh(&session->lock); + while (__kfifo_get(conn->writequeue, (void*)&ctask, + sizeof(void*)) || + __kfifo_get(conn->xmitqueue, (void*)&ctask, + sizeof(void*))) { + struct iscsi_r2t_info *r2t; + + /* + * flush ctask's r2t queues + */ + while (__kfifo_get(ctask->r2tqueue, (void*)&r2t, + sizeof(void*))) + __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + + spin_unlock_bh(&session->lock); + local_bh_disable(); + iscsi_ctask_cleanup(conn, ctask); + local_bh_enable(); + spin_lock_bh(&session->lock); + } + conn->ctask = NULL; + while (__kfifo_get(conn->immqueue, (void*)&mtask, + sizeof(void*)) || + __kfifo_get(conn->mgmtqueue, (void*)&mtask, + sizeof(void*))) { + __kfifo_put(session->mgmtpool.queue, + (void*)&mtask, sizeof(void*)); + } + conn->mtask = NULL; + spin_unlock_bh(&session->lock); + + /* + * release socket only after we stopped data_xmit() + * activity and flushed all outstandings + */ + sock_release(conn->sock); + conn->sock = NULL; + + /* + * for connection level recovery we should not calculate + * header digest. conn->hdr_size used for optimization + * in hdr_extract() and will be re-negotiated at + * set_param() time. 
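The initiator task tag built in iscsi_conn_send_generic() below packs three fields into one 32-bit value using the ITT_MASK/CID_SHIFT/AGE_SHIFT constants from iscsi_tcp.h (added later in this patch): bits 0-11 index the task, bits 12-27 carry the CID and bits 28-31 the session age. A worked example with invented values:

	static inline u32 example_build_itt(u32 task_itt, u32 cid, u32 age)
	{
		/* task_itt 0xa01 (mgmt slot 1), cid 0, age 2  ->  0x20000a01;
		 * iscsi_eh_abort() reverses it: 0x20000a01 & ITT_MASK = 0xa01,
		 * minus ISCSI_MGMT_ITT_OFFSET (0xa00) = mgmt_cmds[1]          */
		return task_itt | (cid << CID_SHIFT) | (age << AGE_SHIFT);
	}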
+ */ + if (flag == STOP_CONN_RECOVER) + conn->hdr_size = sizeof(struct iscsi_hdr); + } + up(&conn->xmitsema); +} + +static int +iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, uint32_t data_size) +{ + struct iscsi_session *session = conn->session; + struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; + struct iscsi_mgmt_task *mtask; + + spin_lock_bh(&session->lock); + if (session->state == ISCSI_STATE_TERMINATE) { + spin_unlock_bh(&session->lock); + return -EPERM; + } + if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) || + hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) + /* + * Login and Text are sent serially, in + * request-followed-by-response sequence. + * Same mtask can be used. Same ITT must be used. + * Note that login_mtask is preallocated at conn_create(). + */ + mtask = conn->login_mtask; + else { + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); + BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); + + if (!__kfifo_get(session->mgmtpool.queue, + (void*)&mtask, sizeof(void*))) { + spin_unlock_bh(&session->lock); + return -ENOSPC; + } + } + + /* + * pre-format CmdSN and ExpStatSN for outgoing PDU. + */ + if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) { + hdr->itt = mtask->itt | (conn->id << CID_SHIFT) | + (session->age << AGE_SHIFT); + nop->cmdsn = cpu_to_be32(session->cmdsn); + if (conn->c_stage == ISCSI_CONN_STARTED && + !(hdr->opcode & ISCSI_OP_IMMEDIATE)) + session->cmdsn++; + } else + /* do not advance CmdSN */ + nop->cmdsn = cpu_to_be32(session->cmdsn); + + nop->exp_statsn = cpu_to_be32(conn->exp_statsn); + + memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr)); + + iscsi_buf_init_virt(&mtask->headbuf, (char*)&mtask->hdr, + sizeof(struct iscsi_hdr)); + + spin_unlock_bh(&session->lock); + + if (data_size) { + memcpy(mtask->data, data, data_size); + mtask->data_count = data_size; + } else + mtask->data_count = 0; + + mtask->xmstate = XMSTATE_IMM_HDR; + + if (mtask->data_count) { + iscsi_buf_init_iov(&mtask->sendbuf, (char*)mtask->data, + mtask->data_count); + } + + debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", + hdr->opcode, hdr->itt, data_size); + + /* + * since send_pdu() could be called at least from two contexts, + * we need to serialize __kfifo_put, so we don't have to take + * additional lock on fast data-path + */ + if (hdr->opcode & ISCSI_OP_IMMEDIATE) + __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*)); + else + __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*)); + + schedule_work(&conn->xmitwork); + + return 0; +} + +static int +iscsi_eh_host_reset(struct scsi_cmnd *sc) +{ + struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; + struct iscsi_conn *conn = ctask->conn; + struct iscsi_session *session = conn->session; + + spin_lock_bh(&session->lock); + if (session->state == ISCSI_STATE_TERMINATE) { + debug_scsi("failing host reset: session terminated " + "[CID %d age %d]", conn->id, session->age); + spin_unlock_bh(&session->lock); + return FAILED; + } + spin_unlock_bh(&session->lock); + + debug_scsi("failing connection CID %d due to SCSI host reset " + "[itt 0x%x age %d]", conn->id, ctask->itt, + session->age); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + + return SUCCESS; +} + +static void +iscsi_tmabort_timedout(unsigned long data) +{ + struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data; + struct iscsi_conn *conn = ctask->conn; + struct iscsi_session *session = conn->session; + + spin_lock(&session->lock); + if (conn->tmabort_state == TMABORT_INITIAL) { + 
__kfifo_put(session->mgmtpool.queue, + (void*)&ctask->mtask, sizeof(void*)); + conn->tmabort_state = TMABORT_TIMEDOUT; + debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n", + (long)ctask->sc, ctask->itt); + /* unblock eh_abort() */ + wake_up(&conn->ehwait); + } + spin_unlock(&session->lock); +} + +static int +iscsi_eh_abort(struct scsi_cmnd *sc) +{ + int rc; + struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; + struct iscsi_conn *conn = ctask->conn; + struct iscsi_session *session = conn->session; + + conn->eh_abort_cnt++; + debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); + + /* + * two cases for ERL=0 here: + * + * 1) connection-level failure; + * 2) recovery due protocol error; + */ + down(&conn->xmitsema); + spin_lock_bh(&session->lock); + if (session->state != ISCSI_STATE_LOGGED_IN) { + if (session->state == ISCSI_STATE_TERMINATE) { + spin_unlock_bh(&session->lock); + up(&conn->xmitsema); + goto failed; + } + spin_unlock_bh(&session->lock); + } else { + struct iscsi_tm *hdr = &conn->tmhdr; + + /* + * Still LOGGED_IN... + */ + + if (!ctask->sc || sc->SCp.phase != session->age) { + /* + * 1) ctask completed before time out. But session + * is still ok => Happy Retry. + * 2) session was re-open during time out of ctask. + */ + spin_unlock_bh(&session->lock); + up(&conn->xmitsema); + goto success; + } + conn->tmabort_state = TMABORT_INITIAL; + spin_unlock_bh(&session->lock); + + /* + * ctask timed out but session is OK + * ERL=0 requires task mgmt abort to be issued on each + * failed command. requests must be serialized. + */ + memset(hdr, 0, sizeof(struct iscsi_tm)); + hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; + hdr->flags = ISCSI_TM_FUNC_ABORT_TASK; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun)); + hdr->rtt = ctask->hdr.itt; + hdr->refcmdsn = ctask->hdr.cmdsn; + + rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr, + NULL, 0); + if (rc) { + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + debug_scsi("abort sent failure [itt 0x%x]", ctask->itt); + } else { + struct iscsi_r2t_info *r2t; + + /* + * TMF abort vs. TMF response race logic + */ + spin_lock_bh(&session->lock); + ctask->mtask = (struct iscsi_mgmt_task *) + session->mgmt_cmds[(hdr->itt & ITT_MASK) - + ISCSI_MGMT_ITT_OFFSET]; + /* + * have to flush r2tqueue to avoid r2t leaks + */ + while (__kfifo_get(ctask->r2tqueue, (void*)&r2t, + sizeof(void*))) { + __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + } + if (conn->tmabort_state == TMABORT_INITIAL) { + conn->tmfcmd_pdus_cnt++; + conn->tmabort_timer.expires = 3*HZ + jiffies; + conn->tmabort_timer.function = + iscsi_tmabort_timedout; + conn->tmabort_timer.data = (unsigned long)ctask; + add_timer(&conn->tmabort_timer); + debug_scsi("abort sent [itt 0x%x]", ctask->itt); + } else { + if (!ctask->sc || + conn->tmabort_state == TMABORT_SUCCESS) { + conn->tmabort_state = TMABORT_INITIAL; + spin_unlock_bh(&session->lock); + up(&conn->xmitsema); + goto success; + } + conn->tmabort_state = TMABORT_INITIAL; + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + } + spin_unlock_bh(&session->lock); + } + } + up(&conn->xmitsema); + + + /* + * block eh thread until: + * + * 1) abort response; + * 2) abort timeout; + * 3) session re-opened; + * 4) session terminated; + */ + for (;;) { + int p_state = session->state; + + rc = wait_event_interruptible(conn->ehwait, + (p_state == ISCSI_STATE_LOGGED_IN ? 
+ (session->state == ISCSI_STATE_TERMINATE || + conn->tmabort_state != TMABORT_INITIAL) : + (session->state == ISCSI_STATE_TERMINATE || + session->state == ISCSI_STATE_LOGGED_IN))); + if (rc) { + /* shutdown.. */ + session->state = ISCSI_STATE_TERMINATE; + goto failed; + } + + if (signal_pending(current)) + flush_signals(current); + + if (session->state == ISCSI_STATE_TERMINATE) + goto failed; + + spin_lock_bh(&session->lock); + if (sc->SCp.phase == session->age && + (conn->tmabort_state == TMABORT_TIMEDOUT || + conn->tmabort_state == TMABORT_FAILED)) { + conn->tmabort_state = TMABORT_INITIAL; + if (!ctask->sc) { + /* + * ctask completed before tmf abort response or + * time out. + * But session is still ok => Happy Retry. + */ + spin_unlock_bh(&session->lock); + break; + } + spin_unlock_bh(&session->lock); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + continue; + } + spin_unlock_bh(&session->lock); + break; + } + +success: + debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); + rc = SUCCESS; + goto exit; + +failed: + debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); + rc = FAILED; + +exit: + del_timer_sync(&conn->tmabort_timer); + + down(&conn->xmitsema); + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + iscsi_ctask_cleanup(conn, ctask); + write_unlock_bh(&sk->sk_callback_lock); + } + up(&conn->xmitsema); + return rc; +} + +static int +iscsi_r2tpool_alloc(struct iscsi_session *session) +{ + int i; + int cmd_i; + + /* + * initialize per-task: R2T pool and xmit queue + */ + for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { + struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; + + /* + * pre-allocated x4 as much r2ts to handle race when + * target acks DataOut faster than we data_xmit() queues + * could replenish r2tqueue. + */ + + /* R2T pool */ + if (iscsi_pool_init(&ctask->r2tpool, session->max_r2t * 4, + (void***)&ctask->r2ts, sizeof(struct iscsi_r2t_info))) { + goto r2t_alloc_fail; + } + + /* R2T xmit queue */ + ctask->r2tqueue = kfifo_alloc( + session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); + if (ctask->r2tqueue == ERR_PTR(-ENOMEM)) { + iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts); + goto r2t_alloc_fail; + } + + /* + * number of + * Data-Out PDU's within R2T-sequence can be quite big; + * using mempool + */ + ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX, + mempool_alloc_slab, mempool_free_slab, taskcache); + if (ctask->datapool == NULL) { + kfifo_free(ctask->r2tqueue); + iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts); + goto r2t_alloc_fail; + } + INIT_LIST_HEAD(&ctask->dataqueue); + } + + return 0; + +r2t_alloc_fail: + for (i = 0; i < cmd_i; i++) { + mempool_destroy(session->cmds[i]->datapool); + kfifo_free(session->cmds[i]->r2tqueue); + iscsi_pool_free(&session->cmds[i]->r2tpool, + (void**)session->cmds[i]->r2ts); + } + return -ENOMEM; +} + +static void +iscsi_r2tpool_free(struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + mempool_destroy(session->cmds[i]->datapool); + kfifo_free(session->cmds[i]->r2tqueue); + iscsi_pool_free(&session->cmds[i]->r2tpool, + (void**)session->cmds[i]->r2ts); + } +} + +static struct scsi_host_template iscsi_sht = { + .name = "iSCSI Initiator over TCP/IP, v." 
+ ISCSI_VERSION_STR, + .queuecommand = iscsi_queuecommand, + .can_queue = ISCSI_XMIT_CMDS_MAX - 1, + .sg_tablesize = ISCSI_SG_TABLESIZE, + .cmd_per_lun = ISCSI_CMD_PER_LUN, + .eh_abort_handler = iscsi_eh_abort, + .eh_host_reset_handler = iscsi_eh_host_reset, + .use_clustering = DISABLE_CLUSTERING, + .proc_name = "iscsi_tcp", + .this_id = -1, +}; + +static iscsi_sessionh_t +iscsi_session_create(uint32_t initial_cmdsn, struct Scsi_Host *host) +{ + int cmd_i; + struct iscsi_session *session; + + session = iscsi_hostdata(host->hostdata); + memset(session, 0, sizeof(struct iscsi_session)); + + session->host = host; + session->id = host->host_no; + session->state = ISCSI_STATE_LOGGED_IN; + session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; + session->cmds_max = ISCSI_XMIT_CMDS_MAX; + session->cmdsn = initial_cmdsn; + session->exp_cmdsn = initial_cmdsn + 1; + session->max_cmdsn = initial_cmdsn + 1; + session->max_r2t = 1; + + /* initialize SCSI PDU commands pool */ + if (iscsi_pool_init(&session->cmdpool, session->cmds_max, + (void***)&session->cmds, sizeof(struct iscsi_cmd_task))) + goto cmdpool_alloc_fail; + + /* pre-format cmds pool with ITT */ + for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) + session->cmds[cmd_i]->itt = cmd_i; + + spin_lock_init(&session->lock); + INIT_LIST_HEAD(&session->connections); + + /* initialize immediate command pool */ + if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max, + (void***)&session->mgmt_cmds, sizeof(struct iscsi_mgmt_task))) + goto mgmtpool_alloc_fail; + + + /* pre-format immediate cmds pool with ITT */ + for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { + session->mgmt_cmds[cmd_i]->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; + session->mgmt_cmds[cmd_i]->data = kmalloc( + DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); + if (!session->mgmt_cmds[cmd_i]->data) { + int j; + + for (j = 0; j < cmd_i; j++) + kfree(session->mgmt_cmds[j]->data); + goto immdata_alloc_fail; + } + } + + if (iscsi_r2tpool_alloc(session)) + goto r2tpool_alloc_fail; + + return iscsi_handle(session); + +r2tpool_alloc_fail: + for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) + kfree(session->mgmt_cmds[cmd_i]->data); + iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); +immdata_alloc_fail: +mgmtpool_alloc_fail: + iscsi_pool_free(&session->cmdpool, (void**)session->cmds); +cmdpool_alloc_fail: + return iscsi_handle(NULL); +} + +static void +iscsi_session_destroy(iscsi_sessionh_t sessionh) +{ + int cmd_i; + struct iscsi_data_task *dtask, *n; + struct iscsi_session *session = iscsi_ptr(sessionh); + + for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { + struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; + list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { + list_del(&dtask->item); + mempool_free(dtask, ctask->datapool); + } + } + + for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) + kfree(session->mgmt_cmds[cmd_i]->data); + + iscsi_r2tpool_free(session); + iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); + iscsi_pool_free(&session->cmdpool, (void**)session->cmds); +} + +static int +iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param, + uint32_t value) +{ + struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_session *session = conn->session; + + spin_lock_bh(&session->lock); + if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE && + conn->stop_stage != STOP_CONN_RECOVER) { + printk(KERN_ERR "iscsi_tcp: can not change parameter [%d]\n", + param); + spin_unlock_bh(&session->lock); + return 0; + } + 
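One detail worth calling out in the switch below: the ISCSI_PARAM_MAX_R2T case forces the value to a power of two, and "x & (x - 1)" is the usual test for that (it clears the lowest set bit, so only powers of two give zero). For example:

	static inline unsigned int example_fix_max_r2t(unsigned int value)
	{
		/* 8 & 7 = 0  -> already a power of two, keep 8
		 * 5 & 4 = 4  -> not a power of two, roundup_pow_of_two(5) = 8 */
		return (value & (value - 1)) ? roundup_pow_of_two(value) : value;
	}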
spin_unlock_bh(&session->lock); + + switch(param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: { + char *saveptr = conn->data; + int flags = GFP_KERNEL; + + if (conn->data_size >= value) { + conn->max_recv_dlength = value; + break; + } + + spin_lock_bh(&session->lock); + if (conn->stop_stage == STOP_CONN_RECOVER) + flags = GFP_ATOMIC; + spin_unlock_bh(&session->lock); + + if (value <= PAGE_SIZE) + conn->data = kmalloc(value, flags); + else + conn->data = (void*)__get_free_pages(flags, + get_order(value)); + if (conn->data == NULL) { + conn->data = saveptr; + return -ENOMEM; + } + if (conn->data_size <= PAGE_SIZE) + kfree(saveptr); + else + free_pages((unsigned long)saveptr, + get_order(conn->data_size)); + conn->max_recv_dlength = value; + conn->data_size = value; + } + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + conn->max_xmit_dlength = value; + break; + case ISCSI_PARAM_HDRDGST_EN: + conn->hdrdgst_en = value; + conn->hdr_size = sizeof(struct iscsi_hdr); + if (conn->hdrdgst_en) { + conn->hdr_size += sizeof(__u32); + if (!conn->tx_tfm) + conn->tx_tfm = crypto_alloc_tfm("crc32c", 0); + if (!conn->tx_tfm) + return -ENOMEM; + if (!conn->rx_tfm) + conn->rx_tfm = crypto_alloc_tfm("crc32c", 0); + if (!conn->rx_tfm) { + crypto_free_tfm(conn->tx_tfm); + return -ENOMEM; + } + } else { + if (conn->tx_tfm) + crypto_free_tfm(conn->tx_tfm); + if (conn->rx_tfm) + crypto_free_tfm(conn->rx_tfm); + } + break; + case ISCSI_PARAM_DATADGST_EN: + conn->datadgst_en = value; + if (conn->datadgst_en) { + if (!conn->data_tx_tfm) + conn->data_tx_tfm = + crypto_alloc_tfm("crc32c", 0); + if (!conn->data_tx_tfm) + return -ENOMEM; + if (!conn->data_rx_tfm) + conn->data_rx_tfm = + crypto_alloc_tfm("crc32c", 0); + if (!conn->data_rx_tfm) { + crypto_free_tfm(conn->data_tx_tfm); + return -ENOMEM; + } + } else { + if (conn->data_tx_tfm) + crypto_free_tfm(conn->data_tx_tfm); + if (conn->data_rx_tfm) + crypto_free_tfm(conn->data_rx_tfm); + } + break; + case ISCSI_PARAM_INITIAL_R2T_EN: + session->initial_r2t_en = value; + break; + case ISCSI_PARAM_MAX_R2T: + if (session->max_r2t == roundup_pow_of_two(value)) + break; + iscsi_r2tpool_free(session); + session->max_r2t = value; + if (session->max_r2t & (session->max_r2t - 1)) + session->max_r2t = roundup_pow_of_two(session->max_r2t); + if (iscsi_r2tpool_alloc(session)) + return -ENOMEM; + break; + case ISCSI_PARAM_IMM_DATA_EN: + session->imm_data_en = value; + break; + case ISCSI_PARAM_FIRST_BURST: + session->first_burst = value; + break; + case ISCSI_PARAM_MAX_BURST: + session->max_burst = value; + break; + case ISCSI_PARAM_PDU_INORDER_EN: + session->pdu_inorder_en = value; + break; + case ISCSI_PARAM_DATASEQ_INORDER_EN: + session->dataseq_inorder_en = value; + break; + case ISCSI_PARAM_ERL: + session->erl = value; + break; + case ISCSI_PARAM_IFMARKER_EN: + BUG_ON(value); + session->ifmarker_en = value; + break; + case ISCSI_PARAM_OFMARKER_EN: + BUG_ON(value); + session->ofmarker_en = value; + break; + default: + break; + } + + return 0; +} + +static int +iscsi_conn_get_param(iscsi_connh_t connh, enum iscsi_param param, + uint32_t *value) +{ + struct iscsi_conn *conn = iscsi_ptr(connh); + struct iscsi_session *session = conn->session; + + switch(param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + *value = conn->max_recv_dlength; + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + *value = conn->max_xmit_dlength; + break; + case ISCSI_PARAM_HDRDGST_EN: + *value = conn->hdrdgst_en; + break; + case ISCSI_PARAM_DATADGST_EN: + *value = conn->datadgst_en; + break; + case 
ISCSI_PARAM_INITIAL_R2T_EN: + *value = session->initial_r2t_en; + break; + case ISCSI_PARAM_MAX_R2T: + *value = session->max_r2t; + break; + case ISCSI_PARAM_IMM_DATA_EN: + *value = session->imm_data_en; + break; + case ISCSI_PARAM_FIRST_BURST: + *value = session->first_burst; + break; + case ISCSI_PARAM_MAX_BURST: + *value = session->max_burst; + break; + case ISCSI_PARAM_PDU_INORDER_EN: + *value = session->pdu_inorder_en; + break; + case ISCSI_PARAM_DATASEQ_INORDER_EN: + *value = session->dataseq_inorder_en; + break; + case ISCSI_PARAM_ERL: + *value = session->erl; + break; + case ISCSI_PARAM_IFMARKER_EN: + *value = session->ifmarker_en; + break; + case ISCSI_PARAM_OFMARKER_EN: + *value = session->ofmarker_en; + break; + default: + return ISCSI_ERR_PARAM_NOT_FOUND; + } + + return 0; +} + +static void +iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = iscsi_ptr(connh); + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->custom_length = 3; + strcpy(stats->custom[0].desc, "tx_sendpage_failures"); + stats->custom[0].value = conn->sendpage_failures_cnt; + strcpy(stats->custom[1].desc, "rx_discontiguous_hdr"); + stats->custom[1].value = conn->discontiguous_hdr_cnt; + strcpy(stats->custom[2].desc, "eh_abort_cnt"); + stats->custom[2].value = conn->eh_abort_cnt; +} + +static int +iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data, + uint32_t data_size) +{ + struct iscsi_conn *conn = iscsi_ptr(connh); + int rc; + + down(&conn->xmitsema); + rc = iscsi_conn_send_generic(conn, hdr, data, data_size); + up(&conn->xmitsema); + + return rc; +} + +static struct iscsi_transport iscsi_tcp_transport = { + .owner = THIS_MODULE, + .name = "tcp", + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST + | CAP_DATADGST, + .host_template = &iscsi_sht, + .hostdata_size = sizeof(struct iscsi_session), + .max_conn = 1, + .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN, + .create_session = iscsi_session_create, + .destroy_session = iscsi_session_destroy, + .create_conn = iscsi_conn_create, + .bind_conn = iscsi_conn_bind, + .destroy_conn = iscsi_conn_destroy, + .set_param = iscsi_conn_set_param, + .get_param = iscsi_conn_get_param, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_conn_stop, + .send_pdu = iscsi_conn_send_pdu, + .get_stats = iscsi_conn_get_stats, +}; + +static int __init +iscsi_tcp_init(void) +{ + int error; + + if (iscsi_max_lun < 1) { + printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun); + return -EINVAL; + } + iscsi_tcp_transport.max_lun = iscsi_max_lun; + + taskcache = kmem_cache_create("iscsi_taskcache", + sizeof(struct iscsi_data_task), 0, + SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL); + if (!taskcache) + return -ENOMEM; + + error = iscsi_register_transport(&iscsi_tcp_transport); + if (error) + kmem_cache_destroy(taskcache); + + return error; +} + +static void __exit +iscsi_tcp_exit(void) +{ + iscsi_unregister_transport(&iscsi_tcp_transport); + kmem_cache_destroy(taskcache); +} + +module_init(iscsi_tcp_init); +module_exit(iscsi_tcp_exit); diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h new file mode 100644 index 
000000000000..d23ae68fae0d --- /dev/null +++ b/drivers/scsi/iscsi_tcp.h @@ -0,0 +1,322 @@ +/* + * iSCSI Initiator TCP Transport + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 Mike Christie + * maintained by open-iscsi@googlegroups.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * See the file COPYING included with this distribution for more details. + */ + +#ifndef ISCSI_TCP_H +#define ISCSI_TCP_H + +/* Session's states */ +#define ISCSI_STATE_FREE 1 +#define ISCSI_STATE_LOGGED_IN 2 +#define ISCSI_STATE_FAILED 3 +#define ISCSI_STATE_TERMINATE 4 + +/* Connection's states */ +#define ISCSI_CONN_INITIAL_STAGE 0 +#define ISCSI_CONN_STARTED 1 +#define ISCSI_CONN_STOPPED 2 +#define ISCSI_CONN_CLEANUP_WAIT 3 + +/* Connection suspend "bit" */ +#define SUSPEND_BIT 1 + +/* Socket's Receive state machine */ +#define IN_PROGRESS_WAIT_HEADER 0x0 +#define IN_PROGRESS_HEADER_GATHER 0x1 +#define IN_PROGRESS_DATA_RECV 0x2 +#define IN_PROGRESS_DDIGEST_RECV 0x3 + +/* Task Mgmt states */ +#define TMABORT_INITIAL 0x0 +#define TMABORT_SUCCESS 0x1 +#define TMABORT_FAILED 0x2 +#define TMABORT_TIMEDOUT 0x3 + +/* xmit state machine */ +#define XMSTATE_IDLE 0x0 +#define XMSTATE_R_HDR 0x1 +#define XMSTATE_W_HDR 0x2 +#define XMSTATE_IMM_HDR 0x4 +#define XMSTATE_IMM_DATA 0x8 +#define XMSTATE_UNS_INIT 0x10 +#define XMSTATE_UNS_HDR 0x20 +#define XMSTATE_UNS_DATA 0x40 +#define XMSTATE_SOL_HDR 0x80 +#define XMSTATE_SOL_DATA 0x100 +#define XMSTATE_W_PAD 0x200 +#define XMSTATE_DATA_DIGEST 0x400 + +#define ISCSI_CONN_MAX 1 +#define ISCSI_CONN_RCVBUF_MIN 262144 +#define ISCSI_CONN_SNDBUF_MIN 262144 +#define ISCSI_PAD_LEN 4 +#define ISCSI_R2T_MAX 16 +#define ISCSI_XMIT_CMDS_MAX 128 /* must be power of 2 */ +#define ISCSI_MGMT_CMDS_MAX 32 /* must be power of 2 */ +#define ISCSI_MGMT_ITT_OFFSET 0xa00 +#define ISCSI_SG_TABLESIZE SG_ALL +#define ISCSI_CMD_PER_LUN 128 +#define ISCSI_TCP_MAX_CMD_LEN 16 + +#define ITT_MASK (0xfff) +#define CID_SHIFT 12 +#define CID_MASK (0xffff<<CID_SHIFT) +#define AGE_SHIFT 28 +#define AGE_MASK (0xf<<AGE_SHIFT) + +struct iscsi_queue { + struct kfifo *queue; /* FIFO Queue */ + void **pool; /* Pool of elements */ + int max; /* Max number of elements */ +}; + +struct iscsi_session; +struct iscsi_cmd_task; +struct iscsi_mgmt_task; + +/* Socket connection recieve helper */ +struct iscsi_tcp_recv { + struct iscsi_hdr *hdr; + struct sk_buff *skb; + int offset; + int len; + int hdr_offset; + int copy; + int copied; + int padding; + struct iscsi_cmd_task *ctask; /* current cmd in progress */ + + /* copied and flipped values */ + int opcode; + int flags; + int cmd_status; + int ahslen; + int datalen; + uint32_t itt; + int datadgst; +}; + +struct iscsi_conn { + struct iscsi_hdr hdr; /* header placeholder */ + char hdrext[4*sizeof(__u16) + + sizeof(__u32)]; + int data_copied; + char *data; /* data placeholder */ + struct socket *sock; /* TCP socket */ + int data_size; /* actual recv_dlength */ + int stop_stage; /* conn_stop() flag: * + * stop to recover, * + * stop to terminate */ + /* iSCSI 
connection-wide sequencing */ + uint32_t exp_statsn; + int hdr_size; /* PDU header size */ + unsigned long suspend_rx; /* suspend Rx */ + + struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */ + struct crypto_tfm *data_rx_tfm; /* CRC32C (Rx) for data */ + + /* control data */ + int senselen; /* scsi sense length */ + int id; /* CID */ + struct iscsi_tcp_recv in; /* TCP receive context */ + struct iscsi_session *session; /* parent session */ + struct list_head item; /* maintains list of conns */ + int in_progress; /* connection state machine */ + int c_stage; /* connection state */ + struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ + struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ + struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ + spinlock_t lock; /* FIXME: to be removed */ + + /* old values for socket callbacks */ + void (*old_data_ready)(struct sock *, int); + void (*old_state_change)(struct sock *); + void (*old_write_space)(struct sock *); + + /* xmit */ + struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */ + struct crypto_tfm *data_tx_tfm; /* CRC32C (Tx) for data */ + struct kfifo *writequeue; /* write cmds for Data-Outs */ + struct kfifo *immqueue; /* immediate xmit queue */ + struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */ + struct kfifo *xmitqueue; /* data-path cmd queue */ + struct work_struct xmitwork; /* per-conn. xmit workqueue */ + struct semaphore xmitsema; /* serializes connection xmit, + * access to kfifos: * + * xmitqueue, writequeue, * + * immqueue, mgmtqueue */ + unsigned long suspend_tx; /* suspend Tx */ + + /* abort */ + wait_queue_head_t ehwait; /* used in eh_abort() */ + struct iscsi_tm tmhdr; + struct timer_list tmabort_timer; /* abort timer */ + int tmabort_state; /* see TMABORT_INITIAL, etc.*/ + + /* negotiated params */ + int max_recv_dlength; + int max_xmit_dlength; + int hdrdgst_en; + int datadgst_en; + + /* MIB-statistics */ + uint64_t txdata_octets; + uint64_t rxdata_octets; + uint32_t scsicmd_pdus_cnt; + uint32_t dataout_pdus_cnt; + uint32_t scsirsp_pdus_cnt; + uint32_t datain_pdus_cnt; + uint32_t r2t_pdus_cnt; + uint32_t tmfcmd_pdus_cnt; + int32_t tmfrsp_pdus_cnt; + + /* custom statistics */ + uint32_t sendpage_failures_cnt; + uint32_t discontiguous_hdr_cnt; + uint32_t eh_abort_cnt; +}; + +struct iscsi_session { + /* iSCSI session-wide sequencing */ + uint32_t cmdsn; + uint32_t exp_cmdsn; + uint32_t max_cmdsn; + + /* configuration */ + int initial_r2t_en; + int max_r2t; + int imm_data_en; + int first_burst; + int max_burst; + int time2wait; + int time2retain; + int pdu_inorder_en; + int dataseq_inorder_en; + int erl; + int ifmarker_en; + int ofmarker_en; + + /* control data */ + struct Scsi_Host *host; + int id; + struct iscsi_conn *leadconn; /* leading connection */ + spinlock_t lock; /* protects session state, * + * sequence numbers, * + * session resources: * + * - cmdpool, * + * - mgmtpool, * + * - r2tpool */ + int state; /* session state */ + struct list_head item; + void *auth_client; + int conn_cnt; + int age; /* counts session re-opens */ + + struct list_head connections; /* list of connections */ + int cmds_max; /* size of cmds array */ + struct iscsi_cmd_task **cmds; /* Original Cmds arr */ + struct iscsi_queue cmdpool; /* PDU's pool */ + int mgmtpool_max; /* size of mgmt array */ + struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */ + struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */ +}; + +struct iscsi_buf { + struct scatterlist sg; + struct kvec iov; + unsigned int sent; +}; + +struct 
iscsi_data_task { + struct iscsi_data hdr; /* PDU */ + char hdrext[sizeof(__u32)]; /* Header-Digest */ + struct list_head item; /* data queue item */ + struct iscsi_buf digestbuf; /* digest buffer */ + uint32_t digest; /* data digest */ +}; +#define ISCSI_DTASK_DEFAULT_MAX ISCSI_SG_TABLESIZE * PAGE_SIZE / 512 + +struct iscsi_mgmt_task { + struct iscsi_hdr hdr; /* mgmt. PDU */ + char hdrext[sizeof(__u32)]; /* Header-Digest */ + char *data; /* mgmt payload */ + int xmstate; /* mgmt xmit progress */ + int data_count; /* counts data to be sent */ + struct iscsi_buf headbuf; /* header buffer */ + struct iscsi_buf sendbuf; /* in progress buffer */ + int sent; + uint32_t itt; /* this ITT */ +}; + +struct iscsi_r2t_info { + __be32 ttt; /* copied from R2T */ + __be32 exp_statsn; /* copied from R2T */ + uint32_t data_length; /* copied from R2T */ + uint32_t data_offset; /* copied from R2T */ + struct iscsi_buf headbuf; /* Data-Out Header Buffer */ + struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/ + int sent; /* R2T sequence progress */ + int data_count; /* DATA-Out payload progress */ + struct scatterlist *sg; /* per-R2T SG list */ + int solicit_datasn; + struct iscsi_data_task *dtask; /* which data task */ +}; + +struct iscsi_cmd_task { + struct iscsi_cmd hdr; /* iSCSI PDU header */ + char hdrext[4*sizeof(__u16)+ /* AHS */ + sizeof(__u32)]; /* HeaderDigest */ + char pad[ISCSI_PAD_LEN]; + int itt; /* this ITT */ + int datasn; /* DataSN */ + struct iscsi_buf headbuf; /* header buf (xmit) */ + struct iscsi_buf sendbuf; /* in progress buffer*/ + int sent; + struct scatterlist *sg; /* per-cmd SG list */ + struct scatterlist *bad_sg; /* assert statement */ + int sg_count; /* SG's to process */ + uint32_t unsol_datasn; + uint32_t exp_r2tsn; + int xmstate; /* xmit xtate machine */ + int imm_count; /* imm-data (bytes) */ + int unsol_count; /* unsolicited (bytes)*/ + int r2t_data_count; /* R2T Data-Out bytes */ + int data_count; /* remaining Data-Out */ + int pad_count; /* padded bytes */ + struct scsi_cmnd *sc; /* associated SCSI cmd*/ + int total_length; + int data_offset; + struct iscsi_conn *conn; /* used connection */ + struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */ + + struct iscsi_r2t_info *r2t; /* in progress R2T */ + struct iscsi_queue r2tpool; + struct kfifo *r2tqueue; + struct iscsi_r2t_info **r2ts; + struct list_head dataqueue; /* Data-Out dataqueue */ + mempool_t *datapool; + uint32_t datadigest; /* for recover digest */ + int digest_count; + uint32_t immdigest; /* for imm data */ + struct iscsi_buf immbuf; /* for imm data digest */ + struct iscsi_data_task *dtask; /* data task in progress*/ + int digest_offset; /* for partial buff digest */ +}; + +#endif /* ISCSI_H */ diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 519486d24b28..9a4f576c0d5e 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -3481,8 +3481,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) **---------------------------------------------------- */ if (np->settle_time && cmd->timeout_per_command >= HZ) { - u_long tlimit = ktime_get(cmd->timeout_per_command - HZ); - if (ktime_dif(np->settle_time, tlimit) > 0) + u_long tlimit = jiffies + cmd->timeout_per_command - HZ; + if (time_after(np->settle_time, tlimit)) np->settle_time = tlimit; } @@ -3516,7 +3516,7 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) ** Force ordered tag if necessary to avoid timeouts ** and to preserve interactivity. 
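The ncr53c8xx changes in this part of the diff replace the driver's private ktime_get()/ktime_exp() helpers with plain jiffies arithmetic and time_after(), which compares via signed subtraction and therefore stays correct across jiffies wrap-around. A minimal illustration:

	#include <linux/jiffies.h>

	static inline int example_interval_expired(unsigned long stamp)
	{
		/* true once 3*HZ ticks have passed since 'stamp', even if the
		 * jiffies counter wrapped through 0 in the meantime          */
		return time_after(jiffies, stamp + 3 * HZ);
	}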
*/ - if (lp && ktime_exp(lp->tags_stime)) { + if (lp && time_after(jiffies, lp->tags_stime)) { if (lp->tags_smap) { order = M_ORDERED_TAG; if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>2){ @@ -3524,7 +3524,7 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) "ordered tag forced.\n"); } } - lp->tags_stime = ktime_get(3*HZ); + lp->tags_stime = jiffies + 3*HZ; lp->tags_smap = lp->tags_umap; } @@ -3792,7 +3792,7 @@ static int ncr_reset_scsi_bus(struct ncb *np, int enab_int, int settle_delay) u32 term; int retv = 0; - np->settle_time = ktime_get(settle_delay * HZ); + np->settle_time = jiffies + settle_delay * HZ; if (bootverbose > 1) printk("%s: resetting, " @@ -5044,7 +5044,7 @@ static void ncr_setup_tags (struct ncb *np, struct scsi_device *sdev) static void ncr_timeout (struct ncb *np) { - u_long thistime = ktime_get(0); + u_long thistime = jiffies; /* ** If release process in progress, let's go @@ -5057,7 +5057,7 @@ static void ncr_timeout (struct ncb *np) return; } - np->timer.expires = ktime_get(SCSI_NCR_TIMER_INTERVAL); + np->timer.expires = jiffies + SCSI_NCR_TIMER_INTERVAL; add_timer(&np->timer); /* @@ -5336,8 +5336,8 @@ void ncr_exception (struct ncb *np) **========================================================= */ - if (ktime_exp(np->regtime)) { - np->regtime = ktime_get(10*HZ); + if (time_after(jiffies, np->regtime)) { + np->regtime = jiffies + 10*HZ; for (i = 0; i<sizeof(np->regdump); i++) ((char*)&np->regdump)[i] = INB_OFF(i); np->regdump.nc_dstat = dstat; @@ -5453,7 +5453,7 @@ static int ncr_int_sbmc (struct ncb *np) ** Suspend command processing for 1 second and ** reinitialize all except the chip. */ - np->settle_time = ktime_get(1*HZ); + np->settle_time = jiffies + HZ; ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET); return 1; } @@ -6923,7 +6923,7 @@ static struct lcb *ncr_setup_lcb (struct ncb *np, struct scsi_device *sdev) for (i = 0 ; i < MAX_TAGS ; i++) lp->cb_tags[i] = i; lp->maxnxs = MAX_TAGS; - lp->tags_stime = ktime_get(3*HZ); + lp->tags_stime = jiffies + 3*HZ; ncr_setup_tags (np, sdev); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 8bb8222ea589..d010aeda9d62 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1,8 +1,10 @@ -/* +/* * iSCSI transport class definitions * * Copyright (C) IBM Corporation, 2004 - * Copyright (C) Mike Christie, 2004 + * Copyright (C) Mike Christie, 2004 - 2005 + * Copyright (C) Dmitry Yusupov, 2004 - 2005 + * Copyright (C) Alex Aizman, 2004 - 2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,370 +21,1254 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #include <linux/module.h> +#include <linux/mempool.h> +#include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_iscsi.h> +#include <scsi/iscsi_if.h> -#define ISCSI_SESSION_ATTRS 20 -#define ISCSI_HOST_ATTRS 2 +#define ISCSI_SESSION_ATTRS 8 +#define ISCSI_CONN_ATTRS 6 struct iscsi_internal { struct scsi_transport_template t; - struct iscsi_function_template *fnt; + struct iscsi_transport *iscsi_transport; + struct list_head list; + /* + * List of sessions for this transport + */ + struct list_head sessions; + /* + * lock to serialize access to the sessions list which must + * be taken after the rx_queue_sema + */ + spinlock_t session_lock; + /* + * based on transport capabilities, at register time we set these + * bits to tell the transport class it wants attributes displayed + * in sysfs or that it can support different iSCSI Data-Path + * capabilities + */ + uint32_t param_mask; + + struct class_device cdev; /* * We do not have any private or other attrs. */ + struct transport_container conn_cont; + struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1]; + struct transport_container session_cont; struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1]; - struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1]; }; -#define to_iscsi_internal(tmpl) container_of(tmpl, struct iscsi_internal, t) +/* + * list of registered transports and lock that must + * be held while accessing list. The iscsi_transport_lock must + * be acquired after the rx_queue_sema. + */ +static LIST_HEAD(iscsi_transports); +static DEFINE_SPINLOCK(iscsi_transport_lock); + +#define to_iscsi_internal(tmpl) \ + container_of(tmpl, struct iscsi_internal, t) + +#define cdev_to_iscsi_internal(_cdev) \ + container_of(_cdev, struct iscsi_internal, cdev) + +static void iscsi_transport_release(struct class_device *cdev) +{ + struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); + kfree(priv); +} -static DECLARE_TRANSPORT_CLASS(iscsi_transport_class, - "iscsi_transport", +/* + * iscsi_transport_class represents the iscsi_transports that are + * registered. 
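The to_iscsi_internal() and cdev_to_iscsi_internal() helpers above are the usual container_of() idiom: both the transport template and the class_device are embedded inside struct iscsi_internal, so a pointer to either member can be converted back to the enclosing object. A self-contained sketch of the idiom, with hypothetical example_* names:

#include <linux/device.h>
#include <linux/kernel.h>	/* container_of() */
#include <linux/types.h>

struct example_internal {
	uint32_t param_mask;
	struct class_device cdev;	/* member handed to the class core */
};

static struct example_internal *example_from_cdev(struct class_device *cdev)
{
	/* subtract the member's offset from its address to recover
	 * the enclosing example_internal */
	return container_of(cdev, struct example_internal, cdev);
}

Because the release callback (iscsi_transport_release() above) receives only the class_device pointer, this mapping is what lets it kfree() the whole private structure.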
+ */ +static struct class iscsi_transport_class = { + .name = "iscsi_transport", + .release = iscsi_transport_release, +}; + +static ssize_t +show_transport_handle(struct class_device *cdev, char *buf) +{ + struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); + return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); +} +static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); + +#define show_transport_attr(name, format) \ +static ssize_t \ +show_transport_##name(struct class_device *cdev, char *buf) \ +{ \ + struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \ + return sprintf(buf, format"\n", priv->iscsi_transport->name); \ +} \ +static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); + +show_transport_attr(caps, "0x%x"); +show_transport_attr(max_lun, "%d"); +show_transport_attr(max_conn, "%d"); +show_transport_attr(max_cmd_len, "%d"); + +static struct attribute *iscsi_transport_attrs[] = { + &class_device_attr_handle.attr, + &class_device_attr_caps.attr, + &class_device_attr_max_lun.attr, + &class_device_attr_max_conn.attr, + &class_device_attr_max_cmd_len.attr, + NULL, +}; + +static struct attribute_group iscsi_transport_group = { + .attrs = iscsi_transport_attrs, +}; + +static DECLARE_TRANSPORT_CLASS(iscsi_session_class, + "iscsi_session", NULL, NULL, NULL); -static DECLARE_TRANSPORT_CLASS(iscsi_host_class, - "iscsi_host", +static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, + "iscsi_connection", NULL, NULL, NULL); + +static struct sock *nls; +static int daemon_pid; +static DECLARE_MUTEX(rx_queue_sema); + +struct mempool_zone { + mempool_t *pool; + atomic_t allocated; + int size; + int hiwat; + struct list_head freequeue; + spinlock_t freelock; +}; + +static struct mempool_zone z_reply; + /* - * iSCSI target and session attrs + * Z_MAX_* - actual mempool size allocated at the mempool_zone_init() time + * Z_HIWAT_* - zone's high watermark when if_error bit will be set to -ENOMEM + * so daemon will notice OOM on NETLINK tranposrt level and will + * be able to predict or change operational behavior */ -#define iscsi_session_show_fn(field, format) \ - \ -static ssize_t \ -show_session_##field(struct class_device *cdev, char *buf) \ -{ \ - struct scsi_target *starget = transport_class_to_starget(cdev); \ - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); \ - \ - if (i->fnt->get_##field) \ - i->fnt->get_##field(starget); \ - return snprintf(buf, 20, format"\n", iscsi_##field(starget)); \ +#define Z_MAX_REPLY 8 +#define Z_HIWAT_REPLY 6 +#define Z_MAX_PDU 8 +#define Z_HIWAT_PDU 6 +#define Z_MAX_ERROR 16 +#define Z_HIWAT_ERROR 12 + +struct iscsi_if_conn { + struct list_head conn_list; /* item in connlist */ + struct list_head session_list; /* item in session->connections */ + iscsi_connh_t connh; + int active; /* must be accessed with the connlock */ + struct Scsi_Host *host; /* originated shost */ + struct device dev; /* sysfs transport/container device */ + struct iscsi_transport *transport; + struct mempool_zone z_error; + struct mempool_zone z_pdu; + struct list_head freequeue; +}; + +#define iscsi_dev_to_if_conn(_dev) \ + container_of(_dev, struct iscsi_if_conn, dev) + +#define iscsi_cdev_to_if_conn(_cdev) \ + iscsi_dev_to_if_conn(_cdev->dev) + +static LIST_HEAD(connlist); +static DEFINE_SPINLOCK(connlock); + +struct iscsi_if_session { + struct list_head list; /* item in session_list */ + struct list_head connections; + 
iscsi_sessionh_t sessionh; + struct iscsi_transport *transport; + struct device dev; /* sysfs transport/container device */ +}; + +#define iscsi_dev_to_if_session(_dev) \ + container_of(_dev, struct iscsi_if_session, dev) + +#define iscsi_cdev_to_if_session(_cdev) \ + iscsi_dev_to_if_session(_cdev->dev) + +#define iscsi_if_session_to_shost(_session) \ + dev_to_shost(_session->dev.parent) + +static struct iscsi_if_conn* +iscsi_if_find_conn(uint64_t key) +{ + unsigned long flags; + struct iscsi_if_conn *conn; + + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) + if (conn->connh == key) { + spin_unlock_irqrestore(&connlock, flags); + return conn; + } + spin_unlock_irqrestore(&connlock, flags); + return NULL; } -#define iscsi_session_rd_attr(field, format) \ - iscsi_session_show_fn(field, format) \ -static CLASS_DEVICE_ATTR(field, S_IRUGO, show_session_##field, NULL); +static struct iscsi_internal * +iscsi_if_transport_lookup(struct iscsi_transport *tt) +{ + struct iscsi_internal *priv; + unsigned long flags; + + spin_lock_irqsave(&iscsi_transport_lock, flags); + list_for_each_entry(priv, &iscsi_transports, list) { + if (tt == priv->iscsi_transport) { + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + return priv; + } + } + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + return NULL; +} -iscsi_session_rd_attr(tpgt, "%hu"); -iscsi_session_rd_attr(tsih, "%2x"); -iscsi_session_rd_attr(max_recv_data_segment_len, "%u"); -iscsi_session_rd_attr(max_burst_len, "%u"); -iscsi_session_rd_attr(first_burst_len, "%u"); -iscsi_session_rd_attr(def_time2wait, "%hu"); -iscsi_session_rd_attr(def_time2retain, "%hu"); -iscsi_session_rd_attr(max_outstanding_r2t, "%hu"); -iscsi_session_rd_attr(erl, "%d"); +static inline struct list_head *skb_to_lh(struct sk_buff *skb) +{ + return (struct list_head *)&skb->cb; +} +static void* +mempool_zone_alloc_skb(unsigned int gfp_mask, void *pool_data) +{ + struct mempool_zone *zone = pool_data; -#define iscsi_session_show_bool_fn(field) \ - \ -static ssize_t \ -show_session_bool_##field(struct class_device *cdev, char *buf) \ -{ \ - struct scsi_target *starget = transport_class_to_starget(cdev); \ - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); \ - \ - if (i->fnt->get_##field) \ - i->fnt->get_##field(starget); \ - \ - if (iscsi_##field(starget)) \ - return sprintf(buf, "Yes\n"); \ - return sprintf(buf, "No\n"); \ + return alloc_skb(zone->size, gfp_mask); } -#define iscsi_session_rd_bool_attr(field) \ - iscsi_session_show_bool_fn(field) \ -static CLASS_DEVICE_ATTR(field, S_IRUGO, show_session_bool_##field, NULL); +static void +mempool_zone_free_skb(void *element, void *pool_data) +{ + kfree_skb(element); +} -iscsi_session_rd_bool_attr(initial_r2t); -iscsi_session_rd_bool_attr(immediate_data); -iscsi_session_rd_bool_attr(data_pdu_in_order); -iscsi_session_rd_bool_attr(data_sequence_in_order); +static void +mempool_zone_complete(struct mempool_zone *zone) +{ + unsigned long flags; + struct list_head *lh, *n; -#define iscsi_session_show_digest_fn(field) \ - \ -static ssize_t \ -show_##field(struct class_device *cdev, char *buf) \ -{ \ - struct scsi_target *starget = transport_class_to_starget(cdev); \ - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); \ - \ - if (i->fnt->get_##field) \ - i->fnt->get_##field(starget); \ - \ - if (iscsi_##field(starget)) \ - return 
sprintf(buf, "CRC32C\n"); \ - return sprintf(buf, "None\n"); \ + spin_lock_irqsave(&zone->freelock, flags); + list_for_each_safe(lh, n, &zone->freequeue) { + struct sk_buff *skb = (struct sk_buff *)((char *)lh - + offsetof(struct sk_buff, cb)); + if (!skb_shared(skb)) { + list_del(skb_to_lh(skb)); + mempool_free(skb, zone->pool); + atomic_dec(&zone->allocated); + } + } + spin_unlock_irqrestore(&zone->freelock, flags); } -#define iscsi_session_rd_digest_attr(field) \ - iscsi_session_show_digest_fn(field) \ -static CLASS_DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); +static int +mempool_zone_init(struct mempool_zone *zp, unsigned max, unsigned size, + unsigned hiwat) +{ + zp->pool = mempool_create(max, mempool_zone_alloc_skb, + mempool_zone_free_skb, zp); + if (!zp->pool) + return -ENOMEM; -iscsi_session_rd_digest_attr(header_digest); -iscsi_session_rd_digest_attr(data_digest); + zp->size = size; + zp->hiwat = hiwat; -static ssize_t -show_port(struct class_device *cdev, char *buf) + INIT_LIST_HEAD(&zp->freequeue); + spin_lock_init(&zp->freelock); + atomic_set(&zp->allocated, 0); + + return 0; +} + + +static struct sk_buff* +mempool_zone_get_skb(struct mempool_zone *zone) { - struct scsi_target *starget = transport_class_to_starget(cdev); - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + struct sk_buff *skb; + + skb = mempool_alloc(zone->pool, GFP_ATOMIC); + if (skb) + atomic_inc(&zone->allocated); + return skb; +} - if (i->fnt->get_port) - i->fnt->get_port(starget); +static int +iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb) +{ + unsigned long flags; + int rc; - return snprintf(buf, 20, "%hu\n", ntohs(iscsi_port(starget))); + skb_get(skb); + rc = netlink_unicast(nls, skb, daemon_pid, MSG_DONTWAIT); + if (rc < 0) { + mempool_free(skb, zone->pool); + printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc); + return rc; + } + + spin_lock_irqsave(&zone->freelock, flags); + list_add(skb_to_lh(skb), &zone->freequeue); + spin_unlock_irqrestore(&zone->freelock, flags); + + return 0; } -static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL); -static ssize_t -show_ip_address(struct class_device *cdev, char *buf) +int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, + char *data, uint32_t data_size) { - struct scsi_target *starget = transport_class_to_starget(cdev); - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + struct iscsi_if_conn *conn; + char *pdu; + int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + + data_size); + + conn = iscsi_if_find_conn(connh); + BUG_ON(!conn); + + mempool_zone_complete(&conn->z_pdu); + + skb = mempool_zone_get_skb(&conn->z_pdu); + if (!skb) { + iscsi_conn_error(connh, ISCSI_ERR_CONN_FAILED); + printk(KERN_ERR "iscsi%d: can not deliver control PDU: OOM\n", + conn->host->host_no); + return -ENOMEM; + } - if (i->fnt->get_ip_address) - i->fnt->get_ip_address(starget); + nlh = __nlmsg_put(skb, daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + ev = NLMSG_DATA(nlh); + memset(ev, 0, sizeof(*ev)); + ev->transport_handle = iscsi_handle(conn->transport); + ev->type = ISCSI_KEVENT_RECV_PDU; + if (atomic_read(&conn->z_pdu.allocated) >= conn->z_pdu.hiwat) + ev->iferror = -ENOMEM; + ev->r.recv_req.conn_handle = connh; + pdu = (char*)ev + sizeof(*ev); + memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); + memcpy(pdu + 
sizeof(struct iscsi_hdr), data, data_size); - if (iscsi_addr_type(starget) == AF_INET) - return sprintf(buf, "%u.%u.%u.%u\n", - NIPQUAD(iscsi_sin_addr(starget))); - else if(iscsi_addr_type(starget) == AF_INET6) - return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", - NIP6(iscsi_sin6_addr(starget))); - return -EINVAL; + return iscsi_unicast_skb(&conn->z_pdu, skb); } -static CLASS_DEVICE_ATTR(ip_address, S_IRUGO, show_ip_address, NULL); +EXPORT_SYMBOL_GPL(iscsi_recv_pdu); -static ssize_t -show_isid(struct class_device *cdev, char *buf) +void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) { - struct scsi_target *starget = transport_class_to_starget(cdev); - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + struct iscsi_if_conn *conn; + int len = NLMSG_SPACE(sizeof(*ev)); + + conn = iscsi_if_find_conn(connh); + BUG_ON(!conn); + + mempool_zone_complete(&conn->z_error); + + skb = mempool_zone_get_skb(&conn->z_error); + if (!skb) { + printk(KERN_ERR "iscsi%d: gracefully ignored conn error (%d)\n", + conn->host->host_no, error); + return; + } + + nlh = __nlmsg_put(skb, daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + ev = NLMSG_DATA(nlh); + ev->transport_handle = iscsi_handle(conn->transport); + ev->type = ISCSI_KEVENT_CONN_ERROR; + if (atomic_read(&conn->z_error.allocated) >= conn->z_error.hiwat) + ev->iferror = -ENOMEM; + ev->r.connerror.error = error; + ev->r.connerror.conn_handle = connh; - if (i->fnt->get_isid) - i->fnt->get_isid(starget); + iscsi_unicast_skb(&conn->z_error, skb); - return sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", - iscsi_isid(starget)[0], iscsi_isid(starget)[1], - iscsi_isid(starget)[2], iscsi_isid(starget)[3], - iscsi_isid(starget)[4], iscsi_isid(starget)[5]); + printk(KERN_INFO "iscsi%d: detected conn error (%d)\n", + conn->host->host_no, error); +} +EXPORT_SYMBOL_GPL(iscsi_conn_error); + +static int +iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, + void *payload, int size) +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + int len = NLMSG_SPACE(size); + int flags = multi ? NLM_F_MULTI : 0; + int t = done ? NLMSG_DONE : type; + + mempool_zone_complete(&z_reply); + + skb = mempool_zone_get_skb(&z_reply); + /* + * FIXME: + * user is supposed to react on iferror == -ENOMEM; + * see iscsi_if_rx(). + */ + BUG_ON(!skb); + + nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); + nlh->nlmsg_flags = flags; + memcpy(NLMSG_DATA(nlh), payload, size); + return iscsi_unicast_skb(&z_reply, skb); } -static CLASS_DEVICE_ATTR(isid, S_IRUGO, show_isid, NULL); /* - * This is used for iSCSI names. Normally, we follow - * the transport class convention of having the lld - * set the field, but in these cases the value is - * too large. 
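iscsi_recv_pdu(), iscsi_conn_error() and iscsi_if_send_reply() above all build their netlink messages the same way: size the buffer with NLMSG_SPACE(), write the header with __nlmsg_put(), copy the payload behind it via NLMSG_DATA(), and unicast the skb to the daemon's pid. A simplified sketch of that delivery path; the real functions draw their skbs from per-connection mempool zones rather than alloc_skb(), and example_send_event() is a hypothetical name:

#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/sock.h>

static int example_send_event(struct sock *nls, int pid,
			      const void *payload, int size)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len = NLMSG_SPACE(size);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	nlh = __nlmsg_put(skb, pid, 0, NLMSG_DONE, len - sizeof(*nlh), 0);
	memcpy(NLMSG_DATA(nlh), payload, size);

	/* GFP_ATOMIC and MSG_DONTWAIT: this path must not sleep */
	return netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
}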
+ * iSCSI Session's hostdata organization: + * + * *------------------* <== host->hostdata + * | transport | + * |------------------| <== iscsi_hostdata(host->hostdata) + * | transport's data | + * |------------------| <== hostdata_session(host->hostdata) + * | interface's data | + * *------------------* */ -#define iscsi_session_show_str_fn(field) \ - \ -static ssize_t \ -show_session_str_##field(struct class_device *cdev, char *buf) \ -{ \ - ssize_t ret = 0; \ - struct scsi_target *starget = transport_class_to_starget(cdev); \ - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); \ - \ - if (i->fnt->get_##field) \ - ret = i->fnt->get_##field(starget, buf, PAGE_SIZE); \ - return ret; \ + +#define hostdata_privsize(_t) (sizeof(unsigned long) + _t->hostdata_size + \ + _t->hostdata_size % sizeof(unsigned long) + \ + sizeof(struct iscsi_if_session)) + +#define hostdata_session(_hostdata) ((void*)_hostdata + sizeof(unsigned long) + \ + ((struct iscsi_transport *) \ + iscsi_ptr(*(uint64_t *)_hostdata))->hostdata_size) + +static void iscsi_if_session_dev_release(struct device *dev) +{ + struct iscsi_if_session *session = iscsi_dev_to_if_session(dev); + struct iscsi_transport *transport = session->transport; + struct Scsi_Host *shost = iscsi_if_session_to_shost(session); + struct iscsi_if_conn *conn, *tmp; + unsigned long flags; + + /* now free connections */ + spin_lock_irqsave(&connlock, flags); + list_for_each_entry_safe(conn, tmp, &session->connections, + session_list) { + list_del(&conn->session_list); + mempool_destroy(conn->z_pdu.pool); + mempool_destroy(conn->z_error.pool); + kfree(conn); + } + spin_unlock_irqrestore(&connlock, flags); + scsi_host_put(shost); + module_put(transport->owner); +} + +static int +iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) +{ + struct iscsi_transport *transport = priv->iscsi_transport; + struct iscsi_if_session *session; + struct Scsi_Host *shost; + unsigned long flags; + int error; + + if (!try_module_get(transport->owner)) + return -EPERM; + + shost = scsi_host_alloc(transport->host_template, + hostdata_privsize(transport)); + if (!shost) { + ev->r.c_session_ret.session_handle = iscsi_handle(NULL); + printk(KERN_ERR "iscsi: can not allocate SCSI host for " + "session\n"); + error = -ENOMEM; + goto out_module_put; + } + shost->max_id = 1; + shost->max_channel = 0; + shost->max_lun = transport->max_lun; + shost->max_cmd_len = transport->max_cmd_len; + shost->transportt = &priv->t; + + /* store struct iscsi_transport in hostdata */ + *(uint64_t*)shost->hostdata = ev->transport_handle; + + ev->r.c_session_ret.session_handle = transport->create_session( + ev->u.c_session.initial_cmdsn, shost); + if (ev->r.c_session_ret.session_handle == iscsi_handle(NULL)) { + error = 0; + goto out_host_put; + } + + /* host_no becomes assigned SID */ + ev->r.c_session_ret.sid = shost->host_no; + /* initialize session */ + session = hostdata_session(shost->hostdata); + INIT_LIST_HEAD(&session->connections); + INIT_LIST_HEAD(&session->list); + session->sessionh = ev->r.c_session_ret.session_handle; + session->transport = transport; + + error = scsi_add_host(shost, NULL); + if (error) + goto out_destroy_session; + + /* + * this is released in the dev's release function) + */ + scsi_host_get(shost); + snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", shost->host_no); + session->dev.parent = &shost->shost_gendev; + session->dev.release = iscsi_if_session_dev_release; + 
error = device_register(&session->dev); + if (error) { + printk(KERN_ERR "iscsi: could not register session%d's dev\n", + shost->host_no); + goto out_remove_host; + } + transport_register_device(&session->dev); + + /* add this session to the list of active sessions */ + spin_lock_irqsave(&priv->session_lock, flags); + list_add(&session->list, &priv->sessions); + spin_unlock_irqrestore(&priv->session_lock, flags); + + return 0; + +out_remove_host: + scsi_remove_host(shost); +out_destroy_session: + transport->destroy_session(ev->r.c_session_ret.session_handle); + ev->r.c_session_ret.session_handle = iscsi_handle(NULL); +out_host_put: + scsi_host_put(shost); +out_module_put: + module_put(transport->owner); + return error; +} + +static int +iscsi_if_destroy_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) +{ + struct iscsi_transport *transport = priv->iscsi_transport; + struct Scsi_Host *shost; + struct iscsi_if_session *session; + unsigned long flags; + struct iscsi_if_conn *conn; + int error = 0; + + shost = scsi_host_lookup(ev->u.d_session.sid); + if (shost == ERR_PTR(-ENXIO)) + return -EEXIST; + session = hostdata_session(shost->hostdata); + + /* check if we have active connections */ + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &session->connections, session_list) { + if (conn->active) { + printk(KERN_ERR "iscsi%d: can not destroy session: " + "has active connection (%p)\n", + shost->host_no, iscsi_ptr(conn->connh)); + spin_unlock_irqrestore(&connlock, flags); + error = EIO; + goto out_release_ref; + } + } + spin_unlock_irqrestore(&connlock, flags); + + scsi_remove_host(shost); + transport->destroy_session(ev->u.d_session.session_handle); + transport_unregister_device(&session->dev); + device_unregister(&session->dev); + + /* remove this session from the list of active sessions */ + spin_lock_irqsave(&priv->session_lock, flags); + list_del(&session->list); + spin_unlock_irqrestore(&priv->session_lock, flags); + + /* ref from host alloc */ + scsi_host_put(shost); +out_release_ref: + /* ref from host lookup */ + scsi_host_put(shost); + return error; +} + +static void iscsi_if_conn_dev_release(struct device *dev) +{ + struct iscsi_if_conn *conn = iscsi_dev_to_if_conn(dev); + struct Scsi_Host *shost = conn->host; + + scsi_host_put(shost); +} + +static int +iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) +{ + struct iscsi_if_session *session; + struct Scsi_Host *shost; + struct iscsi_if_conn *conn; + unsigned long flags; + int error; + + shost = scsi_host_lookup(ev->u.c_conn.sid); + if (shost == ERR_PTR(-ENXIO)) + return -EEXIST; + session = hostdata_session(shost->hostdata); + + conn = kmalloc(sizeof(struct iscsi_if_conn), GFP_KERNEL); + if (!conn) { + error = -ENOMEM; + goto out_release_ref; + } + memset(conn, 0, sizeof(struct iscsi_if_conn)); + INIT_LIST_HEAD(&conn->session_list); + INIT_LIST_HEAD(&conn->conn_list); + conn->host = shost; + conn->transport = transport; + + error = mempool_zone_init(&conn->z_pdu, Z_MAX_PDU, + NLMSG_SPACE(sizeof(struct iscsi_uevent) + + sizeof(struct iscsi_hdr) + + DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH), + Z_HIWAT_PDU); + if (error) { + printk(KERN_ERR "iscsi%d: can not allocate pdu zone for new " + "conn\n", shost->host_no); + goto out_free_conn; + } + error = mempool_zone_init(&conn->z_error, Z_MAX_ERROR, + NLMSG_SPACE(sizeof(struct iscsi_uevent)), + Z_HIWAT_ERROR); + if (error) { + printk(KERN_ERR "iscsi%d: can not allocate error zone for " + "new conn\n", shost->host_no); + goto 
out_free_pdu_pool; + } + + ev->r.handle = transport->create_conn(ev->u.c_conn.session_handle, + ev->u.c_conn.cid); + if (!ev->r.handle) { + error = -ENODEV; + goto out_free_error_pool; + } + + conn->connh = ev->r.handle; + + /* + * this is released in the dev's release function + */ + if (!scsi_host_get(shost)) + goto out_destroy_conn; + snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u", + shost->host_no, ev->u.c_conn.cid); + conn->dev.parent = &session->dev; + conn->dev.release = iscsi_if_conn_dev_release; + error = device_register(&conn->dev); + if (error) { + printk(KERN_ERR "iscsi%d: could not register connections%u " + "dev\n", shost->host_no, ev->u.c_conn.cid); + goto out_release_parent_ref; + } + transport_register_device(&conn->dev); + + spin_lock_irqsave(&connlock, flags); + list_add(&conn->conn_list, &connlist); + list_add(&conn->session_list, &session->connections); + conn->active = 1; + spin_unlock_irqrestore(&connlock, flags); + + scsi_host_put(shost); + return 0; + +out_release_parent_ref: + scsi_host_put(shost); +out_destroy_conn: + transport->destroy_conn(ev->r.handle); +out_free_error_pool: + mempool_destroy(conn->z_error.pool); +out_free_pdu_pool: + mempool_destroy(conn->z_pdu.pool); +out_free_conn: + kfree(conn); +out_release_ref: + scsi_host_put(shost); + return error; } -#define iscsi_session_rd_str_attr(field) \ - iscsi_session_show_str_fn(field) \ -static CLASS_DEVICE_ATTR(field, S_IRUGO, show_session_str_##field, NULL); +static int +iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) +{ + unsigned long flags; + struct iscsi_if_conn *conn; + + conn = iscsi_if_find_conn(ev->u.d_conn.conn_handle); + if (!conn) + return -EEXIST; + + transport->destroy_conn(ev->u.d_conn.conn_handle); + + spin_lock_irqsave(&connlock, flags); + conn->active = 0; + list_del(&conn->conn_list); + spin_unlock_irqrestore(&connlock, flags); + + transport_unregister_device(&conn->dev); + device_unregister(&conn->dev); + return 0; +} + +static int +iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, + struct nlmsghdr *nlh) +{ + struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_stats *stats; + struct sk_buff *skbstat; + struct iscsi_if_conn *conn; + struct nlmsghdr *nlhstat; + struct iscsi_uevent *evstat; + int len = NLMSG_SPACE(sizeof(*ev) + + sizeof(struct iscsi_stats) + + sizeof(struct iscsi_stats_custom) * + ISCSI_STATS_CUSTOM_MAX); + int err = 0; + + conn = iscsi_if_find_conn(ev->u.get_stats.conn_handle); + if (!conn) + return -EEXIST; + + do { + int actual_size; + + mempool_zone_complete(&conn->z_pdu); + + skbstat = mempool_zone_get_skb(&conn->z_pdu); + if (!skbstat) { + printk(KERN_ERR "iscsi%d: can not deliver stats: OOM\n", + conn->host->host_no); + return -ENOMEM; + } + + nlhstat = __nlmsg_put(skbstat, daemon_pid, 0, 0, + (len - sizeof(*nlhstat)), 0); + evstat = NLMSG_DATA(nlhstat); + memset(evstat, 0, sizeof(*evstat)); + evstat->transport_handle = iscsi_handle(conn->transport); + evstat->type = nlh->nlmsg_type; + if (atomic_read(&conn->z_pdu.allocated) >= conn->z_pdu.hiwat) + evstat->iferror = -ENOMEM; + evstat->u.get_stats.conn_handle = + ev->u.get_stats.conn_handle; + stats = (struct iscsi_stats *) + ((char*)evstat + sizeof(*evstat)); + memset(stats, 0, sizeof(*stats)); + + transport->get_stats(ev->u.get_stats.conn_handle, stats); + actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + + sizeof(struct iscsi_stats) + + sizeof(struct iscsi_stats_custom) * + stats->custom_length); + actual_size -= sizeof(*nlhstat); + 
actual_size = NLMSG_LENGTH(actual_size); + skb_trim(skb, NLMSG_ALIGN(actual_size)); + nlhstat->nlmsg_len = actual_size; + + err = iscsi_unicast_skb(&conn->z_pdu, skbstat); + } while (err < 0 && err != -ECONNREFUSED); + + return err; +} + +static int +iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + int err = 0; + struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_transport *transport = NULL; + struct iscsi_internal *priv; + + if (NETLINK_CREDS(skb)->uid) + return -EPERM; + + priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); + if (!priv) + return -EINVAL; + transport = priv->iscsi_transport; + + daemon_pid = NETLINK_CREDS(skb)->pid; + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_CREATE_SESSION: + err = iscsi_if_create_session(priv, ev); + break; + case ISCSI_UEVENT_DESTROY_SESSION: + err = iscsi_if_destroy_session(priv, ev); + break; + case ISCSI_UEVENT_CREATE_CONN: + err = iscsi_if_create_conn(transport, ev); + break; + case ISCSI_UEVENT_DESTROY_CONN: + err = iscsi_if_destroy_conn(transport, ev); + break; + case ISCSI_UEVENT_BIND_CONN: + if (!iscsi_if_find_conn(ev->u.b_conn.conn_handle)) + return -EEXIST; + ev->r.retcode = transport->bind_conn( + ev->u.b_conn.session_handle, + ev->u.b_conn.conn_handle, + ev->u.b_conn.transport_fd, + ev->u.b_conn.is_leading); + break; + case ISCSI_UEVENT_SET_PARAM: + if (!iscsi_if_find_conn(ev->u.set_param.conn_handle)) + return -EEXIST; + ev->r.retcode = transport->set_param( + ev->u.set_param.conn_handle, + ev->u.set_param.param, ev->u.set_param.value); + break; + case ISCSI_UEVENT_START_CONN: + if (!iscsi_if_find_conn(ev->u.start_conn.conn_handle)) + return -EEXIST; + ev->r.retcode = transport->start_conn( + ev->u.start_conn.conn_handle); + break; + case ISCSI_UEVENT_STOP_CONN: + if (!iscsi_if_find_conn(ev->u.stop_conn.conn_handle)) + return -EEXIST; + transport->stop_conn(ev->u.stop_conn.conn_handle, + ev->u.stop_conn.flag); + break; + case ISCSI_UEVENT_SEND_PDU: + if (!iscsi_if_find_conn(ev->u.send_pdu.conn_handle)) + return -EEXIST; + ev->r.retcode = transport->send_pdu( + ev->u.send_pdu.conn_handle, + (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), + (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, + ev->u.send_pdu.data_size); + break; + case ISCSI_UEVENT_GET_STATS: + err = iscsi_if_get_stats(transport, skb, nlh); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +/* Get message from skb (based on rtnetlink_rcv_skb). Each message is + * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are + * discarded silently. */ +static void +iscsi_if_rx(struct sock *sk, int len) +{ + struct sk_buff *skb; -iscsi_session_rd_str_attr(target_name); -iscsi_session_rd_str_attr(target_alias); + down(&rx_queue_sema); + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + while (skb->len >= NLMSG_SPACE(0)) { + int err; + uint32_t rlen; + struct nlmsghdr *nlh; + struct iscsi_uevent *ev; + + nlh = (struct nlmsghdr *)skb->data; + if (nlh->nlmsg_len < sizeof(*nlh) || + skb->len < nlh->nlmsg_len) { + break; + } + ev = NLMSG_DATA(nlh); + rlen = NLMSG_ALIGN(nlh->nlmsg_len); + if (rlen > skb->len) + rlen = skb->len; + err = iscsi_if_recv_msg(skb, nlh); + if (err) { + ev->type = ISCSI_KEVENT_IF_ERROR; + ev->iferror = err; + } + do { + /* + * special case for GET_STATS: + * on success - sending reply and stats from + * inside of if_recv_msg(), + * on error - fall through. 
+ */ + if (ev->type == ISCSI_UEVENT_GET_STATS && !err) + break; + err = iscsi_if_send_reply( + NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq, + nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); + if (atomic_read(&z_reply.allocated) >= + z_reply.hiwat) + ev->iferror = -ENOMEM; + } while (err < 0 && err != -ECONNREFUSED); + skb_pull(skb, rlen); + } + kfree_skb(skb); + } + up(&rx_queue_sema); +} /* - * iSCSI host attrs + * iSCSI connection attrs */ +#define iscsi_conn_int_attr_show(param, format) \ +static ssize_t \ +show_conn_int_param_##param(struct class_device *cdev, char *buf) \ +{ \ + uint32_t value = 0; \ + struct iscsi_if_conn *conn = iscsi_cdev_to_if_conn(cdev); \ + struct iscsi_internal *priv; \ + \ + priv = to_iscsi_internal(conn->host->transportt); \ + if (priv->param_mask & (1 << param)) \ + priv->iscsi_transport->get_param(conn->connh, param, &value); \ + return snprintf(buf, 20, format"\n", value); \ +} + +#define iscsi_conn_int_attr(field, param, format) \ + iscsi_conn_int_attr_show(param, format) \ +static CLASS_DEVICE_ATTR(field, S_IRUGO, show_conn_int_param_##param, NULL); + +iscsi_conn_int_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH, "%u"); +iscsi_conn_int_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH, "%u"); +iscsi_conn_int_attr(header_digest, ISCSI_PARAM_HDRDGST_EN, "%d"); +iscsi_conn_int_attr(data_digest, ISCSI_PARAM_DATADGST_EN, "%d"); +iscsi_conn_int_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN, "%d"); +iscsi_conn_int_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN, "%d"); /* - * Again, this is used for iSCSI names. Normally, we follow - * the transport class convention of having the lld set - * the field, but in these cases the value is too large. + * iSCSI session attrs */ -#define iscsi_host_show_str_fn(field) \ - \ +#define iscsi_session_int_attr_show(param, format) \ static ssize_t \ -show_host_str_##field(struct class_device *cdev, char *buf) \ +show_session_int_param_##param(struct class_device *cdev, char *buf) \ { \ - int ret = 0; \ - struct Scsi_Host *shost = transport_class_to_shost(cdev); \ - struct iscsi_internal *i = to_iscsi_internal(shost->transportt); \ + uint32_t value = 0; \ + struct iscsi_if_session *session = iscsi_cdev_to_if_session(cdev); \ + struct Scsi_Host *shost = iscsi_if_session_to_shost(session); \ + struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \ + struct iscsi_if_conn *conn = NULL; \ + unsigned long flags; \ + \ + spin_lock_irqsave(&connlock, flags); \ + if (!list_empty(&session->connections)) \ + conn = list_entry(session->connections.next, \ + struct iscsi_if_conn, session_list); \ + spin_unlock_irqrestore(&connlock, flags); \ \ - if (i->fnt->get_##field) \ - ret = i->fnt->get_##field(shost, buf, PAGE_SIZE); \ - return ret; \ + if (conn && (priv->param_mask & (1 << param))) \ + priv->iscsi_transport->get_param(conn->connh, param, &value);\ + return snprintf(buf, 20, format"\n", value); \ } -#define iscsi_host_rd_str_attr(field) \ - iscsi_host_show_str_fn(field) \ -static CLASS_DEVICE_ATTR(field, S_IRUGO, show_host_str_##field, NULL); +#define iscsi_session_int_attr(field, param, format) \ + iscsi_session_int_attr_show(param, format) \ +static CLASS_DEVICE_ATTR(field, S_IRUGO, show_session_int_param_##param, NULL); -iscsi_host_rd_str_attr(initiator_name); -iscsi_host_rd_str_attr(initiator_alias); +iscsi_session_int_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, "%d"); +iscsi_session_int_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, "%hu"); +iscsi_session_int_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, "%d"); 
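The iscsi_conn_int_attr() and iscsi_session_int_attr() macros above (and the remaining session attributes just below) each expand to a sysfs show function plus a read-only CLASS_DEVICE_ATTR; the generated show function asks the LLD for the value through iscsi_transport->get_param(), guarded by param_mask. A stripped-down, self-contained sketch of that generator pattern, with hypothetical example_* names and a static variable standing in for the get_param() call:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static unsigned int example_max_recv_dlength;	/* stand-in for get_param() output */

#define example_ro_attr(field, format)					\
static ssize_t show_example_##field(struct class_device *cdev, char *buf) \
{									\
	return snprintf(buf, 20, format "\n", example_##field);	\
}									\
static CLASS_DEVICE_ATTR(field, S_IRUGO, show_example_##field, NULL)

example_ro_attr(max_recv_dlength, "%u");

Each invocation creates one class_device_attr_<field> object; the SETUP_*_RD_ATTR() macros later in the patch decide, per transport capability bit, which of those objects actually end up in the attribute arrays.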
+iscsi_session_int_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, "%u"); +iscsi_session_int_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, "%u"); +iscsi_session_int_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, "%d"); +iscsi_session_int_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, "%d"); +iscsi_session_int_attr(erl, ISCSI_PARAM_ERL, "%d"); -#define SETUP_SESSION_RD_ATTR(field) \ - if (i->fnt->show_##field) { \ - i->session_attrs[count] = &class_device_attr_##field; \ +#define SETUP_SESSION_RD_ATTR(field, param) \ + if (priv->param_mask & (1 << param)) { \ + priv->session_attrs[count] = &class_device_attr_##field;\ count++; \ } -#define SETUP_HOST_RD_ATTR(field) \ - if (i->fnt->show_##field) { \ - i->host_attrs[count] = &class_device_attr_##field; \ +#define SETUP_CONN_RD_ATTR(field, param) \ + if (priv->param_mask & (1 << param)) { \ + priv->conn_attrs[count] = &class_device_attr_##field; \ count++; \ } -static int iscsi_host_match(struct attribute_container *cont, - struct device *dev) +static int iscsi_is_session_dev(const struct device *dev) +{ + return dev->release == iscsi_if_session_dev_release; +} + +static int iscsi_session_match(struct attribute_container *cont, + struct device *dev) { + struct iscsi_if_session *session; struct Scsi_Host *shost; - struct iscsi_internal *i; + struct iscsi_internal *priv; + + if (!iscsi_is_session_dev(dev)) + return 0; - if (!scsi_is_host_device(dev)) + session = iscsi_dev_to_if_session(dev); + shost = iscsi_if_session_to_shost(session); + if (!shost->transportt) return 0; - shost = dev_to_shost(dev); - if (!shost->transportt || shost->transportt->host_attrs.ac.class - != &iscsi_host_class.class) + priv = to_iscsi_internal(shost->transportt); + if (priv->session_cont.ac.class != &iscsi_session_class.class) return 0; - i = to_iscsi_internal(shost->transportt); - - return &i->t.host_attrs.ac == cont; + return &priv->session_cont.ac == cont; } -static int iscsi_target_match(struct attribute_container *cont, - struct device *dev) +static int iscsi_is_conn_dev(const struct device *dev) { + return dev->release == iscsi_if_conn_dev_release; +} + +static int iscsi_conn_match(struct attribute_container *cont, + struct device *dev) +{ + struct iscsi_if_conn *conn; struct Scsi_Host *shost; - struct iscsi_internal *i; + struct iscsi_internal *priv; - if (!scsi_is_target_device(dev)) + if (!iscsi_is_conn_dev(dev)) return 0; - shost = dev_to_shost(dev->parent); - if (!shost->transportt || shost->transportt->host_attrs.ac.class - != &iscsi_host_class.class) + conn = iscsi_dev_to_if_conn(dev); + shost = conn->host; + if (!shost->transportt) return 0; - i = to_iscsi_internal(shost->transportt); - - return &i->t.target_attrs.ac == cont; -} - -struct scsi_transport_template * -iscsi_attach_transport(struct iscsi_function_template *fnt) -{ - struct iscsi_internal *i = kmalloc(sizeof(struct iscsi_internal), - GFP_KERNEL); - int count = 0; - - if (unlikely(!i)) - return NULL; - - memset(i, 0, sizeof(struct iscsi_internal)); - i->fnt = fnt; - - i->t.target_attrs.ac.attrs = &i->session_attrs[0]; - i->t.target_attrs.ac.class = &iscsi_transport_class.class; - i->t.target_attrs.ac.match = iscsi_target_match; - transport_container_register(&i->t.target_attrs); - i->t.target_size = sizeof(struct iscsi_class_session); - - SETUP_SESSION_RD_ATTR(tsih); - SETUP_SESSION_RD_ATTR(isid); - SETUP_SESSION_RD_ATTR(header_digest); - SETUP_SESSION_RD_ATTR(data_digest); - SETUP_SESSION_RD_ATTR(target_name); - SETUP_SESSION_RD_ATTR(target_alias); - SETUP_SESSION_RD_ATTR(port); - 
SETUP_SESSION_RD_ATTR(tpgt); - SETUP_SESSION_RD_ATTR(ip_address); - SETUP_SESSION_RD_ATTR(initial_r2t); - SETUP_SESSION_RD_ATTR(immediate_data); - SETUP_SESSION_RD_ATTR(max_recv_data_segment_len); - SETUP_SESSION_RD_ATTR(max_burst_len); - SETUP_SESSION_RD_ATTR(first_burst_len); - SETUP_SESSION_RD_ATTR(def_time2wait); - SETUP_SESSION_RD_ATTR(def_time2retain); - SETUP_SESSION_RD_ATTR(max_outstanding_r2t); - SETUP_SESSION_RD_ATTR(data_pdu_in_order); - SETUP_SESSION_RD_ATTR(data_sequence_in_order); - SETUP_SESSION_RD_ATTR(erl); + priv = to_iscsi_internal(shost->transportt); + if (priv->conn_cont.ac.class != &iscsi_connection_class.class) + return 0; - BUG_ON(count > ISCSI_SESSION_ATTRS); - i->session_attrs[count] = NULL; + return &priv->conn_cont.ac == cont; +} + +int iscsi_register_transport(struct iscsi_transport *tt) +{ + struct iscsi_internal *priv; + unsigned long flags; + int count = 0, err; + + BUG_ON(!tt); + + priv = iscsi_if_transport_lookup(tt); + if (priv) + return -EEXIST; + + priv = kmalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + memset(priv, 0, sizeof(*priv)); + INIT_LIST_HEAD(&priv->list); + INIT_LIST_HEAD(&priv->sessions); + spin_lock_init(&priv->session_lock); + priv->iscsi_transport = tt; + + priv->cdev.class = &iscsi_transport_class; + snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name); + err = class_device_register(&priv->cdev); + if (err) + goto free_priv; + + err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group); + if (err) + goto unregister_cdev; + + /* setup parameters mask */ + priv->param_mask = 0xFFFFFFFF; + if (!(tt->caps & CAP_MULTI_R2T)) + priv->param_mask &= ~(1 << ISCSI_PARAM_MAX_R2T); + if (!(tt->caps & CAP_HDRDGST)) + priv->param_mask &= ~(1 << ISCSI_PARAM_HDRDGST_EN); + if (!(tt->caps & CAP_DATADGST)) + priv->param_mask &= ~(1 << ISCSI_PARAM_DATADGST_EN); + if (!(tt->caps & CAP_MARKERS)) { + priv->param_mask &= ~(1 << ISCSI_PARAM_IFMARKER_EN); + priv->param_mask &= ~(1 << ISCSI_PARAM_OFMARKER_EN); + } - i->t.host_attrs.ac.attrs = &i->host_attrs[0]; - i->t.host_attrs.ac.class = &iscsi_host_class.class; - i->t.host_attrs.ac.match = iscsi_host_match; - transport_container_register(&i->t.host_attrs); - i->t.host_size = 0; + /* connection parameters */ + priv->conn_cont.ac.attrs = &priv->conn_attrs[0]; + priv->conn_cont.ac.class = &iscsi_connection_class.class; + priv->conn_cont.ac.match = iscsi_conn_match; + transport_container_register(&priv->conn_cont); + SETUP_CONN_RD_ATTR(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH); + SETUP_CONN_RD_ATTR(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH); + SETUP_CONN_RD_ATTR(header_digest, ISCSI_PARAM_HDRDGST_EN); + SETUP_CONN_RD_ATTR(data_digest, ISCSI_PARAM_DATADGST_EN); + SETUP_CONN_RD_ATTR(ifmarker, ISCSI_PARAM_IFMARKER_EN); + SETUP_CONN_RD_ATTR(ofmarker, ISCSI_PARAM_OFMARKER_EN); + + BUG_ON(count > ISCSI_CONN_ATTRS); + priv->conn_attrs[count] = NULL; count = 0; - SETUP_HOST_RD_ATTR(initiator_name); - SETUP_HOST_RD_ATTR(initiator_alias); - BUG_ON(count > ISCSI_HOST_ATTRS); - i->host_attrs[count] = NULL; + /* session parameters */ + priv->session_cont.ac.attrs = &priv->session_attrs[0]; + priv->session_cont.ac.class = &iscsi_session_class.class; + priv->session_cont.ac.match = iscsi_session_match; + transport_container_register(&priv->session_cont); + + SETUP_SESSION_RD_ATTR(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN); + SETUP_SESSION_RD_ATTR(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T); + SETUP_SESSION_RD_ATTR(immediate_data, ISCSI_PARAM_IMM_DATA_EN); + 
SETUP_SESSION_RD_ATTR(first_burst_len, ISCSI_PARAM_FIRST_BURST); + SETUP_SESSION_RD_ATTR(max_burst_len, ISCSI_PARAM_MAX_BURST); + SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN); + SETUP_SESSION_RD_ATTR(data_seq_in_order,ISCSI_PARAM_DATASEQ_INORDER_EN) + SETUP_SESSION_RD_ATTR(erl, ISCSI_PARAM_ERL); + + BUG_ON(count > ISCSI_SESSION_ATTRS); + priv->session_attrs[count] = NULL; + + spin_lock_irqsave(&iscsi_transport_lock, flags); + list_add(&priv->list, &iscsi_transports); + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + + printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name); + return 0; - return &i->t; +unregister_cdev: + class_device_unregister(&priv->cdev); +free_priv: + kfree(priv); + return err; } +EXPORT_SYMBOL_GPL(iscsi_register_transport); + +int iscsi_unregister_transport(struct iscsi_transport *tt) +{ + struct iscsi_internal *priv; + unsigned long flags; + + BUG_ON(!tt); + + down(&rx_queue_sema); + + priv = iscsi_if_transport_lookup(tt); + BUG_ON (!priv); + + spin_lock_irqsave(&priv->session_lock, flags); + if (!list_empty(&priv->sessions)) { + spin_unlock_irqrestore(&priv->session_lock, flags); + up(&rx_queue_sema); + return -EPERM; + } + spin_unlock_irqrestore(&priv->session_lock, flags); + + spin_lock_irqsave(&iscsi_transport_lock, flags); + list_del(&priv->list); + spin_unlock_irqrestore(&iscsi_transport_lock, flags); + + transport_container_unregister(&priv->conn_cont); + transport_container_unregister(&priv->session_cont); + + sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group); + class_device_unregister(&priv->cdev); + up(&rx_queue_sema); -EXPORT_SYMBOL(iscsi_attach_transport); + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_unregister_transport); -void iscsi_release_transport(struct scsi_transport_template *t) +static int +iscsi_rcv_nl_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct iscsi_internal *i = to_iscsi_internal(t); + struct netlink_notify *n = ptr; + + if (event == NETLINK_URELEASE && + n->protocol == NETLINK_ISCSI && n->pid) { + struct iscsi_if_conn *conn; + unsigned long flags; + + mempool_zone_complete(&z_reply); + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + mempool_zone_complete(&conn->z_error); + mempool_zone_complete(&conn->z_pdu); + } + spin_unlock_irqrestore(&connlock, flags); + } - transport_container_unregister(&i->t.target_attrs); - transport_container_unregister(&i->t.host_attrs); - - kfree(i); + return NOTIFY_DONE; } -EXPORT_SYMBOL(iscsi_release_transport); +static struct notifier_block iscsi_nl_notifier = { + .notifier_call = iscsi_rcv_nl_event, +}; static __init int iscsi_transport_init(void) { - int err = transport_class_register(&iscsi_transport_class); + int err; + err = class_register(&iscsi_transport_class); if (err) return err; - return transport_class_register(&iscsi_host_class); + + err = transport_class_register(&iscsi_connection_class); + if (err) + goto unregister_transport_class; + + err = transport_class_register(&iscsi_session_class); + if (err) + goto unregister_conn_class; + + err = netlink_register_notifier(&iscsi_nl_notifier); + if (err) + goto unregister_session_class; + + nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, + THIS_MODULE); + if (!nls) { + err = -ENOBUFS; + goto unregister_notifier; + } + + err = mempool_zone_init(&z_reply, Z_MAX_REPLY, + NLMSG_SPACE(sizeof(struct iscsi_uevent)), Z_HIWAT_REPLY); + if (!err) + return 0; + + sock_release(nls->sk_socket); +unregister_notifier: + 
netlink_unregister_notifier(&iscsi_nl_notifier); +unregister_session_class: + transport_class_unregister(&iscsi_session_class); +unregister_conn_class: + transport_class_unregister(&iscsi_connection_class); +unregister_transport_class: + class_unregister(&iscsi_transport_class); + return err; } static void __exit iscsi_transport_exit(void) { - transport_class_unregister(&iscsi_host_class); - transport_class_unregister(&iscsi_transport_class); + mempool_destroy(z_reply.pool); + sock_release(nls->sk_socket); + netlink_unregister_notifier(&iscsi_nl_notifier); + transport_class_unregister(&iscsi_connection_class); + transport_class_unregister(&iscsi_session_class); + class_unregister(&iscsi_transport_class); } module_init(iscsi_transport_init); module_exit(iscsi_transport_exit); -MODULE_AUTHOR("Mike Christie"); -MODULE_DESCRIPTION("iSCSI Transport Attributes"); +MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " + "Dmitry Yusupov <dmitry_yus@yahoo.com>, " + "Alex Aizman <itn780@yahoo.com>"); +MODULE_DESCRIPTION("iSCSI Transport Interface"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/sym53c8xx_defs.h b/drivers/scsi/sym53c8xx_defs.h index 4c4ae7d4713f..139cd0e12e62 100644 --- a/drivers/scsi/sym53c8xx_defs.h +++ b/drivers/scsi/sym53c8xx_defs.h @@ -281,19 +281,6 @@ #endif /* -** These simple macros limit expression involving -** kernel time values (jiffies) to some that have -** chance not to be too much incorrect. :-) -*/ -#define ktime_get(o) (jiffies + (u_long) o) -#define ktime_exp(b) ((long)(jiffies) - (long)(b) >= 0) -#define ktime_dif(a, b) ((long)(a) - (long)(b)) -/* These ones are not used in this driver */ -#define ktime_add(a, o) ((a) + (u_long)(o)) -#define ktime_sub(a, o) ((a) - (u_long)(o)) - - -/* * IO functions definition for big/little endian CPU support. * For now, the NCR is only supported in little endian addressing mode, */ diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 9589c67de535..386bd6c67e73 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c @@ -988,7 +988,15 @@ din_1: if( residual ) { + static int feedback_requested; bval = DC390_read8 (ScsiFifo); /* get one residual byte */ + + if (!feedback_requested) { + feedback_requested = 1; + printk(KERN_WARNING "%s: Please, contact <linux-scsi@vger.kernel.org> " + "to help improve support for your system.\n", __FILE__); + } + ptr = (u8 *) bus_to_virt( pSRB->SGBusAddr ); *ptr = bval; pSRB->SGBusAddr++; xferCnt++; |
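iscsi_transport_init() and iscsi_transport_exit() above follow the standard module init/exit discipline: acquire resources in a fixed order, unwind them in reverse through a goto ladder when a step fails, and mirror that reverse order in the exit path. A trimmed sketch of the shape, reusing the class objects declared earlier in the patch (the real init additionally registers the netlink notifier, creates the NETLINK_ISCSI socket and sets up the z_reply mempool zone; the example_* names are hypothetical):

#include <linux/init.h>
#include <linux/device.h>
#include <linux/transport_class.h>

static int __init example_transport_init(void)
{
	int err;

	err = class_register(&iscsi_transport_class);
	if (err)
		return err;

	err = transport_class_register(&iscsi_connection_class);
	if (err)
		goto unregister_transport_class;

	err = transport_class_register(&iscsi_session_class);
	if (err)
		goto unregister_conn_class;

	return 0;

unregister_conn_class:
	transport_class_unregister(&iscsi_connection_class);
unregister_transport_class:
	class_unregister(&iscsi_transport_class);
	return err;
}

static void __exit example_transport_exit(void)
{
	/* tear down in the reverse order of example_transport_init() */
	transport_class_unregister(&iscsi_session_class);
	transport_class_unregister(&iscsi_connection_class);
	class_unregister(&iscsi_transport_class);
}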