Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c       | 234
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_main.c     |   4
-rw-r--r--  drivers/target/iscsi/iscsi_target.c           |   6
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c     |   4
-rw-r--r--  drivers/target/target_core_configfs.c         |   2
-rw-r--r--  drivers/target/target_core_fabric_configfs.c  |   7
-rw-r--r--  drivers/target/target_core_iblock.c           |   8
-rw-r--r--  drivers/target/target_core_pscsi.c            |   8
-rw-r--r--  drivers/target/target_core_transport.c        |  39
-rw-r--r--  drivers/target/target_core_user.c             |  54
-rw-r--r--  drivers/target/target_core_xcopy.c            |  34
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c               |  24
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c                |   4
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c              |  42
14 files changed, 167 insertions(+), 303 deletions(-)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 0ae0b131abfc..2fb1bf1a26c5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -24,6 +24,7 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>
+#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"
@@ -72,15 +73,6 @@ out:
return wr_waitp->ret;
}
-/* Returns whether a CPL status conveys negative advice.
- */
-static int cxgbit_is_neg_adv(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE ||
- status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
@@ -623,21 +615,14 @@ void cxgbit_free_np(struct iscsi_np *np)
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
- struct cpl_close_con_req *req;
- unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+ u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
- req = (struct cpl_close_con_req *)__skb_put(skb, len);
- memset(req, 0, len);
-
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
- csk->tid));
- req->rsvd = 0;
+ cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
+ NULL, NULL);
cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
__skb_queue_tail(&csk->txq, skb);
@@ -662,9 +647,8 @@ static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
- struct cpl_abort_req *req;
- unsigned int len = roundup(sizeof(*req), 16);
struct sk_buff *skb;
+ u32 len = roundup(sizeof(struct cpl_abort_req), 16);
pr_debug("%s: csk %p tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
@@ -675,15 +659,9 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
cxgbit_send_tx_flowc_wr(csk);
skb = __skb_dequeue(&csk->skbq);
- req = (struct cpl_abort_req *)__skb_put(skb, len);
- memset(req, 0, len);
+ cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
+ csk->com.cdev, cxgbit_abort_arp_failure);
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
- t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
- csk->tid));
- req->cmd = CPL_ABORT_SEND_RST;
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
@@ -789,109 +767,6 @@ void _cxgbit_free_csk(struct kref *kref)
kfree(csk);
}
-static void
-cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
- __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
- __be16 *peer_port)
-{
- u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
- struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
- struct tcphdr *tcp = (struct tcphdr *)
- ((u8 *)(req + 1) + eth_len + ip_len);
-
- if (ip->version == 4) {
- pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
- __func__,
- ntohl(ip->saddr), ntohl(ip->daddr),
- ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 4;
- memcpy(peer_ip, &ip->saddr, 4);
- memcpy(local_ip, &ip->daddr, 4);
- } else {
- pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
- __func__,
- ip6->saddr.s6_addr, ip6->daddr.s6_addr,
- ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 6;
- memcpy(peer_ip, ip6->saddr.s6_addr, 16);
- memcpy(local_ip, ip6->daddr.s6_addr, 16);
- }
-
- *peer_port = tcp->source;
- *local_port = tcp->dest;
-}
-
-static int
-cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
-{
- u8 i;
-
- egress_dev = cxgbit_get_real_dev(egress_dev);
- for (i = 0; i < cdev->lldi.nports; i++)
- if (cdev->lldi.ports[i] == egress_dev)
- return 1;
- return 0;
-}
-
-static struct dst_entry *
-cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
- __be16 local_port, __be16 peer_port, u8 tos,
- __u32 sin6_scope_id)
-{
- struct dst_entry *dst = NULL;
-
- if (IS_ENABLED(CONFIG_IPV6)) {
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- memcpy(&fl6.daddr, peer_ip, 16);
- memcpy(&fl6.saddr, local_ip, 16);
- if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = sin6_scope_id;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
- dst_release(dst);
- dst = NULL;
- }
- }
-out:
- return dst;
-}
-
-static struct dst_entry *
-cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
- __be16 local_port, __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
- struct neighbour *n;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
- local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- n = dst_neigh_lookup(&rt->dst, &peer_ip);
- if (!n)
- return NULL;
- if (!cxgbit_our_interface(cdev, n->dev) &&
- !(n->dev->flags & IFF_LOOPBACK)) {
- neigh_release(n);
- dst_release(&rt->dst);
- return NULL;
- }
- neigh_release(n);
- return &rt->dst;
-}
-
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
unsigned int linkspeed;
@@ -1072,21 +947,14 @@ int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
- struct cpl_tid_release *req;
- unsigned int len = roundup(sizeof(*req), 16);
+ u32 len = roundup(sizeof(struct cpl_tid_release), 16);
struct sk_buff *skb;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
- req = (struct cpl_tid_release *)__skb_put(skb, len);
- memset(req, 0, len);
-
- INIT_TP_WR(req, tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
- CPL_TID_RELEASE, tid));
- set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ cxgb_mk_tid_release(skb, len, tid, 0);
cxgbit_ofld_send(cdev, skb);
}
@@ -1108,20 +976,6 @@ cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
return ret < 0 ? ret : 0;
}
-static void
-cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx, int use_ts, int ipv6)
-{
- unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
- sizeof(struct iphdr)) +
- sizeof(struct tcphdr) +
- (use_ts ? round_up(TCPOLEN_TIMESTAMP,
- 4) : 0);
- unsigned short data_size = mtu - hdr_size;
-
- cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
-}
-
static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
if (csk->com.state != CSK_STATE_ESTABLISHED) {
@@ -1140,22 +994,18 @@ static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
- struct cpl_rx_data_ack *req;
- unsigned int len = roundup(sizeof(*req), 16);
+ u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
+ u32 credit_dack;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -1;
- req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
- memset(req, 0, len);
+ credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+ RX_CREDITS_V(csk->rx_credits);
- set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
- csk->tid));
- req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
- RX_CREDITS_V(csk->rx_credits));
+ cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
+ credit_dack);
csk->rx_credits = 0;
@@ -1210,15 +1060,6 @@ out:
return -ENOMEM;
}
-static u32 cxgbit_compute_wscale(u32 win)
-{
- u32 wscale = 0;
-
- while (wscale < 14 && (65535 << wscale) < win)
- wscale++;
- return wscale;
-}
-
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
@@ -1246,10 +1087,10 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
INIT_TP_WR(rpl5, csk->tid);
OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
csk->tid));
- cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
- req->tcpopt.tstamp,
- (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
- wscale = cxgbit_compute_wscale(csk->rcv_win);
+ cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+ req->tcpopt.tstamp,
+ (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(csk->rcv_win);
/*
* Specify the largest window that will fit in opt0. The
* remainder will be specified in the rx_data_ack.
@@ -1340,8 +1181,8 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
goto rel_skb;
}
- cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
- &local_port, &peer_port);
+ cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
+ peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
@@ -1350,21 +1191,23 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
- *(__be32 *)peer_ip,
- local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
+ *(__be32 *)local_ip,
+ *(__be32 *)peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
"lport %d rport %d peer_mss %d\n"
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
- local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
- ((struct sockaddr_in6 *)
- &cnp->com.local_addr)->sin6_scope_id);
+ dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
+ local_ip, peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &cnp->com.local_addr)->sin6_scope_id);
}
if (!dst) {
pr_err("%s - failed to find dst entry!\n",
@@ -1795,16 +1638,15 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cpl_abort_req_rss *hdr = cplhdr(skb);
unsigned int tid = GET_TID(hdr);
- struct cpl_abort_rpl *rpl;
struct sk_buff *rpl_skb;
bool release = false;
bool wakeup_thread = false;
- unsigned int len = roundup(sizeof(*rpl), 16);
+ u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, tid, csk->com.state);
- if (cxgbit_is_neg_adv(hdr->status)) {
+ if (cxgb_is_neg_adv(hdr->status)) {
pr_err("%s: got neg advise %d on tid %u\n",
__func__, hdr->status, tid);
goto rel_skb;
@@ -1839,14 +1681,8 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
cxgbit_send_tx_flowc_wr(csk);
rpl_skb = __skb_dequeue(&csk->skbq);
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-
- rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
- memset(rpl, 0, len);
- INIT_TP_WR(rpl, csk->tid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
- rpl->cmd = CPL_ABORT_NO_RST;
+ cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
cxgbit_ofld_send(csk->com.cdev, rpl_skb);
if (wakeup_thread) {
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 27dd11aff934..96eedfc49c94 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -652,6 +652,10 @@ static struct iscsit_transport cxgbit_transport = {
static struct cxgb4_uld_info cxgbit_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = true,
.add = cxgbit_uld_add,
.state_change = cxgbit_uld_state_change,
.lro_rx_handler = cxgbit_uld_lro_rx_handler,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 39b928c2849d..b7d747e92c7a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* Otherwise, initiator is not expecting a NOPIN is response.
* Just ignore for now.
*/
+
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+
return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
@@ -2982,7 +2986,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
- "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+ "Solicited" : "Unsolicited", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
}
EXPORT_SYMBOL(iscsit_build_nopin_rsp);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index adf419fa4291..15f79a2ca34a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2(
/*
* Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
- * Immediate Data + Unsolicitied Data-OUT if necessary..
+ * Immediate Data + Unsolicited Data-OUT if necessary..
*/
param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
conn->param_list);
@@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
/*
- * FIXME: Unsolicitied NopIN support for ISER
+ * FIXME: Unsolicited NopIN support for ISER
*/
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
return;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2001005bef45..a35a347ec357 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -143,7 +143,7 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
pr_err("db_root: cannot open: %s\n", db_root_stage);
return -EINVAL;
}
- if (!S_ISDIR(fp->f_inode->i_mode)) {
+ if (!S_ISDIR(file_inode(fp)->i_mode)) {
filp_close(fp, 0);
mutex_unlock(&g_tf_lock);
pr_err("db_root: not a directory: %s\n", db_root_stage);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 31a096aa16ab..d8a16ca6baa5 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -137,7 +137,7 @@ static int target_fabric_mappedlun_link(
return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
}
-static int target_fabric_mappedlun_unlink(
+static void target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
@@ -146,7 +146,7 @@ static int target_fabric_mappedlun_unlink(
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- return core_dev_del_initiator_node_lun_acl(lun, lacl);
+ core_dev_del_initiator_node_lun_acl(lun, lacl);
}
static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
@@ -669,7 +669,7 @@ out:
return ret;
}
-static int target_fabric_port_unlink(
+static void target_fabric_port_unlink(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
@@ -688,7 +688,6 @@ static int target_fabric_port_unlink(
}
core_dev_del_lun(se_tpg, lun);
- return 0;
}
static void target_fabric_port_release(struct config_item *item)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 372d744315f3..d316ed537d59 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
@@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
- * Force writethrough using WRITE_FUA if a volatile write cache
+ * Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
op = REQ_OP_WRITE;
if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
}
} else {
op = REQ_OP_READ;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d9358dea..04d7aa7390d0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -935,13 +935,9 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
bio, page, bytes, off);
- if (rc != bytes)
- goto fail;
-
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
- bio->bi_vcnt, nr_vecs);
-
- if (bio->bi_vcnt > nr_vecs) {
+ bio_segments(bio), nr_vecs);
+ if (rc != bytes) {
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6094a6beddde..7dfefd66df93 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status != SAM_STAT_GOOD) {
- return;
- }
-
- /*
- * Calculate new residual count based upon length of SCSI data
- * transferred.
- */
- if (length < cmd->data_length) {
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
- } else if (length > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = length - cmd->data_length;
- } else {
- cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
- cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,12 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref)
- kref_get(&se_cmd->cmd_kref);
+ if (ack_kref) {
+ if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+ return -EINVAL;
+
+ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2627,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd);
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd;
+ struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
int rc;
@@ -2639,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
rc = kref_get_unless_zero(&se_cmd->cmd_kref);
if (rc) {
se_cmd->cmd_wait_set = 1;
spin_lock(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
spin_unlock(&se_cmd->t_state_lock);
- }
+ } else
+ list_del_init(&se_cmd->se_cmd_list);
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2871,6 +2864,12 @@ static const struct sense_info sense_info_table[] = {
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sector_info = true,
},
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+
+ },
[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 62bf4fe5704a..2b3c8564ace8 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -96,7 +96,7 @@ struct tcmu_dev {
size_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
- /* Offset of data ring from start of mb */
+ /* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
@@ -147,8 +147,8 @@ static const struct genl_multicast_group tcmu_mcgrps[] = {
};
/* Our generic netlink family */
-static struct genl_family tcmu_genl_family = {
- .id = GENL_ID_GENERATE,
+static struct genl_family tcmu_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
.hdrsize = 0,
.name = "TCM-USER",
.version = 1,
@@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap)
/*
* We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data ring.
+ * space available on the data area.
*
* Called with ring lock held.
*/
@@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
return true;
}
-static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+static sense_reason_t
+tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Must be a certain minimum size for response sense info, but
@@ -432,11 +433,14 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
data_length += se_cmd->t_bidi_data_sg->length;
}
- if ((command_size > (udev->cmdr_size / 2))
- || data_length > udev->data_size)
- pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
- "cmd/data ring buffers\n", command_size, data_length,
+ if ((command_size > (udev->cmdr_size / 2)) ||
+ data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+ "cmd ring/data area\n", command_size, data_length,
udev->cmdr_size, udev->data_size);
+ spin_unlock_irq(&udev->cmdr_lock);
+ return TCM_INVALID_CDB_FIELD;
+ }
while (!is_ring_space_avail(udev, command_size, data_length)) {
int ret;
@@ -450,7 +454,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
- return -ETIMEDOUT;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_lock_irq(&udev->cmdr_lock);
@@ -487,9 +491,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
- /*
- * Fix up iovecs, and handle if allocation in data ring wrapped.
- */
+ /* Handle allocating space from the data area */
iov = &entry->req.iov[0];
iov_cnt = 0;
copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
@@ -526,10 +528,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
mod_timer(&udev->timeout,
round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
- return 0;
+ return TCM_NO_SENSE;
}
-static int tcmu_queue_cmd(struct se_cmd *se_cmd)
+static sense_reason_t
+tcmu_queue_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
@@ -538,10 +541,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd)
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = tcmu_queue_cmd_ring(tcmu_cmd);
- if (ret < 0) {
+ if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
spin_lock_irq(&udev->commands_lock);
idr_remove(&udev->commands, tcmu_cmd->cmd_id);
@@ -561,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
/*
* cmd has been completed already from timeout, just reclaim
- * data ring space and free cmd
+ * data area space and free cmd
*/
free_data_area(udev, cmd);
@@ -1129,20 +1132,9 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
}
static sense_reason_t
-tcmu_pass_op(struct se_cmd *se_cmd)
-{
- int ret = tcmu_queue_cmd(se_cmd);
-
- if (ret != 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- else
- return TCM_NO_SENSE;
-}
-
-static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
- return passthrough_parse_cdb(cmd, tcmu_pass_op);
+ return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static const struct target_backend_ops tcmu_ops = {
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 75cd85426ae3..094a1440eacb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
mutex_unlock(&g_device_mutex);
- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned short tdll)
+ unsigned short tdll, sense_reason_t *sense_ret)
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
unsigned short start = 0;
bool src = true;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
else
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
-
- if (rc < 0)
+ /*
+ * If a matching IEEE NAA 0x83 descriptor for the requested device
+ * is not located on this node, return COPY_ABORTED with ASQ/ASQC
+ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+ * initiator to fall back to normal copy method.
+ */
+ if (rc < 0) {
+ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
goto out;
+ }
pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
remote_port, true);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
remote_port, false);
if (rc < 0) {
struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
/*
* If the failure happened before the t_mem_list hand-off in
* target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(se_cmd, 0);
return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
kfree(xop);
-
- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Don't override an error scsi status if it has already been set
+ */
+ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+ " CHECK_CONDITION -> sending response\n", rc);
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ }
target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 216e18cc9133..9af7842b8178 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -92,7 +92,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
- lport->tt.seq_release(fr_seq(fp));
+ fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(sess); /* undo get from lookup at recv */
@@ -161,11 +161,11 @@ int ft_queue_status(struct se_cmd *se_cmd)
/*
* Send response.
*/
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- rc = lport->tt.seq_send(lport, cmd->seq, fp);
+ rc = fc_seq_send(lport, cmd->seq, fp);
if (rc) {
pr_info_ratelimited("%s: Failed to send response frame %p, "
"xid <0x%x>\n", __func__, fp, ep->xid);
@@ -177,7 +177,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
- lport->tt.exch_done(cmd->seq);
+ fc_exch_done(cmd->seq);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
@@ -221,7 +221,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
@@ -242,7 +242,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
cmd->was_ddp_setup = 1;
}
}
- lport->tt.seq_send(lport, cmd->seq, fp);
+ fc_seq_send(lport, cmd->seq, fp);
return 0;
}
@@ -323,8 +323,8 @@ static void ft_send_resp_status(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
- lport->tt.seq_send(lport, sp, fp);
- lport->tt.exch_done(sp);
+ fc_seq_send(lport, sp, fp);
+ fc_exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
@@ -461,7 +461,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
- cmd->seq = lport->tt.seq_assign(lport, fp);
+ cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
@@ -563,7 +563,7 @@ static void ft_send_work(struct work_struct *work)
task_attr = TCM_SIMPLE_TAG;
}
- fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
@@ -572,10 +572,10 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
goto err;
- pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+ pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 6f7c65abfe2a..1eb1f58e00e4 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,7 +82,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
@@ -174,7 +174,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6ffbb603d912..fd5c3de79470 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -39,6 +39,11 @@
#include "tcm_fc.h"
+#define TFC_SESS_DBG(lport, fmt, args...) \
+ pr_debug("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args )
+
static void ft_sess_delete_all(struct ft_tport *);
/*
@@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
+ char *reason = "no session created";
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
- if (!tport)
+ if (!tport) {
+ reason = "not an FCP port";
goto out;
+ }
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- pr_debug("port_id %x found %p\n", port_id, sess);
+ TFC_SESS_DBG(lport, "port_id %x found %p\n",
+ port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- pr_debug("port_id %x not found\n", port_id);
+ TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
+ port_id, reason);
return NULL;
}
@@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
struct ft_tport *tport = sess->tport;
struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
- pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+ TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
@@ -223,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
- return NULL;
+ return ERR_PTR(-ENOMEM);
kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
@@ -234,8 +244,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
if (IS_ERR(sess->se_sess)) {
+ int rc = PTR_ERR(sess->se_sess);
kfree(sess);
- return NULL;
+ sess = ERR_PTR(rc);
}
return sess;
}
@@ -319,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
- pr_debug("port_id %x\n", port_id);
+ TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
ft_close_sess(sess);
@@ -379,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
- if (!sess)
- return FC_SPP_RESP_RES;
+ if (IS_ERR(sess)) {
+ if (PTR_ERR(sess) == -EACCES) {
+ spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
+ return FC_SPP_RESP_CONF;
+ } else
+ return FC_SPP_RESP_RES;
+ }
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
@@ -423,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- pr_debug("port_id %x flags %x ret %x\n",
- rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+ TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
+ rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -477,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- pr_debug("sid %x\n", sid);
+ TFC_SESS_DBG(lport, "recv sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- pr_debug("sid %x sess lookup failed\n", sid);
+ TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;