target: Make all control CDBs scatter-gather

Previously, some control CDBs did not allocate memory in pages for their
data buffer, but simply kmalloc'd a contiguous buffer. This patch makes
all CDBs allocate pages.

This has the benefit of streamlining some paths that had to behave
differently when we used two allocation methods. The downside is that
all accesses to the data buffer need to kmap it before use, and need to
handle data in page-sized chunks if more than a page is needed for a given
command's data buffer.
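
For reference, a converted control-CDB emulation handler now accesses its
buffer roughly like this (a minimal sketch using the kmap helpers added by
this patch; the handler name and the byte written are placeholders, not
code taken from the patch):

    /* Sketch: the data buffer now lives in cmd->t_mem_list pages, so a
     * handler must kmap the first page, fill in its response, and
     * always kunmap before returning. */
    static int example_emulate_ctrl_cdb(struct se_cmd *cmd)
    {
            unsigned char *buf;

            buf = transport_kmap_first_data_page(cmd);
            buf[0] = 0x00;          /* ... build the response payload ... */
            transport_kunmap_first_data_page(cmd);
            return 0;
    }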

Finally, note that CDBs with no data buffer are handled a little
differently. Before, SCF_SCSI_NON_DATA_CDB commands would not call
get_mem at all (they fell through to the final else in
transport_allocate_resources); now they go through
transport_generic_get_mem as well, which simply allocates no buffers
for them.
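
A simplified sketch of that behaviour (modelled on the new
transport_generic_get_mem in this patch; the do_se_mem_map early return,
se_mem counting, and unwinding of previously allocated entries on failure
are omitted here for brevity):

    static int sketch_generic_get_mem(struct se_cmd *cmd)
    {
            u32 length = cmd->data_length;
            struct se_mem *se_mem;

            /* A zero-length (non-data) command skips the loop entirely
             * and allocates nothing. */
            while (length) {
                    se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
                    if (!se_mem)
                            return -ENOMEM;
                    se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
                    if (!se_mem->se_page) {
                            kmem_cache_free(se_mem_cache, se_mem);
                            return -ENOMEM;
                    }
                    se_mem->se_len = min_t(u32, length, PAGE_SIZE);
                    list_add_tail(&se_mem->se_list, &cmd->t_mem_list);
                    length -= se_mem->se_len;
            }
            return 0;
    }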

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
commit 05d1c7c0d0 (parent e22a7f0752)
Author: Andy Grover, 2011-07-20 19:13:28 +00:00 (committed by Nicholas Bellinger)
10 changed files with 219 additions and 226 deletions

@ -65,10 +65,12 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
buf = transport_kmap_first_data_page(cmd);
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
@ -141,6 +143,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
buf[2] = ((rd_len >> 8) & 0xff);
buf[3] = (rd_len & 0xff);
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -157,14 +161,17 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
unsigned char *buf;
unsigned char *ptr;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
if (!(l_port))
return PYX_TRANSPORT_LU_COMM_FAILURE;
buf = transport_kmap_first_data_page(cmd);
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
@ -172,14 +179,16 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!(l_tg_pt_gp_mem)) {
printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
if (!(l_tg_pt_gp)) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@ -187,9 +196,12 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
if (!(rc)) {
printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
goto out;
}
ptr = &buf[4]; /* Skip over RESERVED area in header */
while (len < cmd->data_length) {
alua_access_state = (ptr[0] & 0x0f);
/*
@ -209,7 +221,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
rc = -1;
/*
@ -260,8 +273,10 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* If not matching target port group ID can be located
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0)
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
if (rc != 0) {
rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
} else {
/*
* Extact the RELATIVE TARGET PORT IDENTIFIER to identify
@ -295,14 +310,19 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* be located, throw an exception with ASCQ:
* INVALID_PARAMETER_LIST
*/
if (rc != 0)
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
if (rc != 0) {
rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
}
ptr += 4;
len += 4;
}
out:
transport_kunmap_first_data_page(cmd);
return 0;
}

@ -66,7 +66,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
unsigned char *buf = cmd->t_task_buf;
unsigned char *buf;
/*
* Make sure we at least have 6 bytes of INQUIRY response
@ -78,6 +78,8 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
buf[0] = dev->transport->get_device_type(dev);
if (buf[0] == TYPE_TAPE)
buf[1] = 0x80;
@ -91,7 +93,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
if (cmd->data_length < 8) {
buf[4] = 1; /* Set additional length to 1 */
return 0;
goto out;
}
buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
@ -102,7 +104,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
*/
if (cmd->data_length < 36) {
buf[4] = 3; /* Set additional length to 3 */
return 0;
goto out;
}
snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
@ -111,6 +113,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
snprintf((unsigned char *)&buf[32], 4, "%s",
&dev->se_sub_dev->t10_wwn.revision[0]);
buf[4] = 31; /* Set additional length to 31 */
out:
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -647,9 +652,9 @@ static int
target_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *buf = cmd->t_task_buf;
unsigned char *buf;
unsigned char *cdb = cmd->t_task_cdb;
int p;
int p, ret;
if (!(cdb[1] & 0x1))
return target_emulate_inquiry_std(cmd);
@ -666,14 +671,20 @@ target_emulate_inquiry(struct se_cmd *cmd)
" too small for EVPD=1\n", cmd->data_length);
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
buf[0] = dev->transport->get_device_type(dev);
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
if (cdb[2] == evpd_handlers[p].page) {
buf[1] = cdb[2];
return evpd_handlers[p].emulate(cmd, buf);
ret = evpd_handlers[p].emulate(cmd, buf);
transport_kunmap_first_data_page(cmd);
return ret;
}
transport_kunmap_first_data_page(cmd);
printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
return -EINVAL;
}
@ -682,7 +693,7 @@ static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *buf = cmd->t_task_buf;
unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
u32 blocks;
@ -691,6 +702,8 @@ target_emulate_readcapacity(struct se_cmd *cmd)
else
blocks = (u32)blocks_long;
buf = transport_kmap_first_data_page(cmd);
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
@ -705,6 +718,8 @@ target_emulate_readcapacity(struct se_cmd *cmd)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
put_unaligned_be32(0xFFFFFFFF, &buf[0]);
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -712,9 +727,11 @@ static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *buf = cmd->t_task_buf;
unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
buf = transport_kmap_first_data_page(cmd);
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
buf[2] = (blocks >> 40) & 0xff;
@ -734,6 +751,8 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -848,7 +867,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
unsigned char *rbuf = cmd->t_task_buf;
unsigned char *rbuf;
int type = dev->transport->get_device_type(dev);
int offset = (ten) ? 8 : 4;
int length = 0;
@ -911,7 +930,10 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
if ((offset + 1) > cmd->data_length)
offset = cmd->data_length;
}
rbuf = transport_kmap_first_data_page(cmd);
memcpy(rbuf, buf, offset);
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -920,14 +942,18 @@ static int
target_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf = cmd->t_task_buf;
unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
int err = 0;
if (cdb[1] & 0x01) {
printk(KERN_ERR "REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
buf = transport_kmap_first_data_page(cmd);
if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
/*
* CURRENT ERROR, UNIT ATTENTION
@ -940,7 +966,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
return 0;
err = -EINVAL;
goto end;
}
/*
* The Additional Sense Code (ASC) from the UNIT ATTENTION
@ -960,7 +987,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
return 0;
err = -EINVAL;
goto end;
}
/*
* NO ADDITIONAL SENSE INFORMATION
@ -969,6 +997,9 @@ target_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}
end:
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -981,11 +1012,11 @@ target_emulate_unmap(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf = cmd->t_task_buf, *ptr = NULL;
unsigned char *buf, *ptr = NULL;
unsigned char *cdb = &cmd->t_task_cdb[0];
sector_t lba;
unsigned int size = cmd->data_length, range;
int ret, offset;
int ret = 0, offset;
unsigned short dl, bd_dl;
/* First UNMAP block descriptor starts at 8 byte offset */
@ -993,6 +1024,9 @@ target_emulate_unmap(struct se_task *task)
size -= 8;
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);
buf = transport_kmap_first_data_page(cmd);
ptr = &buf[offset];
printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
@ -1007,7 +1041,7 @@ target_emulate_unmap(struct se_task *task)
if (ret < 0) {
printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
ret);
return ret;
goto err;
}
ptr += 16;
@ -1016,7 +1050,10 @@ target_emulate_unmap(struct se_task *task)
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
err:
transport_kunmap_first_data_page(cmd);
return ret;
}
/*

@ -657,7 +657,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
struct se_lun *se_lun;
struct se_session *se_sess = se_cmd->se_sess;
struct se_task *se_task;
unsigned char *buf = se_cmd->t_task_buf;
unsigned char *buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
@ -668,6 +668,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
buf = transport_kmap_first_data_page(se_cmd);
/*
* If no struct se_session pointer is present, this struct se_cmd is
* coming via a target_core_mod PASSTHROUGH op, and not through
@ -704,6 +706,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
* See SPC3 r07, page 159.
*/
done:
transport_kunmap_first_data_page(se_cmd);
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);

@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
struct list_head tid_dest_list;
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
struct target_core_fabric_ops *tmp_tf_ops;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tpdl, tid_len = 0;
@ -1524,6 +1524,8 @@ static int core_scsi3_decode_spec_i_port(
*/
tidh_new->dest_local_nexus = 1;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
buf = transport_kmap_first_data_page(cmd);
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
@ -1760,6 +1762,9 @@ static int core_scsi3_decode_spec_i_port(
tid_len = 0;
}
transport_kunmap_first_data_page(cmd);
/*
* Go ahead and create a registrations from tid_dest_list for the
* SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
@ -1806,6 +1811,7 @@ static int core_scsi3_decode_spec_i_port(
return 0;
out:
transport_kunmap_first_data_page(cmd);
/*
* For the failure case, release everything from tid_dest_list
* including *dest_pr_reg and the configfs dependances..
@ -3307,7 +3313,7 @@ static int core_scsi3_emulate_pro_register_and_move(
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
@ -3357,17 +3363,21 @@ static int core_scsi3_emulate_pro_register_and_move(
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
/*
* Determine the Relative Target Port Identifier where the reservation
* will be moved to for the TransportID containing SCSI initiator WWN
* information.
*/
buf = transport_kmap_first_data_page(cmd);
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
tid_len |= (buf[21] & 0xff) << 16;
tid_len |= (buf[22] & 0xff) << 8;
tid_len |= buf[23] & 0xff;
transport_kunmap_first_data_page(cmd);
buf = NULL;
if ((tid_len + 24) != cmd->data_length) {
printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
@ -3414,6 +3424,8 @@ static int core_scsi3_emulate_pro_register_and_move(
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_first_data_page(cmd);
proto_ident = (buf[24] & 0x0f);
#if 0
printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
@ -3444,6 +3456,9 @@ static int core_scsi3_emulate_pro_register_and_move(
goto out;
}
transport_kunmap_first_data_page(cmd);
buf = NULL;
printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
"port" : "device", initiator_str, (iport_ptr != NULL) ?
@ -3696,9 +3711,13 @@ static int core_scsi3_emulate_pro_register_and_move(
" REGISTER_AND_MOVE\n");
}
transport_kunmap_first_data_page(cmd);
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
if (buf)
transport_kunmap_first_data_page(cmd);
if (dest_se_deve)
core_scsi3_lunacl_undepend_item(dest_se_deve);
if (dest_node_acl)
@ -3723,7 +3742,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
*/
static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
{
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@ -3745,6 +3764,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
sa = (cdb[1] & 0x1f);
scope = (cdb[2] & 0xf0);
type = (cdb[2] & 0x0f);
buf = transport_kmap_first_data_page(cmd);
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@ -3762,6 +3783,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
aptpl = (buf[17] & 0x01);
unreg = (buf[17] & 0x02);
}
transport_kunmap_first_data_page(cmd);
buf = NULL;
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
@ -3830,7 +3854,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
struct se_device *se_dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
u32 add_len = 0, off = 8;
if (cmd->data_length < 8) {
@ -3839,6 +3863,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
buf = transport_kmap_first_data_page(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@ -3872,6 +3897,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -3885,7 +3912,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
struct se_device *se_dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
u64 pr_res_key;
u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
@ -3895,6 +3922,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
buf = transport_kmap_first_data_page(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@ -3911,10 +3939,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
if (cmd->data_length < 22) {
spin_unlock(&se_dev->dev_reservation_lock);
return 0;
}
if (cmd->data_length < 22)
goto err;
/*
* Set the Reservation key.
*
@ -3951,7 +3978,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[21] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
}
err:
spin_unlock(&se_dev->dev_reservation_lock);
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -3965,7 +3995,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
@ -3974,6 +4004,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
buf = transport_kmap_first_data_page(cmd);
buf[0] = ((add_len << 8) & 0xff);
buf[1] = (add_len & 0xff);
buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */
@ -4004,6 +4036,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
transport_kunmap_first_data_page(cmd);
return 0;
}
@ -4020,7 +4054,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
unsigned char *buf = (unsigned char *)cmd->t_task_buf;
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
int format_code = 0;
@ -4031,6 +4065,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
buf = transport_kmap_first_data_page(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@ -4150,6 +4186,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
transport_kunmap_first_data_page(cmd);
return 0;
}

@ -695,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task)
if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
unsigned char *buf = task->task_se_cmd->t_task_buf;
unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@ -704,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task)
if (!(buf[2] & 0x80))
buf[2] |= 0x80;
}
transport_kunmap_first_data_page(task->task_se_cmd);
}
}
after_mode_sense:
@ -1246,33 +1248,6 @@ static int pscsi_map_task_SG(struct se_task *task)
return 0;
}
/* pscsi_map_task_non_SG():
*
*
*/
static int pscsi_map_task_non_SG(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
int ret = 0;
if (pscsi_blk_get_request(task) < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (!task->task_size)
return 0;
ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
pt->pscsi_req, cmd->t_task_buf,
task->task_size, GFP_KERNEL);
if (ret < 0) {
printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
return 0;
}
static int pscsi_CDB_none(struct se_task *task)
{
return pscsi_blk_get_request(task);
@ -1392,7 +1367,6 @@ static struct se_subsystem_api pscsi_template = {
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
.cdb_none = pscsi_CDB_none,
.map_task_non_SG = pscsi_map_task_non_SG,
.map_task_SG = pscsi_map_task_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,

@ -213,7 +213,7 @@ static u32 transport_allocate_tasks(struct se_cmd *cmd,
unsigned long long starting_lba, u32 sectors,
enum dma_data_direction data_direction,
struct list_head *mem_list, int set_counts);
static int transport_generic_get_mem(struct se_cmd *cmd, u32 length);
static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
int session_reinstatement);
static int transport_cmd_get_valid_sectors(struct se_cmd *cmd);
@ -2233,23 +2233,6 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
transport_generic_remove(cmd, 0);
}
static int
transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
{
unsigned char *buf;
buf = kzalloc(data_length, GFP_KERNEL);
if (!(buf)) {
printk(KERN_ERR "Unable to allocate memory for buffer\n");
return -ENOMEM;
}
cmd->t_tasks_se_num = 0;
cmd->t_task_buf = buf;
return 0;
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
@ -2966,19 +2949,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
static int transport_allocate_resources(struct se_cmd *cmd)
{
u32 length = cmd->data_length;
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
return transport_generic_get_mem(cmd, length);
else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
return transport_generic_allocate_buf(cmd, length);
else
return 0;
}
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
@ -3265,7 +3235,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_SEND_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SELECT:
size = cdb[4];
@ -3277,7 +3247,7 @@ static int transport_generic_cmd_sequencer(
break;
case MODE_SENSE:
size = cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SENSE_10:
case GPCMD_READ_BUFFER_CAPACITY:
@ -3285,11 +3255,11 @@ static int transport_generic_cmd_sequencer(
case LOG_SELECT:
case LOG_SENSE:
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BLOCK_LIMITS:
size = READ_BLOCK_LEN;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_GET_CONFIGURATION:
case GPCMD_READ_FORMAT_CAPACITIES:
@ -3305,7 +3275,7 @@ static int transport_generic_cmd_sequencer(
SPC3_PERSISTENT_RESERVATIONS) ?
core_scsi3_emulate_pr : NULL;
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_MECHANISM_STATUS:
case GPCMD_READ_DVD_STRUCTURE:
@ -3314,7 +3284,7 @@ static int transport_generic_cmd_sequencer(
break;
case READ_POSITION:
size = READ_POSITION_LEN;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MAINTENANCE_OUT:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
@ -3336,7 +3306,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_REPORT_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case INQUIRY:
size = (cdb[3] << 8) + cdb[4];
@ -3346,21 +3316,21 @@ static int transport_generic_cmd_sequencer(
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_CAPACITY:
size = READ_CAP_LEN;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case SERVICE_ACTION_IN:
case ACCESS_CONTROL_IN:
@ -3371,36 +3341,36 @@ static int transport_generic_cmd_sequencer(
case WRITE_ATTRIBUTE:
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
size = (cdb[3] << 8) | cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
case GPCMD_READ_CD:
sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
size = (2336 * sectors);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
#endif
case READ_TOC:
size = cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case REQUEST_SENSE:
size = cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_ELEMENT_STATUS:
size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RESERVE:
case RESERVE_10:
@ -3480,7 +3450,7 @@ static int transport_generic_cmd_sequencer(
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
@ -3547,7 +3517,7 @@ static int transport_generic_cmd_sequencer(
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
default:
printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
@ -3804,16 +3774,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
/*
* If enabled by TCM fabric module pre-registered SGL
* memory, perform the memcpy() from the TCM internal
* contiguous buffer back to the original SGL.
*/
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
sg_copy_from_buffer(cmd->t_task_pt_sgl,
cmd->t_task_pt_sgl_num,
cmd->t_task_buf,
cmd->data_length);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret == -EAGAIN)
@ -3899,12 +3859,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
if (cmd->se_dev->transport->do_se_mem_map)
free_page = 0;
if (cmd->t_task_buf) {
kfree(cmd->t_task_buf);
cmd->t_task_buf = NULL;
return;
}
list_for_each_entry_safe(se_mem, se_mem_tmp,
&cmd->t_mem_list, se_list) {
/*
@ -4068,25 +4022,6 @@ int transport_generic_map_mem_to_cmd(
cmd->t_tasks_se_bidi_num = ret;
}
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
if (sgl_bidi || sgl_bidi_count) {
printk(KERN_ERR "BIDI-Commands not supported using "
"SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
return -ENOSYS;
}
/*
* For incoming CDBs using a contiguous buffer internal with TCM,
* save the passed struct scatterlist memory. After TCM storage object
* processing has completed for this struct se_cmd, TCM core will call
* transport_memcpy_[write,read]_contig() as necessary from
* transport_generic_complete_ok() and transport_write_pending() in order
* to copy the TCM buffer to/from the original passed *mem in SGL ->
* struct scatterlist format.
*/
cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
cmd->t_task_pt_sgl = sgl;
cmd->t_task_pt_sgl_num = sgl_count;
}
return 0;
@ -4184,17 +4119,49 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
return 0;
}
static int
transport_generic_get_mem(struct se_cmd *cmd, u32 length)
void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
struct se_mem *se_mem;
BUG_ON(list_empty(&cmd->t_mem_list));
se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list);
/*
* 1st se_mem should point to a page, and we shouldn't need more than
* that for this cmd
*/
BUG_ON(cmd->data_length > PAGE_SIZE);
return kmap(se_mem->se_page);
}
EXPORT_SYMBOL(transport_kmap_first_data_page);
void transport_kunmap_first_data_page(struct se_cmd *cmd)
{
struct se_mem *se_mem;
BUG_ON(list_empty(&cmd->t_mem_list));
se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list);
kunmap(se_mem->se_page);
}
EXPORT_SYMBOL(transport_kunmap_first_data_page);
static int
transport_generic_get_mem(struct se_cmd *cmd)
{
struct se_mem *se_mem;
int length = cmd->data_length;
/*
* If the device uses memory mapping this is enough.
*/
if (cmd->se_dev->transport->do_se_mem_map)
return 0;
/* Even cmds with length 0 will get here, btw */
while (length) {
se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
if (!(se_mem)) {
@ -4851,10 +4818,6 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
if (dev->transport->map_task_SG)
return dev->transport->map_task_SG(task);
return 0;
} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
if (dev->transport->map_task_non_SG)
return dev->transport->map_task_non_SG(task);
return 0;
} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
if (dev->transport->cdb_none)
return dev->transport->cdb_none(task);
@ -4887,7 +4850,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
* cmd->t_mem_list of struct se_mem->se_page
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
ret = transport_allocate_resources(cmd);
ret = transport_generic_get_mem(cmd);
if (ret < 0)
return ret;
}
@ -5029,17 +4992,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
cmd->transport_qf_callback = NULL;
return 0;
}
/*
* For the TCM control CDBs using a contiguous buffer, do the memcpy
* from the passed Linux/SCSI struct scatterlist located at
* se_cmd->t_task_pt_sgl to the contiguous buffer at
* se_cmd->t_task_buf.
*/
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
sg_copy_to_buffer(cmd->t_task_pt_sgl,
cmd->t_task_pt_sgl_num,
cmd->t_task_buf,
cmd->data_length);
/*
* Clear the se_cmd for WRITE_PENDING status in order to set
* cmd->t_transport_active=0 so that transport_generic_handle_data

@ -71,9 +71,9 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
caller, cmd, cmd->cdb);
printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
printk(KERN_INFO "%s: cmd %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
printk(KERN_INFO "%s: cmd %p se_num %u len %u se_cmd_flags <0x%x>\n",
caller, cmd, se_cmd->t_tasks_se_num,
se_cmd->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
se_cmd->data_length, se_cmd->se_cmd_flags);
list_for_each_entry(mem, &se_cmd->t_mem_list, se_list)
printk(KERN_INFO "%s: cmd %p mem %p page %p "

@ -92,19 +92,15 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
remaining = se_cmd->data_length;
/*
* Setup to use first mem list entry if any.
* Setup to use first mem list entry, unless no data.
*/
if (se_cmd->t_tasks_se_num) {
BUG_ON(remaining && list_empty(&se_cmd->t_mem_list));
if (remaining) {
mem = list_first_entry(&se_cmd->t_mem_list,
struct se_mem, se_list);
mem_len = mem->se_len;
mem_off = mem->se_off;
page = mem->se_page;
} else {
mem = NULL;
mem_len = remaining;
mem_off = 0;
page = NULL;
}
/* no scatter/gather in skb for odd word length due to fc_seq_send() */
@ -145,18 +141,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
tlen = min(mem_len, frame_len);
if (use_sg) {
if (!mem) {
BUG_ON(!se_cmd->t_task_buf);
page_addr = se_cmd->t_task_buf + mem_off;
/*
* In this case, offset is 'offset_in_page' of
* (t_task_buf + mem_off) instead of 'mem_off'.
*/
off_in_page = offset_in_page(page_addr);
page = virt_to_page(page_addr);
tlen = min(tlen, PAGE_SIZE - off_in_page);
} else
off_in_page = mem_off;
off_in_page = mem_off;
BUG_ON(!page);
get_page(page);
skb_fill_page_desc(fp_skb(fp),
@ -166,7 +151,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
fp_skb(fp)->data_len += tlen;
fp_skb(fp)->truesize +=
PAGE_SIZE << compound_order(page);
} else if (mem) {
} else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
@ -177,10 +162,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
to += tlen;
} else {
from = se_cmd->t_task_buf + mem_off;
memcpy(to, from, tlen);
to += tlen;
}
mem_off += tlen;
@ -305,19 +286,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
frame_len = se_cmd->data_length - rel_off;
/*
* Setup to use first mem list entry if any.
* Setup to use first mem list entry, unless no data.
*/
if (se_cmd->t_tasks_se_num) {
BUG_ON(frame_len && list_empty(&se_cmd->t_mem_list));
if (frame_len) {
mem = list_first_entry(&se_cmd->t_mem_list,
struct se_mem, se_list);
mem_len = mem->se_len;
mem_off = mem->se_off;
page = mem->se_page;
} else {
mem = NULL;
page = NULL;
mem_off = 0;
mem_len = frame_len;
}
while (frame_len) {
@ -340,19 +317,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
tlen = min(mem_len, frame_len);
if (mem) {
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
page_addr = to;
to += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
} else {
to = se_cmd->t_task_buf + mem_off;
memcpy(to, from, tlen);
}
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
page_addr = to;
to += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
from += tlen;
frame_len -= tlen;
mem_off += tlen;

@ -109,7 +109,6 @@ enum se_cmd_flags_table {
SCF_EMULATED_TASK_SENSE = 0x00000004,
SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020,
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
@ -123,7 +122,6 @@ enum se_cmd_flags_table {
SCF_ALUA_NON_OPTIMIZED = 0x00040000,
SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
SCF_EMULATE_QUEUE_FULL = 0x02000000,
@ -516,8 +514,7 @@ struct se_cmd {
struct completion transport_lun_fe_stop_comp;
struct completion transport_lun_stop_comp;
struct scatterlist *t_tasks_sg_chained;
struct scatterlist t_tasks_sg_bounce;
void *t_task_buf;
/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop

@ -165,6 +165,8 @@ extern void transport_init_se_cmd(struct se_cmd *,
struct target_core_fabric_ops *,
struct se_session *, u32, int, int,
unsigned char *);
void *transport_kmap_first_data_page(struct se_cmd *cmd);
void transport_kunmap_first_data_page(struct se_cmd *cmd);
extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_generic_handle_cdb(struct se_cmd *);
@ -236,10 +238,6 @@ struct se_subsystem_api {
* For SCF_SCSI_NON_DATA_CDB
*/
int (*cdb_none)(struct se_task *);
/*
* For SCF_SCSI_CONTROL_NONSG_IO_CDB
*/
int (*map_task_non_SG)(struct se_task *);
/*
* For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
*/