scsi: ibmvfc: Add alloc/dealloc routines for SCSI Sub-CRQ Channels

Allocate a set of Sub-CRQs in advance. During channel setup the client and
VIOS negotiate the number of queues the VIOS supports and the number that
the client desires to request. It's possible that the number of channel
resources finally allocated is smaller than requested, but the client is
still responsible for sending handles for every queue it hopes to use.

Also, provide deallocation cleanup routines.

Link: https://lore.kernel.org/r/20210114203148.246656-8-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Tyrel Datwyler 2021-01-14 14:31:34 -06:00 committed by Martin K. Petersen
parent 6d07f129dc
commit 3034ebe263
2 changed files with 126 additions and 0 deletions

View file

@ -895,6 +895,8 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
unsigned long flags; unsigned long flags;
struct vio_dev *vdev = to_vio_dev(vhost->dev); struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_queue *crq = &vhost->crq; struct ibmvfc_queue *crq = &vhost->crq;
struct ibmvfc_queue *scrq;
int i;
/* Close the CRQ */ /* Close the CRQ */
do { do {
@ -912,6 +914,16 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
memset(crq->msgs.crq, 0, PAGE_SIZE); memset(crq->msgs.crq, 0, PAGE_SIZE);
crq->cur = 0; crq->cur = 0;
if (vhost->scsi_scrqs.scrqs) {
for (i = 0; i < IBMVFC_SCSI_HW_QUEUES; i++) {
scrq = &vhost->scsi_scrqs.scrqs[i];
spin_lock(scrq->q_lock);
memset(scrq->msgs.scrq, 0, PAGE_SIZE);
scrq->cur = 0;
spin_unlock(scrq->q_lock);
}
}
/* And re-open it again */ /* And re-open it again */
rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
crq->msg_token, PAGE_SIZE); crq->msg_token, PAGE_SIZE);
@ -5045,6 +5057,11 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
case IBMVFC_ASYNC_FMT: case IBMVFC_ASYNC_FMT:
fmt_size = sizeof(*queue->msgs.async); fmt_size = sizeof(*queue->msgs.async);
break; break;
case IBMVFC_SUB_CRQ_FMT:
fmt_size = sizeof(*queue->msgs.scrq);
/* We need one extra event for Cancel Commands */
pool_size = max_requests + 1;
break;
default: default:
dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt); dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
return -EINVAL; return -EINVAL;
@ -5136,6 +5153,107 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
return retrc; return retrc;
} }
static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
int index)
{
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
int rc = -ENOMEM;
ENTER;
if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
return -ENOMEM;
rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
&scrq->cookie, &scrq->hw_irq);
if (rc) {
dev_warn(dev, "Error registering sub-crq: %d\n", rc);
if (rc == H_PARAMETER)
dev_warn_once(dev, "Firmware may not support MQ\n");
goto reg_failed;
}
scrq->hwq_id = index;
scrq->vhost = vhost;
LEAVE;
return 0;
reg_failed:
ibmvfc_free_queue(vhost, scrq);
LEAVE;
return rc;
}
/**
 * ibmvfc_deregister_scsi_channel - tear down one SCSI Sub-CRQ
 * @vhost:	ibmvfc host struct
 * @index:	index of the channel within vhost->scsi_scrqs.scrqs
 *
 * Frees the Sub-CRQ with the hypervisor (retrying while it reports
 * busy) and then releases the queue memory.
 */
static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
{
	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	long ret;

	ENTER;

	/* The hypervisor may ask us to retry while the queue quiesces. */
	for (;;) {
		ret = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
					 scrq->cookie);
		if (ret != H_BUSY && !H_IS_LONG_BUSY(ret))
			break;
	}

	if (ret)
		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, ret);

	ibmvfc_free_queue(vhost, scrq);
	LEAVE;
}
static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
{
int i, j;
ENTER;
vhost->scsi_scrqs.scrqs = kcalloc(IBMVFC_SCSI_HW_QUEUES,
sizeof(*vhost->scsi_scrqs.scrqs),
GFP_KERNEL);
if (!vhost->scsi_scrqs.scrqs)
return -1;
for (i = 0; i < IBMVFC_SCSI_HW_QUEUES; i++) {
if (ibmvfc_register_scsi_channel(vhost, i)) {
for (j = i; j > 0; j--)
ibmvfc_deregister_scsi_channel(vhost, j - 1);
kfree(vhost->scsi_scrqs.scrqs);
vhost->scsi_scrqs.scrqs = NULL;
vhost->scsi_scrqs.active_queues = 0;
LEAVE;
return -1;
}
}
LEAVE;
return 0;
}
/**
 * ibmvfc_release_sub_crqs - tear down all SCSI Sub-CRQ channels
 * @vhost:	ibmvfc host struct
 *
 * No-op if the scrq array was never allocated. Otherwise deregisters
 * every channel, frees the array, and resets the bookkeeping fields.
 */
static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
{
	int q = 0;

	ENTER;
	if (!vhost->scsi_scrqs.scrqs)
		return;

	while (q < IBMVFC_SCSI_HW_QUEUES)
		ibmvfc_deregister_scsi_channel(vhost, q++);

	kfree(vhost->scsi_scrqs.scrqs);
	vhost->scsi_scrqs.scrqs = NULL;
	vhost->scsi_scrqs.active_queues = 0;
	LEAVE;
}
/** /**
* ibmvfc_free_mem - Free memory for vhost * ibmvfc_free_mem - Free memory for vhost
* @vhost: ibmvfc host struct * @vhost: ibmvfc host struct
@ -5371,6 +5489,12 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto remove_shost; goto remove_shost;
} }
if (vhost->mq_enabled) {
rc = ibmvfc_init_sub_crqs(vhost);
if (rc)
dev_warn(dev, "Failed to allocate Sub-CRQs. rc=%d\n", rc);
}
if (shost_to_fc_host(shost)->rqst_q) if (shost_to_fc_host(shost)->rqst_q)
blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost); dev_set_drvdata(dev, vhost);
@ -5427,6 +5551,7 @@ static int ibmvfc_remove(struct vio_dev *vdev)
list_splice_init(&vhost->purge, &purge); list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags); spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_complete_purge(&purge); ibmvfc_complete_purge(&purge);
ibmvfc_release_sub_crqs(vhost);
ibmvfc_release_crq_queue(vhost); ibmvfc_release_crq_queue(vhost);
ibmvfc_free_mem(vhost); ibmvfc_free_mem(vhost);

View file

@ -850,6 +850,7 @@ struct ibmvfc_host {
mempool_t *tgt_pool; mempool_t *tgt_pool;
struct ibmvfc_queue crq; struct ibmvfc_queue crq;
struct ibmvfc_queue async_crq; struct ibmvfc_queue async_crq;
struct ibmvfc_scsi_channels scsi_scrqs;
struct ibmvfc_npiv_login login_info; struct ibmvfc_npiv_login login_info;
union ibmvfc_npiv_login_data *login_buf; union ibmvfc_npiv_login_data *login_buf;
dma_addr_t login_buf_dma; dma_addr_t login_buf_dma;