scsi: lpfc: Add an internal trace log buffer

With the current logging methods, figuring out what happened typically ends up
requiring a reproduction with a different logging level set. This was mainly by
design: the intent was to avoid cluttering the kernel log with messages that
were usually not interesting, and the extra messages themselves could cause
other issues.

When looking at how to build a better system, it was seen that in many cases
the point at which more data was wanted was when another message, usually at
KERN_ERR level, was logged. And in most cases, the additional logging that was
then enabled covered the events leading up to that message. Most of these areas
fell into the discovery machinery.

Based on this summary, the following design has been put in place: The
driver will maintain an internal log (256 elements of 256 bytes).  The
"additional logging" messages that are usually enabled in a reproduction
will be changed to now log all the time to the internal log.  A new logging
level is defined - LOG_TRACE_EVENT.  When this level is set (it is not by
default) and a message marked as KERN_ERR is logged, all the messages in
the internal log will be dumped to the kernel log before the KERN_ERR
message is logged.

There is a timestamp on each message added to the internal log. However,
this timestamp is not converted to wall time when logged. The value of the
timestamp is solely to give a crude time reference for the messages.

Link: https://lore.kernel.org/r/20200630215001.70793-14-jsmart2021@gmail.com
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
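
To make the described flow concrete, here is a minimal sketch of the dump side of the
scheme: flushing the internal ring to the kernel log before the triggering KERN_ERR
message is printed. It is an illustration only, built on the dbg_log fields added to
struct lpfc_hba in the diff below; the driver's real lpfc_dmp_dbg() is not visible in
the hunks shown here, so the helper name and internals are assumptions.

/* Sketch only -- assumed logic, not the driver's actual lpfc_dmp_dbg().
 * Entries are printed oldest-first; the raw nanosecond timestamp is kept
 * as-is, serving only as a crude relative time reference.
 */
static void dbg_log_dump(struct lpfc_hba *phba)
{
	unsigned int start, cnt, i;

	/* Keep new messages out of the ring while it is being drained */
	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1))
		return;

	cnt = atomic_read(&phba->dbg_log_cnt);
	/* Oldest entry sits cnt slots behind the producer index */
	start = ((unsigned int)atomic_read(&phba->dbg_log_idx) - cnt) %
		DBG_LOG_SZ;

	for (i = 0; i < cnt; i++) {
		struct dbg_log_ent *ent =
			&phba->dbg_log[(start + i) % DBG_LOG_SZ];

		dev_info(&phba->pcidev->dev, "start time: %llu : %s",
			 (unsigned long long)ent->t_ns, ent->log);
	}

	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}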
Authored by Dick Kennedy on 2020-06-30 14:50:00 -07:00; committed by Martin K. Petersen
parent 317aeb83c9
commit 372c187b8a
13 changed files with 928 additions and 792 deletions


@ -627,6 +627,14 @@ struct lpfc_ras_fwlog {
enum ras_state state; /* RAS logging running state */
};
#define DBG_LOG_STR_SZ 256
#define DBG_LOG_SZ 256
struct dbg_log_ent {
char log[DBG_LOG_STR_SZ];
u64 t_ns;
};
enum lpfc_irq_chann_mode {
/* Assign IRQs to all possible cpus that have hardware queues */
NORMAL_MODE,
@ -1240,6 +1248,10 @@ struct lpfc_hba {
struct scsi_host_template port_template;
/* SCSI host template information - for all vports */
struct scsi_host_template vport_template;
atomic_t dbg_log_idx;
atomic_t dbg_log_cnt;
atomic_t dbg_log_dmping;
struct dbg_log_ent dbg_log[DBG_LOG_SZ];
};
static inline struct Scsi_Host *
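
The new lpfc_hba fields above are all the state the ring needs: a producer index, a
count of valid entries, and a flag that keeps new messages out of the buffer while it
is being dumped. Below is a minimal sketch of the append side, pairing with the dump
sketch above. It is not the driver's actual lpfc_dbg_print() (whose body is not shown
in this diff), and the ktime_get_ns() timestamp source is likewise an assumption.

/* Sketch only -- assumed append logic for the internal trace ring. */
static void dbg_log_append(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;

	/* Drop new messages while the ring is being dumped */
	if (atomic_read(&phba->dbg_log_dmping))
		return;

	/* Claim the next slot; the ring simply wraps at DBG_LOG_SZ */
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
	      DBG_LOG_SZ;
	if (atomic_read(&phba->dbg_log_cnt) < DBG_LOG_SZ)
		atomic_inc(&phba->dbg_log_cnt);

	/* Crude relative timestamp, never converted to wall time */
	phba->dbg_log[idx].t_ns = ktime_get_ns();

	va_start(args, fmt);
	vscnprintf(phba->dbg_log[idx].log, DBG_LOG_STR_SZ, fmt, args);
	va_end(args);
}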


@ -386,7 +386,7 @@ void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
int lpfc_link_reset(struct lpfc_vport *vport);
/* Function prototypes. */
int lpfc_check_pci_resettable(const struct lpfc_hba *phba);
int lpfc_check_pci_resettable(struct lpfc_hba *phba);
const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);


@ -750,7 +750,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0257 GID_FT Query error: 0x%x 0x%x\n",
irsp->ulpStatus, vport->fc_ns_retry);
} else {
@ -811,7 +811,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
} else {
/* NameServer Rsp Error */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0241 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
CTrsp->CommandResponse.bits.CmdRsp,
@ -951,7 +951,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4103 GID_FT Query error: 0x%x 0x%x\n",
irsp->ulpStatus, vport->fc_ns_retry);
} else {
@ -1012,7 +1012,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
} else {
/* NameServer Rsp Error */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4109 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
CTrsp->CommandResponse.bits.CmdRsp,
@ -1143,7 +1143,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
did, irsp->ulpStatus, irsp->un.ulpWord[4],
@ -1271,7 +1271,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
lpfc_ct_free_iocb(phba, cmdiocb);
@ -1320,7 +1320,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
if (irsp->ulpStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0268 NS cmd x%x Error (x%x x%x)\n",
cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
@ -1843,7 +1843,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
ns_cmd_free_mp:
kfree(mp);
ns_cmd_exit:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
return 1;


@ -100,7 +100,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 0;
/* Pending Link Event during Discovery */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0237 Pending Link Event during "
"Discovery: State x%x\n",
phba->pport->port_state);
@ -440,8 +440,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
fail:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0249 Cannot issue Register Fabric login: Err %d\n", err);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0249 Cannot issue Register Fabric login: Err %d\n",
err);
return -ENXIO;
}
@ -524,8 +525,8 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0289 Issue Register VFI failed: Err %d\n", rc);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0289 Issue Register VFI failed: Err %d\n", rc);
return rc;
}
@ -550,7 +551,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2556 UNREG_VFI mbox allocation failed"
"HBA state x%x\n", phba->pport->port_state);
return -ENOMEM;
@ -562,7 +563,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2557 UNREG_VFI issue mbox failed rc x%x "
"HBA state x%x\n",
rc, phba->pport->port_state);
@ -1041,18 +1042,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2858 FLOGI failure Status:x%x/x%x "
"TMO:x%x Data x%x x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, phba->hba_flag,
phba->fcf.fcf_flag);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2858 FLOGI failure Status:x%x/x%x TMO"
":x%x Data x%x x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, phba->hba_flag,
phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
@ -1132,8 +1133,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else if (!(phba->hba_flag & HBA_FCOE_MODE))
rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_FIP | LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2831 FLOGI response with cleared Fabric "
"bit fcf_index 0x%x "
"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
@ -1934,7 +1934,7 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2882 RRQ completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
irsp->un.elsreq64.remoteID,
@ -1957,10 +1957,11 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
(phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2881 RRQ failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2881 RRQ failure DID:%06X Status:"
"x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
}
out:
if (rrq)
@ -2010,7 +2011,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
irsp->un.elsreq64.remoteID,
@ -2059,7 +2060,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
(phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
@ -2237,6 +2238,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
char *mode;
u32 loglevel;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@ -2278,13 +2280,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* could be expected.
*/
if ((vport->fc_flag & FC_FABRIC) ||
(vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
(vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
mode = KERN_ERR;
else
loglevel = LOG_TRACE_EVENT;
} else {
mode = KERN_INFO;
loglevel = LOG_ELS;
}
/* PRLI failed */
lpfc_printf_vlog(vport, mode, LOG_ELS,
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
@ -2695,7 +2700,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
/* ADISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
@ -2853,7 +2858,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
if (irsp->ulpStatus) {
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
@ -3734,7 +3739,7 @@ lpfc_link_reset(struct lpfc_vport *vport)
"2851 Attempt link reset\n");
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2852 Failed to allocate mbox memory");
return 1;
}
@ -3756,7 +3761,7 @@ lpfc_link_reset(struct lpfc_vport *vport)
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2853 Failed to issue INIT_LINK "
"mbox command, rc:x%x\n", rc);
mempool_free(mbox, phba->mbox_mem_pool);
@ -3860,7 +3865,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOERR_ILLEGAL_COMMAND:
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0124 Retry illegal cmd x%x "
"retry:x%x delay:x%x\n",
cmd, cmdiocb->retry, delay);
@ -3970,7 +3975,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0125 FDISC Failed (x%x). "
"Fabric out of resources\n",
stat.un.lsRjtError);
@ -4009,7 +4015,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
LSEXP_NOTHING_MORE) {
vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
retry = 1;
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0820 FLOGI Failed (x%x). "
"BBCredit Not Supported\n",
stat.un.lsRjtError);
@ -4022,7 +4029,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
(stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0122 FDISC Failed (x%x). "
"Fabric Detected Bad WWN\n",
stat.un.lsRjtError);
@ -4200,7 +4208,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* No retry ELS command <elsCmd> to remote NPORT <did> */
if (logerr) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0137 No retry ELS command x%x to remote "
"NPORT x%x: Out of Resources: Error:x%x/%x\n",
cmd, did, irsp->ulpStatus,
@ -4499,7 +4507,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
if (!vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3177 ELS response failed\n");
goto out;
}
@ -4605,7 +4613,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
/* ELS rsp: Cannot issue reg_login for <NPortid> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0138 ELS rsp: Cannot issue reg_login for x%x "
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
@ -6411,8 +6419,8 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lcb_context->rx_id = cmdiocb->iocb.ulpContext;
lcb_context->ndlp = lpfc_nlp_get(ndlp);
if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
LOG_ELS, "0193 failed to send mail box");
lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
"0193 failed to send mail box");
kfree(lcb_context);
lpfc_nlp_put(ndlp);
rjt_err = LSRJT_UNABLE_TPC;
@ -6621,7 +6629,7 @@ lpfc_send_rscn_event(struct lpfc_vport *vport,
rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
payload_len, GFP_KERNEL);
if (!rscn_event_data) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0147 Failed to allocate memory for RSCN event\n");
return;
}
@ -6998,7 +7006,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0113 An FLOGI ELS command x%x was "
"received from DID x%x in Loop Mode\n",
cmd, did);
@ -7988,7 +7996,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
cmd = &piocb->iocb;
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
@ -8098,7 +8106,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
@ -8269,7 +8277,7 @@ lpfc_send_els_event(struct lpfc_vport *vport,
if (*payload == ELS_CMD_LOGO) {
logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
if (!logo_data) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0148 Failed to allocate memory "
"for LOGO event\n");
return;
@ -8279,7 +8287,7 @@ lpfc_send_els_event(struct lpfc_vport *vport,
els_data = kmalloc(sizeof(struct lpfc_els_event_header),
GFP_KERNEL);
if (!els_data) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0149 Failed to allocate memory "
"for ELS event\n");
return;
@ -8396,7 +8404,7 @@ lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
break;
default:
dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4678 skipped FPIN descriptor[%d]: "
"tag x%x (%s)\n",
desc_cnt, dtag, dtag_nm);
@ -8811,7 +8819,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0115 Unknown ELS command x%x "
"received from NPORT x%x\n", cmd, did);
if (newnode)
@ -8856,7 +8864,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dropit:
if (vport && !(vport->load_flag & FC_UNLOADING))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0111 Dropping received ELS cmd "
"Data: x%x x%x x%x\n",
icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
@ -9006,7 +9014,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_DISC_DELAYED) {
spin_unlock_irq(shost->host_lock);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3334 Delay fc port discovery for %d seconds\n",
phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
@ -9024,7 +9032,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0251 NameServer login: no memory\n");
return;
}
@ -9036,7 +9044,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0348 NameServer login: node freed\n");
return;
}
@ -9047,7 +9055,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0252 Cannot issue NameServer login\n");
return;
}
@ -9084,7 +9092,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0915 Register VPI failed : Status: x%x"
" upd bit: x%x \n", mb->mbxStatus,
mb->un.varRegVpi.upd);
@ -9114,8 +9122,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
rc = lpfc_sli_issue_mbox(phba, pmb,
MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vport,
KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"2732 Failed to issue INIT_VPI"
" mailbox command\n");
} else {
@ -9203,12 +9211,12 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_nlp_put(ndlp);
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0253 Register VPI: Can't send mbox\n");
goto mbox_err_exit;
}
} else {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0254 Register VPI: no memory\n");
goto mbox_err_exit;
}
@ -9370,7 +9378,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0126 FDISC failed. (x%x/x%x)\n",
irsp->ulpStatus, irsp->un.ulpWord[4]);
goto fdisc_failed;
@ -9492,7 +9500,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ELS_CMD_FDISC);
if (!elsiocb) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0255 Issue FDISC: no IOCB\n");
return 1;
}
@ -9546,7 +9554,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0256 Issue FDISC: Cannot send IOCB\n");
return 1;
}
@ -10127,8 +10135,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
"rport in state 0x%x\n", ndlp->nlp_state);
return;
}
lpfc_printf_log(phba, KERN_ERR,
LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3094 Start rport recovery on shost id 0x%x "
"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
"flags 0x%x\n",


@ -155,17 +155,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"6789 rport name %llx != node port name %llx",
rport->port_name,
wwn_to_u64(ndlp->nlp_portname.u.wwn));
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6789 rport name %llx != node port name %llx",
rport->port_name,
wwn_to_u64(ndlp->nlp_portname.u.wwn));
evtp = &ndlp->dev_loss_evt;
if (!list_empty(&evtp->evt_listp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"6790 rport name %llx dev_loss_evt pending",
rport->port_name);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6790 rport name %llx dev_loss_evt pending",
rport->port_name);
return;
}
@ -295,7 +295,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
}
if (warn_on) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
"NPort x%06x Data: x%x x%x x%x\n",
@ -304,7 +304,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
"NPort x%06x Data: x%x x%x x%x\n",
@ -755,7 +755,7 @@ lpfc_do_work(void *p)
|| kthread_should_stop()));
/* Signal wakeup shall terminate the worker thread */
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0433 Wakeup on signal: rc=x%x\n", rc);
break;
}
@ -1092,7 +1092,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
mb->mbxStatus, vport->port_state);
@ -1180,7 +1180,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0306 CONFIG_LINK mbxStatus error x%x "
"HBA state x%x\n",
pmb->u.mb.mbxStatus, vport->port_state);
@ -1188,7 +1188,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_linkdown(phba);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0200 CONFIG_LINK bad hba state x%x\n",
vport->port_state);
@ -1224,10 +1224,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"2017 REG_FCFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2017 REG_FCFI mbxStatus error x%x "
"HBA state x%x\n", mboxq->u.mb.mbxStatus,
vport->port_state);
goto fail_out;
}
@ -1848,7 +1848,7 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
if (unlikely(!mboxq->sge_array)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2524 Failed to get the non-embedded SGE "
"virtual address\n");
return NULL;
@ -1864,11 +1864,12 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (shdr_status || shdr_add_status) {
if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
if_type == LPFC_SLI_INTF_IF_TYPE_2)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
lpfc_printf_log(phba, KERN_ERR,
LOG_TRACE_EVENT,
"2726 READ_FCF_RECORD Indicates empty "
"FCF table.\n");
else
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2521 READ_FCF_RECORD mailbox failed "
"with status x%x add_status x%x, "
"mbx\n", shdr_status, shdr_add_status);
@ -2246,7 +2247,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
&next_fcf_index);
if (!new_fcf_record) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2765 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
/* Let next new FCF event trigger fast failover */
@ -2290,7 +2291,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
phba->fcf.current_rec.fcf_indx) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
lpfc_printf_log(phba, KERN_ERR,
LOG_TRACE_EVENT,
"2862 FCF (x%x) matches property "
"of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
@ -2360,7 +2362,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->pport->fc_flag);
goto out;
} else
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2863 New FCF (x%x) matches "
"property of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
@ -2774,10 +2776,9 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_0) &&
mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"2891 Init VFI mailbox failed 0x%x\n",
mboxq->u.mb.mbxStatus);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2891 Init VFI mailbox failed 0x%x\n",
mboxq->u.mb.mbxStatus);
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@ -2805,7 +2806,7 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX, "2892 Failed to allocate "
LOG_TRACE_EVENT, "2892 Failed to allocate "
"init_vfi mailbox\n");
return;
}
@ -2813,8 +2814,8 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport)
mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2893 Failed to issue init_vfi mailbox\n");
mempool_free(mboxq, vport->phba->mbox_mem_pool);
}
}
@ -2834,10 +2835,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"2609 Init VPI mailbox failed 0x%x\n",
mboxq->u.mb.mbxStatus);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2609 Init VPI mailbox failed 0x%x\n",
mboxq->u.mb.mbxStatus);
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@ -2851,7 +2851,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
lpfc_printf_vlog(vport, KERN_ERR,
LOG_DISCOVERY,
LOG_TRACE_EVENT,
"2731 Cannot find fabric "
"controller node\n");
else
@ -2864,7 +2864,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_initial_fdisc(vport);
else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2606 No NPIV Fabric support\n");
}
mempool_free(mboxq, phba->mbox_mem_pool);
@ -2887,8 +2887,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
vpi = lpfc_alloc_vpi(vport->phba);
if (!vpi) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3303 Failed to obtain vport vpi\n");
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@ -2899,7 +2898,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX, "2607 Failed to allocate "
LOG_TRACE_EVENT, "2607 Failed to allocate "
"init_vpi mailbox\n");
return;
}
@ -2908,8 +2907,8 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2608 Failed to issue init_vpi mailbox\n");
mempool_free(mboxq, vport->phba->mbox_mem_pool);
}
}
@ -2953,7 +2952,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
lpfc_vport_set_state(vports[i],
FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vports[i], KERN_ERR,
LOG_ELS,
LOG_TRACE_EVENT,
"0259 No NPIV "
"Fabric support\n");
}
@ -2977,10 +2976,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_0) &&
mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"2018 REG_VFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2018 REG_VFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@ -3067,7 +3066,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if (mb->mbxStatus) {
/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
mb->mbxStatus, vport->port_state);
@ -3286,7 +3285,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
GFP_KERNEL);
if (unlikely(!fcf_record)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_SLI,
LOG_TRACE_EVENT,
"2554 Could not allocate memory for "
"fcf record\n");
rc = -ENODEV;
@ -3298,7 +3297,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_SLI,
LOG_TRACE_EVENT,
"2013 Could not manually add FCF "
"record 0, status %d\n", rc);
rc = -ENODEV;
@ -3344,7 +3343,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
@ -3617,7 +3616,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
break;
/* If VPI is busy, reset the HBA */
case 0x9700:
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
vport->vpi, mb->mbxStatus);
if (!(phba->pport->load_flag & FC_UNLOADING))
@ -3655,7 +3654,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
vport->unreg_vpi_cmpl = VPORT_ERROR;
@ -3742,7 +3741,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0542 lpfc_create_static_vport failed to"
" allocate mailbox memory\n");
return;
@ -3752,7 +3751,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
if (!vport_info) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0543 lpfc_create_static_vport failed to"
" allocate vport_info\n");
mempool_free(pmb, phba->mbox_mem_pool);
@ -3813,11 +3812,12 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
!= VPORT_INFO_REV)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0545 lpfc_create_static_vport bad"
" information header 0x%x 0x%x\n",
le32_to_cpu(vport_info->signature),
le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0545 lpfc_create_static_vport bad"
" information header 0x%x 0x%x\n",
le32_to_cpu(vport_info->signature),
le32_to_cpu(vport_info->rev) &
VPORT_INFO_REV_MASK);
goto out;
}
@ -3881,7 +3881,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->ctx_buf = NULL;
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0258 Register Fabric login error: 0x%x\n",
mb->mbxStatus);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@ -3954,7 +3954,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
/* Cannot issue NameServer FCP Query, so finish up
* discovery
*/
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0604 %s FC TYPE %x %s\n",
"Failed to issue GID_FT to ",
FC_TYPE_FCP,
@ -3970,7 +3971,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
/* Cannot issue NameServer NVME Query, so finish up
* discovery
*/
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0605 %s FC_TYPE %x %s %d\n",
"Failed to issue GID_FT to ",
FC_TYPE_NVME,
@ -4002,7 +4004,7 @@ lpfc_issue_gidpt(struct lpfc_vport *vport)
/* Cannot issue NameServer FCP Query, so finish up
* discovery
*/
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0606 %s Port TYPE %x %s\n",
"Failed to issue GID_PT to ",
GID_PT_N_PORT,
@ -4032,7 +4034,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->gidft_inp = 0;
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0260 Register NameServer error: 0x%x\n",
mb->mbxStatus);
@ -4344,7 +4346,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
GFP_KERNEL);
if (!ndlp->lat_data)
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0286 lpfc_nlp_state_cleanup failed to "
"allocate statistical data buffer DID "
"0x%x\n", ndlp->nlp_DID);
@ -5013,8 +5015,8 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (!vports) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"2884 Vport array allocation failed \n");
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2884 Vport array allocation failed \n");
return;
}
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@ -5057,9 +5059,10 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mempool_free(mbox, phba->mbox_mem_pool);
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1836 Could not issue "
"unreg_login(all_rpis) status %d\n", rc);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1836 Could not issue "
"unreg_login(all_rpis) status %d\n",
rc);
}
}
@ -5086,7 +5089,7 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mempool_free(mbox, phba->mbox_mem_pool);
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1815 Could not issue "
"unreg_did (default rpis) status %d\n",
rc);
@ -5907,7 +5910,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FLOGI:
/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
/* Initial FLOGI timeout */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0222 Initial %s timeout\n",
vport->vpi ? "FDISC" : "FLOGI");
@ -5925,7 +5929,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FABRIC_CFG_LINK:
/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
NameServer login */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0223 Timeout while waiting for "
"NameServer login\n");
/* Next look for NameServer ndlp */
@ -5938,7 +5943,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0224 NameServer Query timeout "
"Data: x%x x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
@ -5971,7 +5977,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Setup and issue mailbox INITIALIZE LINK command */
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0206 Device Discovery "
"completion error\n");
phba->link_state = LPFC_HBA_ERROR;
@ -5993,7 +6000,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_DISC_AUTH:
/* Node Authentication timeout */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0227 Node Authentication timeout\n");
lpfc_disc_flush_list(vport);
@ -6013,7 +6021,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_VPORT_READY:
if (vport->fc_flag & FC_RSCN_MODE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
"x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
@ -6027,7 +6036,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
break;
default:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0273 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
break;
@ -6036,7 +6046,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
switch (phba->link_state) {
case LPFC_CLEAR_LA:
/* CLEAR LA timeout */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0228 CLEAR LA timeout\n");
clrlaerr = 1;
break;
@ -6050,7 +6061,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0230 Unexpected timeout, hba link "
"state x%x\n", phba->link_state);
clrlaerr = 1;
@ -6241,9 +6253,9 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
}
if (i >= phba->max_vpi) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"2936 Could not find Vport mapped "
"to vpi %d\n", vpi);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2936 Could not find Vport mapped "
"to vpi %d\n", vpi);
return NULL;
}
}
@ -6547,10 +6559,10 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
"2555 UNREG_VFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2555 UNREG_VFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
}
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
@ -6572,10 +6584,10 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
"2550 UNREG_FCFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2550 UNREG_FCFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
}
mempool_free(mboxq, phba->mbox_mem_pool);
return;
@ -6664,7 +6676,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2551 UNREG_FCFI mbox allocation failed"
"HBA state x%x\n", phba->pport->port_state);
return -ENOMEM;
@ -6675,7 +6687,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2552 Unregister FCFI command failed rc x%x "
"HBA state x%x\n",
rc, phba->pport->port_state);
@ -6699,7 +6711,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
/* Preparation for unregistering fcf */
rc = lpfc_unregister_fcf_prep(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2748 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
return;
@ -6735,7 +6747,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2553 lpfc_unregister_unused_fcf failed "
"to read FCF record HBA state x%x\n",
phba->pport->port_state);
@ -6757,7 +6769,7 @@ lpfc_unregister_fcf(struct lpfc_hba *phba)
/* Preparation for unregistering fcf */
rc = lpfc_unregister_fcf_prep(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2749 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
return;
@ -6844,9 +6856,9 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
GFP_KERNEL);
if (!conn_entry) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2566 Failed to allocate connection"
" table entry\n");
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2566 Failed to allocate connection"
" table entry\n");
return;
}
@ -6990,7 +7002,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
/* Check the region signature first */
if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2567 Config region 23 has bad signature\n");
return;
}
@ -6999,8 +7011,8 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
/* Check the data structure version */
if (buff[offset] != LPFC_REGION23_VERSION) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2568 Config region 23 has bad version\n");
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2568 Config region 23 has bad version\n");
return;
}
offset += 4;

(File diff suppressed because it is too large.)


@ -44,7 +44,11 @@
#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */
#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define LOG_TRACE_EVENT 0x80000000 /* Dmp the DBG log on this err */
#define LOG_ALL_MSG 0x7fffffff /* LOG all messages */
void lpfc_dmp_dbg(struct lpfc_hba *phba);
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...);
/* generate message by verbose log setting or severity */
#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
@ -65,9 +69,15 @@ do { \
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
if ((mask) & LOG_TRACE_EVENT) \
lpfc_dmp_dbg((vport)->phba); \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \
} else if (!(vport)->cfg_log_verbose) \
lpfc_dbg_print((vport)->phba, "%d:(%d):" fmt, \
(vport)->phba->brd_no, (vport)->vpi, ##arg); \
} \
} while (0)
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
@ -75,8 +85,12 @@ do { \
{ uint32_t log_verbose = (phba)->pport ? \
(phba)->pport->cfg_log_verbose : \
(phba)->cfg_log_verbose; \
if (((mask) & log_verbose) || (level[1] <= '3')) \
if (((mask) & log_verbose) || (level[1] <= '3')) { \
if ((mask) & LOG_TRACE_EVENT) \
lpfc_dmp_dbg(phba); \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
fmt, phba->brd_no, ##arg); \
fmt, phba->brd_no, ##arg); \
} else if (!(phba)->cfg_log_verbose)\
lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \
} \
} while (0)
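
The reworked macros above are where the new behavior takes effect: a message tagged
with LOG_TRACE_EVENT that reaches dev_printk() (in practice, the KERN_ERR call sites
converted throughout this patch) first flushes the internal ring via lpfc_dmp_dbg(),
while messages that would previously have been dropped are captured into the ring via
lpfc_dbg_print(), but only when no verbose logging is configured at all. LOG_ALL_MSG
also shrinks from 0xffffffff to 0x7fffffff because bit 31 is now reserved for
LOG_TRACE_EVENT. Two hypothetical call sites illustrating the paths:

/* KERN_ERR + LOG_TRACE_EVENT: lpfc_dmp_dbg() flushes the internal ring,
 * then the message itself is printed via dev_printk() as before.
 */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
		"2552 Unregister FCFI command failed rc x%x\n", rc);

/* KERN_INFO + LOG_DISCOVERY: printed only if LOG_DISCOVERY is enabled in
 * cfg_log_verbose; if no verbose logging is configured at all, the text is
 * captured into the internal ring via lpfc_dbg_print() instead of being
 * dropped.
 */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
		 "0204 Devloss timeout on NPort x%06x\n", ndlp->nlp_DID);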


@ -152,7 +152,7 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
return 1;
bad_service_param:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0207 Device %x "
"(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
"invalid service parameters. Ignoring device.\n",
@ -301,7 +301,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
/* Check for CONFIG_LINK error */
if (mb->mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4575 CONFIG_LINK fails pt2pt discovery: %x\n",
mb->mbxStatus);
mempool_free(login_mbox, phba->mbox_mem_pool);
@ -316,7 +316,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
save_iocb, ndlp, login_mbox);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4576 PLOGI ACC fails pt2pt discovery: %x\n",
rc);
mempool_free(login_mbox, phba->mbox_mem_pool);
@ -361,7 +361,7 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
if (!piocb) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4578 PLOGI ACC fail\n");
if (mbox)
mempool_free(mbox, phba->mbox_mem_pool);
@ -370,7 +370,7 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
if (rc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4579 PLOGI ACC fail %x\n", rc);
if (mbox)
mempool_free(mbox, phba->mbox_mem_pool);
@ -405,7 +405,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (wwn_to_u64(sp->portName.u.wwn) == 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0140 PLOGI Reject: invalid nname\n");
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
@ -414,7 +414,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0141 PLOGI Reject: invalid pname\n");
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
@ -481,7 +481,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0143 PLOGI recv'd from DID: x%x "
"WWPN changed: old %llx new %llx\n",
ndlp->nlp_DID,
@ -689,7 +689,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
out:
if (defer_acc)
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4577 discovery failure: %p %p %p\n",
save_iocb, link_mbox, login_mbox);
kfree(save_iocb);
@ -1097,8 +1097,8 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!pmb)
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"2796 mailbox memory allocation failed \n");
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2796 mailbox memory allocation failed \n");
else {
lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@ -1136,7 +1136,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
rpi = pmb->u.mb.un.varWords[0];
lpfc_release_rpi(phba, vport, ndlp, rpi);
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0271 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
@ -1154,11 +1154,11 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* to stop it.
*/
if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0272 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0272 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
ndlp->nlp_DID, evt, ndlp->nlp_state,
ndlp->nlp_rpi, ndlp->nlp_flag);
}
return ndlp->nlp_state;
}
@ -1378,7 +1378,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
if ((ndlp->nlp_DID != FDMI_DID) &&
(wwn_to_u64(sp->portName.u.wwn) == 0 ||
wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0142 PLOGI RSP: Invalid WWN.\n");
goto out;
}
@ -1440,7 +1440,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
} else {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0133 PLOGI: no memory "
"for config_link "
"Data: x%x x%x x%x x%x\n",
@ -1465,7 +1466,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0018 PLOGI: no memory for reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
@ -1505,7 +1506,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0134 PLOGI: cannot issue reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
@ -1513,7 +1514,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
} else {
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0135 PLOGI: cannot format reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
@ -1524,7 +1525,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
out:
if (ndlp->nlp_DID == NameServer_DID) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0261 Cannot Register NameServer login\n");
}
@ -1946,8 +1947,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0246 RegLogin failed Data: x%x x%x x%x x%x "
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0246 RegLogin failed Data: x%x x%x x%x x%x "
"x%x\n",
did, mb->mbxStatus, vport->port_state,
mb->un.varRegLogin.vpi,


@ -498,7 +498,7 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
if (pnvme_lsreq->done)
pnvme_lsreq->done(pnvme_lsreq, status);
else
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6046 NVMEx cmpl without done call back? "
"Data %px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
@ -647,7 +647,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
if (rc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC | LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6045 Issue GEN REQ WQE to NPORT x%x "
"Data: x%x x%x rc x%x\n",
ndlp->nlp_DID, genwqe->iotag,
@ -693,8 +693,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint16_t ntype, nstate;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
"LS Req\n",
ndlp);
@ -705,8 +704,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
nstate = ndlp->nlp_state;
if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
(ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6088 NVMEx LS REQ: Fail DID x%06x not "
"ready for IO. Type x%x, State x%x\n",
ndlp->nlp_DID, ntype, nstate);
@ -727,9 +725,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
if (!bmp) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6044 NVMEx LS REQ: Could not alloc LS buf "
"for DID %x\n",
ndlp->nlp_DID);
@ -738,8 +734,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
if (!bmp->virt) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6042 NVMEx LS REQ: Could not alloc mbuf "
"for DID %x\n",
ndlp->nlp_DID);
@ -774,8 +769,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pnvme_lsreq, gen_req_cmp, ndlp, 2,
LPFC_NVME_LS_TIMEOUT, 0);
if (ret != WQE_SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6052 NVMEx REQ: EXIT. issue ls wqe failed "
"lsreq x%px Status %x DID %x\n",
pnvme_lsreq, ret, ndlp->nlp_DID);
@ -853,9 +847,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
bool foundit = false;
if (!ndlp) {
lpfc_printf_log(phba, KERN_ERR,
LOG_NVME_DISC | LOG_NODE |
LOG_NVME_IOERR | LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
"x%06x, Failing LS Req\n",
ndlp, ndlp ? ndlp->nlp_DID : 0);
@ -1099,8 +1091,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NODE | LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6071 Null lpfc_ncmd pointer. No "
"release, skip completion\n");
return;
@ -1111,7 +1102,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (!lpfc_ncmd->nvmeCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
"nvmeCmd x%px\n",
lpfc_ncmd, lpfc_ncmd->nvmeCmd);
@ -1144,7 +1135,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
*/
ndlp = lpfc_ncmd->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6062 Ignoring NVME cmpl. No ndlp\n");
goto out_err;
}
@ -1215,7 +1206,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
/* Sanity check */
if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
break;
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x "
"placed x%x\n",
@ -1459,7 +1450,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
@ -1482,7 +1473,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
j = 2;
for (i = 0; i < nseg; i++) {
if (data_sg == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6059 dptr err %d, nseg %d\n",
i, nseg);
lpfc_ncmd->seg_cnt = 0;
@ -1583,7 +1574,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
* and sg_cnt must zero.
*/
if (nCmd->payload_length != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6063 NVME DMA Prep Err: sg_cnt %d "
"payload_length x%x\n",
nCmd->sg_cnt, nCmd->payload_length);
@ -1946,7 +1937,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6139 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x\n",
phba->hba_flag);
@ -1956,13 +1947,13 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
@ -1980,7 +1971,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
* has not seen it yet.
*/
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6143 NVME req mismatch: "
"lpfc_nbuf x%px nvmeCmd x%px, "
"pnvme_fcreq x%px. Skipping Abort xri x%x\n",
@ -1991,7 +1982,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Don't abort IOs no longer on the pending queue. */
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
@ -2005,7 +1996,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Outstanding abort is in progress */
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq x%px, "
"lpfc_ncmd %px xri x%x\n",
@ -2016,7 +2007,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
abts_buf = __lpfc_sli_get_iocbq(phba);
if (!abts_buf) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6136 No available abort wqes. Skipping "
"Abts req for nvme_fcreq x%px xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
@ -2037,7 +2028,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
@ -2310,7 +2301,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (pring->txcmplq_cnt)
pending += pring->txcmplq_cnt;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6176 Lport x%px Localport x%px wait "
"timed out. Pending %d. Renewing.\n",
lport, vport->localport, pending);
@ -2528,7 +2519,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp, prev_ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
LOG_TRACE_EVENT,
"6031 RemotePort Registration failed "
"err: %d, DID x%06x\n",
ret, ndlp->nlp_DID);
@ -2574,7 +2565,7 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
nvme_fc_rescan_remoteport(remoteport);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6172 NVME rescanned DID x%06x "
"port_state x%x\n",
ndlp->nlp_DID, remoteport->port_state);
@ -2657,7 +2648,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
@ -2667,7 +2658,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
input_err:
#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6168 State error: lport x%px, rport x%px FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
@ -2752,7 +2743,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
* dump a message. Something is wrong.
*/
if ((wait_cnt % 1000) == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6178 NVME IO not empty, "
"cnt %d\n", wait_cnt);
}
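
The NVMe initiator hunks above all follow one mechanical pattern: each KERN_ERR message keeps its numeric id, format string and arguments, and only the verbose-mask argument changes, with combinations such as LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR collapsing into the single LOG_TRACE_EVENT mask. A minimal sketch of the converted form follows; the helper name and the 9999 message id are invented for illustration, and the driver's usual lpfc headers are assumed to be in scope.

/*
 * Illustrative sketch only, not a function from the driver: a vport-scoped
 * KERN_ERR message after the conversion. The mask argument becomes
 * LOG_TRACE_EVENT; severity, message id and arguments are left untouched.
 */
static void example_report_ls_error(struct lpfc_vport *vport, u32 did, int rc)
{
	/* before: lpfc_printf_vlog(vport, KERN_ERR,
	 *                          LOG_NVME_DISC | LOG_NVME_IOERR, ...);
	 */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "9999 example LS error: DID x%06x rc x%x\n",
			 did, rc);
}

In the hunks shown here, only KERN_ERR messages are converted; lower-severity messages keep their original subsystem masks.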


@ -303,7 +303,7 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
result = wcqe->parameter;
if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6410 NVMEx LS cmpl state mismatch IO x%x: "
"%d %d\n",
axchg->oxid, axchg->state, axchg->entry_cnt);
@ -395,7 +395,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
unsigned long iflag;
if (ctxp->state == LPFC_NVME_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6411 NVMET free, already free IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
@ -474,7 +474,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6181 Unable to queue deferred work "
"for oxid x%x. "
"FCP Drop IO [x%x x%x x%x]\n",
@ -879,7 +879,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6412 NVMEx LS rsp state mismatch "
"oxid x%x: %d %d\n",
axchg->oxid, axchg->state, axchg->entry_cnt);
@ -891,8 +891,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
ls_rsp->rsplen);
if (nvmewqeq == NULL) {
lpfc_printf_log(phba, KERN_ERR,
LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6150 NVMEx LS Drop Rsp x%x: Prep\n",
axchg->oxid);
rc = -ENOMEM;
@ -936,8 +935,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
return 0;
}
lpfc_printf_log(phba, KERN_ERR,
LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
axchg->oxid, rc);
@ -1058,7 +1056,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
(ctxp->state == LPFC_NVME_STE_ABORT)) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6102 IO oxid x%x aborted\n",
ctxp->oxid);
rc = -ENXIO;
@ -1068,7 +1066,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
if (nvmewqeq == NULL) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6152 FCP Drop IO x%x: Prep\n",
ctxp->oxid);
rc = -ENXIO;
@ -1116,7 +1114,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
/* Give back resources */
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6153 FCP Drop IO x%x: Issue: %d\n",
ctxp->oxid, rc);
@ -1216,7 +1214,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
ctxp->flag, ctxp->oxid);
else if (ctxp->state != LPFC_NVME_STE_DONE &&
ctxp->state != LPFC_NVME_STE_ABORT)
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6413 NVMET release bad state %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
@ -1395,7 +1393,7 @@ lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
phba = tgtp->phba;
rc = lpfc_issue_els_rscn(phba->pport, 0);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6420 NVMET subsystem change: Notification %s\n",
(rc) ? "Failed" : "Sent");
}
@ -1493,7 +1491,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
if (!phba->sli4_hba.nvmet_ctx_info) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6419 Failed allocate memory for "
"nvmet context lists\n");
return -ENOMEM;
@ -1551,7 +1549,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
if (!ctx_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6404 Ran out of memory for NVMET\n");
return -ENOMEM;
}
@ -1560,7 +1558,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
GFP_KERNEL);
if (!ctx_buf->context) {
kfree(ctx_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6405 Ran out of NVMET "
"context memory\n");
return -ENOMEM;
@ -1572,7 +1570,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
if (!ctx_buf->iocbq) {
kfree(ctx_buf->context);
kfree(ctx_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6406 Ran out of NVMET iocb/WQEs\n");
return -ENOMEM;
}
@ -1591,7 +1589,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
kfree(ctx_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
@ -1670,7 +1668,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
error = -ENOENT;
#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6025 Cannot register NVME targetport x%x: "
"portnm %llx nodenm %llx segs %d qs %d\n",
error,
@ -2114,7 +2112,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
nvmet_fc_unregister_targetport(phba->targetport);
if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6179 Unreg targetport x%px timeout "
"reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
@ -2187,7 +2185,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
unsigned long iflags;
if (!nvmebuf) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6159 process_rcv_fcp_req, nvmebuf is NULL, "
"oxid: x%x flg: x%x state: x%x\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@ -2200,7 +2198,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
}
if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6324 IO oxid x%x aborted\n",
ctxp->oxid);
return;
@ -2264,7 +2262,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
}
ctxp->flag &= ~LPFC_NVME_TNOTIFY;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
ctxp->oxid, rc,
atomic_read(&tgtp->rcv_fcp_cmd_in),
@ -2383,7 +2381,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctx_buf = NULL;
if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6157 NVMET FCP Drop IO\n");
if (nvmebuf)
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
@ -2456,7 +2454,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
if (ctxp->state != LPFC_NVME_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6414 NVMET Context corrupt %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
@ -2498,7 +2496,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6325 Unable to queue work for oxid x%x. "
"FCP Drop IO [x%x x%x x%x]\n",
ctxp->oxid,
@ -2535,7 +2533,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint8_t cqflag)
{
if (!nvmebuf) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3167 NVMET FCP Drop IO\n");
return;
}
@ -2581,7 +2579,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
union lpfc_wqe128 *wqe;
if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6104 NVMET prep LS wqe: link err: "
"NPORT x%x oxid:x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@ -2591,7 +2589,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
/* Allocate buffer for command wqe */
nvmewqe = lpfc_sli_get_iocbq(phba);
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6105 NVMET prep LS wqe: No WQE: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@ -2602,7 +2600,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6106 NVMET prep LS wqe: No ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@ -2711,7 +2709,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
int xc = 1;
if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6107 NVMET prep FCP wqe: link err:"
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@ -2722,7 +2720,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6108 NVMET prep FCP wqe: no ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@ -2730,7 +2728,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
}
if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
ctxp->sid, ctxp->oxid, ctxp->state,
@ -2745,7 +2743,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Allocate buffer for command wqe */
nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6110 NVMET prep FCP wqe: No "
"WQE: NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
@ -2763,7 +2761,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
(ctxp->state == LPFC_NVME_STE_DATA)) {
wqe = &nvmewqe->wqe;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6111 Wrong state NVMET FCP: %d cnt %d\n",
ctxp->state, ctxp->entry_cnt);
return NULL;
@ -3136,7 +3134,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* Sanity check */
if (ctxp->state != LPFC_NVME_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6112 ABTS Wrong state:%d oxid x%x\n",
ctxp->state, ctxp->oxid);
}
@ -3210,7 +3208,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
result, wcqe->word3);
if (!ctxp) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6415 NVMET LS Abort No ctx: WCQE: "
"%08x %08x %08x %08x\n",
wcqe->word0, wcqe->total_data_placed,
@ -3221,7 +3219,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6416 NVMET LS abort cmpl state mismatch: "
"oxid x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
@ -3256,7 +3254,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
if (tgtp)
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@ -3353,7 +3351,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6160 Drop ABORT - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@ -3369,7 +3367,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&ctxp->ctxlock, flags);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
@ -3396,7 +3394,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6163 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x oxid x%x\n",
phba->hba_flag, ctxp->oxid);
@ -3411,7 +3409,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6164 Outstanding NVME I/O Abort Request "
"still pending on oxid x%x\n",
ctxp->oxid);
@ -3449,7 +3447,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6166 Failed ABORT issue_wqe with status x%x "
"for oxid x%x.\n",
rc, ctxp->oxid);
@ -3474,7 +3472,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}
if (ctxp->state == LPFC_NVME_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
rc = WQE_BUSY;
@ -3512,7 +3510,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
"(%x)\n",
ctxp->oxid, rc, released);
@ -3544,7 +3542,7 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
ctxp->state = LPFC_NVME_STE_LS_ABORT;
ctxp->entry_cnt++;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6418 NVMET LS abort state mismatch "
"IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
@ -3558,7 +3556,7 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
/* Issue ABTS for this WQE based on iotag */
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->wqeq) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6068 Abort failed: No wqeqs: "
"xri: x%x\n", xri);
/* No failure to an ABTS request. */
@ -3590,7 +3588,7 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
return 1;
}
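
The NVMe target hunks above go through the HBA-scoped lpfc_printf_log() wrapper rather than the vport-scoped lpfc_printf_vlog(), but the conversion is identical: the mask argument becomes LOG_TRACE_EVENT and nothing else moves. A second hedged sketch, again with an invented caller and message id:

/*
 * Hypothetical caller, for illustration only: the phba-scoped wrapper takes
 * the HBA handle instead of a vport, and the mask argument is still the
 * only thing the conversion touches.
 */
static void example_report_ctx_error(struct lpfc_hba *phba, u16 oxid, int state)
{
	/* before: lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, ...); */
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"9998 example ctx error: oxid x%x state x%x\n",
			oxid, state);
}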


@ -867,11 +867,11 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9064 BLKGRD: %s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9064 BLKGRD: %s: Too many sg segments"
" from dma_map_sg. Config %d, seg_cnt"
" %d\n", __func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
@ -1061,7 +1061,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* inserted in middle of the IO.
*/
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR,
LOG_TRACE_EVENT,
"9076 BLKGRD: Injecting reftag error: "
"write lba x%lx + x%x oldrefTag x%x\n",
(unsigned long)lba, blockoff,
@ -1111,7 +1112,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_TGT | BG_ERR_CHECK;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9078 BLKGRD: Injecting reftag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@ -1132,7 +1133,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9077 BLKGRD: Injecting reftag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@ -1159,7 +1160,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9079 BLKGRD: Injecting reftag error: "
"read lba x%lx\n", (unsigned long)lba);
break;
@ -1181,7 +1182,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* inserted in middle of the IO.
*/
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR,
LOG_TRACE_EVENT,
"9080 BLKGRD: Injecting apptag error: "
"write lba x%lx + x%x oldappTag x%x\n",
(unsigned long)lba, blockoff,
@ -1230,7 +1232,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_TGT | BG_ERR_CHECK;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0813 BLKGRD: Injecting apptag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@ -1251,7 +1253,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0812 BLKGRD: Injecting apptag error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@ -1278,7 +1280,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0814 BLKGRD: Injecting apptag error: "
"read lba x%lx\n", (unsigned long)lba);
break;
@ -1313,7 +1315,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc |= BG_ERR_TGT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0817 BLKGRD: Injecting guard error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@ -1335,7 +1337,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0816 BLKGRD: Injecting guard error: "
"write lba x%lx\n", (unsigned long)lba);
break;
@ -1363,7 +1365,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0818 BLKGRD: Injecting guard error: "
"read lba x%lx\n", (unsigned long)lba);
}
@ -1413,7 +1415,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
scsi_get_prot_op(sc));
ret = 1;
@ -1442,7 +1444,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
scsi_get_prot_op(sc));
ret = 1;
@ -1728,7 +1730,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgde = scsi_sglist(sc);
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9020 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
@ -1840,7 +1842,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return num_bde + 1;
if (!sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9065 BLKGRD:%s Invalid data segment\n",
__func__);
return 0;
@ -1903,8 +1905,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag += protgrp_blks;
} else {
/* if we're here, we have a bug */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9054 BLKGRD: bug in %s\n", __func__);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9054 BLKGRD: bug in %s\n", __func__);
}
} while (!alldone);
@ -2154,7 +2156,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgde = scsi_sglist(sc);
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9082 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
@ -2307,7 +2309,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return num_sge + 1;
if (!sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9086 BLKGRD:%s Invalid data segment\n",
__func__);
return 0;
@ -2412,8 +2414,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
reftag += protgrp_blks;
} else {
/* if we're here, we have a bug */
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9085 BLKGRD: bug in %s\n", __func__);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9085 BLKGRD: bug in %s\n", __func__);
}
} while (!alldone);
@ -2453,7 +2455,7 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
break;
default:
if (phba)
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9021 Unsupported protection op:%d\n",
op);
break;
@ -2617,7 +2619,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
scsi_dma_unmap(scsi_cmnd);
lpfc_cmd->seg_cnt = 0;
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9022 Unexpected protection group %i\n",
prot_group_type);
return 2;
@ -2661,7 +2663,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
scsi_prot_sg_count(scsi_cmnd),
scsi_cmnd->sc_data_direction);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9023 Cannot setup S/G List for HBA"
"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
@ -3085,11 +3087,12 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
lpfc_cmd->seg_cnt = nseg;
if (!phba->cfg_xpsgl &&
lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
" %s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9074 BLKGRD:"
" %s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
@ -3366,7 +3369,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
scsi_dma_unmap(scsi_cmnd);
lpfc_cmd->seg_cnt = 0;
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9083 Unexpected protection group %i\n",
prot_group_type);
return 2;
@ -3422,7 +3425,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
scsi_prot_sg_count(scsi_cmnd),
scsi_cmnd->sc_data_direction);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9084 Cannot setup S/G List for HBA"
"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
@ -3632,17 +3635,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
if (resp_info & RSP_LEN_VALID) {
rsplen = be32_to_cpu(fcprsp->rspRspLen);
if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"2719 Invalid response length: "
"tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
cmnd->device->id,
cmnd->device->lun, cmnd->cmnd[0],
rsplen);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2719 Invalid response length: "
"tgt x%x lun x%llx cmnd x%x rsplen "
"x%x\n", cmnd->device->id,
cmnd->device->lun, cmnd->cmnd[0],
rsplen);
host_status = DID_ERROR;
goto out;
}
if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2757 Protocol failure detected during "
"processing of FCP I/O op: "
"tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
@ -3812,7 +3815,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
if (!cmd || !phba) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2621 IO completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
return;
@ -4277,7 +4280,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1418 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
@ -4324,7 +4327,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
* 0, successful
*/
int
lpfc_check_pci_resettable(const struct lpfc_hba *phba)
lpfc_check_pci_resettable(struct lpfc_hba *phba)
{
const struct pci_dev *pdev = phba->pcidev;
struct pci_dev *ptr = NULL;
@ -4528,7 +4531,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
" op:%02x str=%s without registering for"
" BlockGuard - Rejecting command\n",
@ -4887,7 +4890,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0748 abort handler timed out waiting "
"for aborting I/O (xri:x%x) to complete: "
"ret %#x, ID %d, LUN %llu\n",
@ -5080,7 +5083,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
(iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
if (status != IOCB_SUCCESS ||
iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0727 TMF %s to TGT %d LUN %llu "
"failed (%d, %d) iocb_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
@ -5195,7 +5198,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
}
if (cnt) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0724 I/O flush failure for context %s : cnt x%x\n",
((context == LPFC_CTX_LUN) ? "LUN" :
((context == LPFC_CTX_TGT) ? "TGT" :
@ -5231,7 +5234,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0798 Device Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
@ -5243,7 +5246,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0721 Device Reset rport failure: rdata x%px\n", rdata);
return FAILED;
}
@ -5260,7 +5263,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_LUN_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0713 SCSI layer issued Device Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
@ -5302,7 +5305,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0799 Target Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
@ -5314,7 +5317,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
spin_lock_irq(shost->host_lock);
@ -5339,7 +5342,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0723 SCSI layer issued Target Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
@ -5420,7 +5423,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
i, 0, FCP_TARGET_RESET);
if (status != SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0700 Bus Reset on target %d failed\n",
i);
ret = FAILED;
@ -5437,7 +5440,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (status != SUCCESS)
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
return ret;
}
@ -5466,7 +5469,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
int rc, ret = SUCCESS;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3172 SCSI layer issued Host Reset Data:\n");
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
@ -5483,7 +5486,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
return ret;
error:
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3323 Failed host reset\n");
lpfc_unblock_mgmt_io(phba);
return FAILED;
@ -5594,7 +5597,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
}
num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
if (num_to_alloc != num_allocated) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0708 Allocation request of %d "
"command buffers did not succeed. "
"Allocated %d buffers.\n",

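The SCSI hunks above contain one change that is not a pure mask swap: the const qualifier is dropped from the lpfc_check_pci_resettable() parameter. A plausible reading, stated here as an assumption rather than something the diff spells out, is that error logging through the HBA handle may now update driver-private state, which a pointer-to-const argument would not allow. The signature change itself, for reference:

/* before */ int lpfc_check_pci_resettable(const struct lpfc_hba *phba);
/* after  */ int lpfc_check_pci_resettable(struct lpfc_hba *phba);
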
File diff suppressed because it is too large


@ -145,7 +145,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (signal_pending(current)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1830 Signal aborted mbxCmd x%x\n",
mb->mbxCommand);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@ -154,7 +154,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
mempool_free(pmb, phba->mbox_mem_pool);
return -EINTR;
} else {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1818 VPort failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x, rc = x%x\n",
mb->mbxCommand, mb->mbxStatus, rc);
@ -190,7 +190,7 @@ lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
return 1;
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1822 Invalid %s: %02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x\n",
name_type,
@ -284,11 +284,11 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
}
if (time_after(jiffies, wait_time_max))
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1835 Vport discovery quiesce failed:"
" state x%x fc_flags x%x wait msecs x%x\n",
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1835 Vport discovery quiesce failed:"
" state x%x fc_flags x%x wait msecs x%x\n",
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
}
int
@ -305,7 +305,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
int status;
if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1808 Create VPORT failed: "
"NPIV is not enabled: SLImode:%d\n",
phba->sli_rev);
@ -315,7 +315,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* NPIV is not supported if HBA has NVME Target enabled */
if (phba->nvmet_support) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3189 Create VPORT failed: "
"NPIV is not supported on NVME Target\n");
rc = VPORT_INVAL;
@ -324,7 +324,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vpi = lpfc_alloc_vpi(phba);
if (vpi == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1809 Create VPORT failed: "
"Max VPORTs (%d) exceeded\n",
phba->max_vpi);
@ -334,7 +334,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1810 Create VPORT failed: Cannot get "
"instance number\n");
lpfc_free_vpi(phba, vpi);
@ -344,7 +344,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport = lpfc_create_port(phba, instance, &fc_vport->dev);
if (!vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1811 Create VPORT failed: vpi x%x\n", vpi);
lpfc_free_vpi(phba, vpi);
rc = VPORT_NORESOURCES;
@ -356,11 +356,11 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
if ((status = lpfc_vport_sparm(phba, vport))) {
if (status == -EINTR) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1831 Create VPORT Interrupted.\n");
rc = VPORT_ERROR;
} else {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1813 Create VPORT failed. "
"Cannot get sparam\n");
rc = VPORT_NORESOURCES;
@ -378,7 +378,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
!lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1821 Create VPORT failed. "
"Invalid WWN format\n");
lpfc_free_vpi(phba, vpi);
@ -388,7 +388,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (!lpfc_unique_wwpn(phba, vport)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1823 Create VPORT failed. "
"Duplicate WWN on HBA\n");
lpfc_free_vpi(phba, vpi);
@ -426,7 +426,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
(pport->fc_flag & FC_VFI_REGISTERED)) {
rc = lpfc_sli4_init_vpi(vport);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1838 Failed to INIT_VPI on vpi %d "
"status %d\n", vpi, rc);
rc = VPORT_NORESOURCES;
@ -469,7 +469,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0262 No NPIV Fabric support\n");
}
} else {
@ -478,8 +478,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
rc = VPORT_OK;
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1825 Vport Created.\n");
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1825 Vport Created.\n");
lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
return rc;
@ -534,7 +534,7 @@ disable_vport(struct fc_vport *fc_vport)
}
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1826 Vport Disabled.\n");
return VPORT_OK;
}
@ -575,7 +575,7 @@ enable_vport(struct fc_vport *fc_vport)
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0264 No NPIV Fabric support\n");
}
} else {
@ -583,7 +583,7 @@ enable_vport(struct fc_vport *fc_vport)
}
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1827 Vport Enabled.\n");
return VPORT_OK;
}
@ -609,7 +609,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
bool ns_ndlp_referenced = false;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1812 vport_delete failed: Cannot delete "
"physical host\n");
return VPORT_ERROR;
@ -618,7 +618,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
/* If the vport is a static vport fail the deletion. */
if ((vport->vport_flag & STATIC_VPORT) &&
!(phba->pport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1837 vport_delete failed: Cannot delete "
"static vport.\n");
return VPORT_ERROR;
@ -807,7 +807,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->port_list_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1828 Vport Deleted.\n");
scsi_host_put(shost);
return VPORT_OK;
@ -828,7 +828,8 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
if (port_iterator->load_flag & FC_UNLOADING)
continue;
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(port_iterator, KERN_ERR,
LOG_TRACE_EVENT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
@ -898,7 +899,8 @@ lpfc_alloc_bucket(struct lpfc_vport *vport)
GFP_ATOMIC);
if (!ndlp->lat_data)
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0287 lpfc_alloc_bucket failed to "
"allocate statistical data buffer DID "
"0x%x\n", ndlp->nlp_DID);