Commit ef918d3c authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This is a set of user visible fixes (excepting one format string
  change).

  Four of the qla2xxx fixes only affect the firmware dump path, but it's
  still important to the enterprise. The rest are various NULL pointer
  crash conditions or outright driver hangs"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: cxgb4i: libcxgbi: in error case RST tcp conn
  scsi: scsi_debug: Avoid PI being disabled when TPGS is enabled
  scsi: qla2xxx: Fix extraneous ref on sp's after adapter break
  scsi: lpfc: prevent potential null pointer dereference
  scsi: lpfc: Avoid NULL pointer dereference in lpfc_els_abort()
  scsi: lpfc: nvmet_fc: fix format string
  scsi: qla2xxx: Fix crash due to NULL pointer dereference of ctx
  scsi: qla2xxx: Fix mailbox pointer error in fwdump capture
  scsi: qla2xxx: Set bit 15 for DIAG_ECHO_TEST MBC
  scsi: qla2xxx: Modify T262 FW dump template to specify same start/end to debug customer issues
  scsi: qla2xxx: Fix crash due to mismatched number of Q-pair creation for Multi queue
  scsi: qla2xxx: Fix NULL pointer access due to redundant fc_host_port_name call
  scsi: qla2xxx: Fix recursive loop during target mode configuration for ISP25XX leaving system unresponsive
  scsi: bnx2fc: fix race condition in bnx2fc_get_host_stats()
  scsi: qla2xxx: don't disable a not previously enabled PCI device
parents 8f56821d e0f8e8cf
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
     struct bnx2fc_cmd_mgr *cmd_mgr;
     spinlock_t hba_lock;
     struct mutex hba_mutex;
+    struct mutex hba_stats_mutex;
     unsigned long adapter_state;
 #define ADAPTER_STATE_UP		0
 #define ADAPTER_STATE_GOING_DOWN	1
......
@@ -663,15 +663,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
     if (!fw_stats)
         return NULL;
+    mutex_lock(&hba->hba_stats_mutex);
     bnx2fc_stats = fc_get_host_stats(shost);
     init_completion(&hba->stat_req_done);
     if (bnx2fc_send_stat_req(hba))
-        return bnx2fc_stats;
+        goto unlock_stats_mutex;
     rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
     if (!rc) {
         BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
-        return bnx2fc_stats;
+        goto unlock_stats_mutex;
     }
     BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
     bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -693,6 +695,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
     memcpy(&hba->prev_stats, hba->stats_buffer,
            sizeof(struct fcoe_statistics_params));
+unlock_stats_mutex:
+    mutex_unlock(&hba->hba_stats_mutex);
     return bnx2fc_stats;
 }
@@ -1340,6 +1345,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
     }
     spin_lock_init(&hba->hba_lock);
     mutex_init(&hba->hba_mutex);
+    mutex_init(&hba->hba_stats_mutex);
     hba->cnic = cnic;
......
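
The bnx2fc change above serializes bnx2fc_get_host_stats() with the new hba_stats_mutex, so that concurrent statistics readers no longer race on the shared completion and firmware stats buffer. Below is a minimal sketch of that request/wait/copy pattern; fw_stats_ctx, collect_fw_stats() and send_stats_request() are invented names, not bnx2fc symbols, and the mutex and completion are assumed to be initialized once at setup time, as the patch does in bnx2fc_hba_create().

    /* Sketch only: one mutex covers the whole request/wait/copy sequence. */
    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct fw_stats_ctx {
        struct mutex lock;          /* one stats collector at a time */
        struct completion done;     /* signalled when the FW response lands */
        u64 snapshot;               /* buffer filled by the response handler */
    };

    int send_stats_request(struct fw_stats_ctx *c);   /* assumed FW hook */

    static int collect_fw_stats(struct fw_stats_ctx *c, u64 *out)
    {
        int rc = 0;

        mutex_lock(&c->lock);
        reinit_completion(&c->done);
        if (send_stats_request(c)) {
            rc = -EIO;
            goto unlock;
        }
        if (!wait_for_completion_timeout(&c->done, 2 * HZ)) {
            rc = -ETIMEDOUT;
            goto unlock;
        }
        *out = c->snapshot;         /* copy while the lock is still held */
    unlock:
        mutex_unlock(&c->lock);
        return rc;
    }

Without the lock, two readers could re-arm the same completion and interleave their copies of the firmware buffer, which is the race the bnx2fc commit closes.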
@@ -1595,7 +1595,6 @@ static void release_offload_resources(struct cxgbi_sock *csk)
         cxgbi_sock_put(csk);
     }
     csk->dst = NULL;
-    csk->cdev = NULL;
 }
 static int init_act_open(struct cxgbi_sock *csk)
......
@@ -867,7 +867,8 @@ static void need_active_close(struct cxgbi_sock *csk)
     log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
         csk, (csk)->state, (csk)->flags, (csk)->tid);
     spin_lock_bh(&csk->lock);
-    dst_confirm(csk->dst);
+    if (csk->dst)
+        dst_confirm(csk->dst);
     data_lost = skb_queue_len(&csk->receive_queue);
     __skb_queue_purge(&csk->receive_queue);
@@ -882,7 +883,8 @@ static void need_active_close(struct cxgbi_sock *csk)
     }
     if (close_req) {
-        if (data_lost)
+        if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
+            data_lost)
             csk->cdev->csk_send_abort_req(csk);
         else
             csk->cdev->csk_send_close_req(csk);
@@ -1186,9 +1188,10 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
             cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
         skb = next;
     }
-done:
     if (likely(skb_queue_len(&csk->write_queue)))
         cdev->csk_push_tx_frames(csk, 1);
+done:
     spin_unlock_bh(&csk->lock);
     return copied;
@@ -1568,9 +1571,12 @@ static inline int read_pdu_skb(struct iscsi_conn *conn,
     }
 }
-static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
+static int
+skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
+         struct sk_buff *skb)
 {
     struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+    int err;
     log_debug(1 << CXGBI_DBG_PDU_RX,
         "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
@@ -1608,7 +1614,16 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
         }
     }
-    return read_pdu_skb(conn, skb, 0, 0);
+    err = read_pdu_skb(conn, skb, 0, 0);
+    if (likely(err >= 0)) {
+        struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
+        u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+        if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
+            cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
+    }
+    return err;
 }
 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
@@ -1713,7 +1728,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
                 cxgbi_skcb_rx_pdulen(skb));
         if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
-            err = skb_read_pdu_bhs(conn, skb);
+            err = skb_read_pdu_bhs(csk, conn, skb);
             if (err < 0) {
                 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
                     "f 0x%lx, plen %u.\n",
@@ -1731,7 +1746,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
                     cxgbi_skcb_flags(skb),
                     cxgbi_skcb_rx_pdulen(skb));
         } else {
-            err = skb_read_pdu_bhs(conn, skb);
+            err = skb_read_pdu_bhs(csk, conn, skb);
             if (err < 0) {
                 pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
                     "f 0x%lx, plen %u.\n",
......
@@ -187,6 +187,7 @@ enum cxgbi_sock_flags {
     CTPF_HAS_ATID,        /* reserved atid */
     CTPF_HAS_TID,         /* reserved hw tid */
     CTPF_OFFLOAD_DOWN,    /* offload function off */
+    CTPF_LOGOUT_RSP_RCVD, /* received logout response */
 };
 struct cxgbi_skb_rx_cb {
......
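
Taken together, the cxgb4i/libcxgbi changes remember whether an iSCSI LOGOUT response was ever received (the new CTPF_LOGOUT_RSP_RCVD flag, set in skb_read_pdu_bhs()) and, on an active close, abort the offloaded TCP connection with a RST unless the logout handshake completed and no received data had to be thrown away. A compressed sketch of that close-path decision follows; struct conn, send_abort_req() and send_close_req() are stand-ins for struct cxgbi_sock and the csk_send_abort_req/csk_send_close_req callbacks, not the cxgbi API.

    /* Sketch of the close-path decision introduced above. */
    #include <linux/types.h>

    struct conn;
    void send_abort_req(struct conn *c);   /* abort path -> TCP RST */
    void send_close_req(struct conn *c);   /* orderly close */

    static void close_offloaded_conn(struct conn *c, bool logout_rsp_rcvd,
                                     unsigned int data_lost)
    {
        /* RST unless the iSCSI logout handshake completed and nothing from
         * the receive queue was discarded. */
        if (!logout_rsp_rcvd || data_lost)
            send_abort_req(c);
        else
            send_close_req(c);
    }

Here logout_rsp_rcvd corresponds to the CTPF_LOGOUT_RSP_RCVD flag and data_lost to the purged receive-queue length in need_active_close().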
@@ -127,7 +127,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
 void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
 int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
              struct serv_parm *, uint32_t, int);
-int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
 void lpfc_more_plogi(struct lpfc_vport *);
 void lpfc_more_adisc(struct lpfc_vport *);
 void lpfc_end_rscn(struct lpfc_vport *);
......
@@ -978,9 +978,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 ndlp, did, ndlp->nlp_fc4_type,
                 FC_TYPE_FCP, FC_TYPE_NVME);
             ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+            lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+            lpfc_issue_els_prli(vport, ndlp, 0);
         }
-        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
-        lpfc_issue_els_prli(vport, ndlp, 0);
     } else
         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
             "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
......
@@ -206,7 +206,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * associated with a LPFC_NODELIST entry. This
  * routine effectively results in a "software abort".
  */
-int
+void
 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
     LIST_HEAD(abort_list);
@@ -215,6 +215,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
     pring = lpfc_phba_elsring(phba);
+    /* In case of error recovery path, we might have a NULL pring here */
+    if (!pring)
+        return;
     /* Abort outstanding I/O on NPort <nlp_DID> */
     lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
              "2819 Abort outstanding I/O on NPort x%x "
@@ -273,7 +277,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
     lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
-    return 0;
 }
 static int
......
@@ -799,8 +799,8 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
     }
     spin_unlock_irqrestore(&ctxp->ctxlock, flags);
-    lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
-             ctxp->state, 0);
+    lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
+             ctxp->state, aborting);
     atomic_inc(&lpfc_nvmep->xmt_fcp_release);
......
@@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
         return -EIO;
     }
+    memset(&elreq, 0, sizeof(elreq));
     elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
         bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
         DMA_TO_DEVICE);
@@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
     if (atomic_read(&vha->loop_state) == LOOP_READY &&
         (ha->current_topology == ISP_CFG_F ||
-        ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
-        le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
-        && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
-        elreq.options == EXTERNAL_LOOPBACK) {
+        (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+        req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+        elreq.options == EXTERNAL_LOOPBACK) {
         type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
         ql_dbg(ql_dbg_user, vha, 0x701e,
             "BSG request type: %s.\n", type);
......
@@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
     /* Mailbox registers. */
     mbx_reg = &reg->mailbox0;
-    for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+    for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
         fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
     /* Transfer sequence registers. */
@@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
     /* Mailbox registers. */
     mbx_reg = &reg->mailbox0;
-    for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+    for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
         fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
     /* Transfer sequence registers. */
......
@@ -3425,6 +3425,7 @@ struct qla_hw_data {
     uint8_t max_req_queues;
     uint8_t max_rsp_queues;
     uint8_t max_qpairs;
+    uint8_t num_qpairs;
     struct qla_qpair *base_qpair;
     struct qla_npiv_entry *npiv_info;
     uint16_t nvram_npiv_size;
......
@@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
     /* Assign available que pair id */
     mutex_lock(&ha->mq_lock);
     qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
-    if (qpair_id >= ha->max_qpairs) {
+    if (ha->num_qpairs >= ha->max_qpairs) {
         mutex_unlock(&ha->mq_lock);
         ql_log(ql_log_warn, vha, 0x0183,
             "No resources to create additional q pair.\n");
         goto fail_qid_map;
     }
+    ha->num_qpairs++;
     set_bit(qpair_id, ha->qpair_qid_map);
     ha->queue_pair_map[qpair_id] = qpair;
     qpair->id = qpair_id;
@@ -7635,6 +7636,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
 fail_msix:
     ha->queue_pair_map[qpair_id] = NULL;
     clear_bit(qpair_id, ha->qpair_qid_map);
+    ha->num_qpairs--;
     mutex_unlock(&ha->mq_lock);
 fail_qid_map:
     kfree(qpair);
@@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
     mutex_lock(&ha->mq_lock);
     ha->queue_pair_map[qpair->id] = NULL;
     clear_bit(qpair->id, ha->qpair_qid_map);
+    ha->num_qpairs--;
     list_del(&qpair->qp_list_elem);
     if (list_empty(&vha->qp_list))
         vha->flags.qpairs_available = 0;
......
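
The qla_init.c hunks replace the capacity check based on find_first_zero_bit() with an explicit num_qpairs counter that is incremented on successful creation and decremented on both the MSI-X failure path and deletion, all under ha->mq_lock. A reduced sketch of that accounting is shown below; qp_table, qp_alloc_id() and qp_free_id() are invented names, not qla2xxx structures.

    /* Sketch of the counter-based capacity check. */
    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct qp_table {
        struct mutex lock;
        unsigned long *qid_map;     /* bitmap of allocated queue-pair ids */
        unsigned int max_qpairs;
        unsigned int num_qpairs;    /* explicit count used for the check */
    };

    static int qp_alloc_id(struct qp_table *t)
    {
        int id;

        mutex_lock(&t->lock);
        id = find_first_zero_bit(t->qid_map, t->max_qpairs);
        if (t->num_qpairs >= t->max_qpairs) {   /* count, not bit position */
            mutex_unlock(&t->lock);
            return -ENOSPC;
        }
        t->num_qpairs++;
        set_bit(id, t->qid_map);
        mutex_unlock(&t->lock);
        return id;
    }

    static void qp_free_id(struct qp_table *t, int id)
    {
        mutex_lock(&t->lock);
        clear_bit(id, t->qid_map);
        t->num_qpairs--;            /* mirror of the increment in qp_alloc_id() */
        mutex_unlock(&t->lock);
    }

In this sketch the counter and the bitmap population always match under the lock, so checking the counter is sufficient; the patch applies the same idea with ha->num_qpairs.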
@@ -129,28 +129,16 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
 }
 static inline void
-qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
-    struct qla_tgt_cmd *tc)
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
 {
-    struct dsd_dma *dsd_ptr, *tdsd_ptr;
-    struct crc_context *ctx;
-    if (sp)
-        ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
-    else if (tc)
-        ctx = (struct crc_context *)tc->ctx;
-    else {
-        BUG();
-        return;
-    }
+    struct dsd_dma *dsd, *tdsd;
     /* clean up allocated prev pool */
-    list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
-        &ctx->dsd_list, list) {
-        dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
-            dsd_ptr->dsd_list_dma);
-        list_del(&dsd_ptr->list);
-        kfree(dsd_ptr);
+    list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
+        dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
+            dsd->dsd_list_dma);
+        list_del(&dsd->list);
+        kfree(dsd);
     }
     INIT_LIST_HEAD(&ctx->dsd_list);
 }
......
@@ -3282,7 +3282,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
     }
     /* Enable MSI-X vector for response queue update for queue 0 */
-    if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+    if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
         if (ha->msixbase && ha->mqiobase &&
             (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
              ql2xmqsupport))
......
@@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
             qlt_update_host_map(vha, id);
         }
-        fc_host_port_name(vha->host) =
-            wwn_to_u64(vha->port_name);
-        if (qla_ini_mode_enabled(vha))
-            ql_dbg(ql_dbg_mbx, vha, 0x1018,
-                "FA-WWN portname %016llx (%x)\n",
-                fc_host_port_name(vha->host),
-                rptid_entry->vp_status);
         set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
         set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
     } else {
@@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
     memset(mcp->mb, 0 , sizeof(mcp->mb));
     mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
-    mcp->mb[1] = mreq->options | BIT_6;    /* BIT_6 specifies 64bit address */
+    /* BIT_6 specifies 64bit address */
+    mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
     if (IS_CNA_CAPABLE(ha)) {
-        mcp->mb[1] |= BIT_15;
         mcp->mb[2] = vha->fcoe_fcf_idx;
     }
     mcp->mb[16] = LSW(mreq->rcv_dma);
......
@@ -630,29 +630,34 @@ qla2x00_sp_free_dma(void *ptr)
         sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
     }
+    if (!ctx)
+        goto end;
     if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
         /* List assured to be having elements */
-        qla2x00_clean_dsd_pool(ha, sp, NULL);
+        qla2x00_clean_dsd_pool(ha, ctx);
         sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
     }
     if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-        dma_pool_free(ha->dl_dma_pool, ctx,
-            ((struct crc_context *)ctx)->crc_ctx_dma);
+        struct crc_context *ctx0 = ctx;
+        dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
         sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
     }
     if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-        struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+        struct ct6_dsd *ctx1 = ctx;
         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
             ctx1->fcp_cmnd_dma);
         list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
         ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
         ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
         mempool_free(ctx1, ha->ctx_mempool);
     }
+end:
     CMD_SP(cmd) = NULL;
     qla2x00_rel_sp(sp);
 }
@@ -699,21 +704,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
         sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
     }
+    if (!ctx)
+        goto end;
     if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
         /* List assured to be having elements */
-        qla2x00_clean_dsd_pool(ha, sp, NULL);
+        qla2x00_clean_dsd_pool(ha, ctx);
         sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
     }
     if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-        dma_pool_free(ha->dl_dma_pool, ctx,
-            ((struct crc_context *)ctx)->crc_ctx_dma);
+        struct crc_context *ctx0 = ctx;
+        dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
         sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
     }
     if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-        struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+        struct ct6_dsd *ctx1 = ctx;
         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
             ctx1->fcp_cmnd_dma);
         list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
@@ -721,7 +729,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
         ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
         mempool_free(ctx1, ha->ctx_mempool);
     }
+end:
     CMD_SP(cmd) = NULL;
     qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
@@ -1632,7 +1640,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 void
 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 {
-    int que, cnt;
+    int que, cnt, status;
     unsigned long flags;
     srb_t *sp;
     struct qla_hw_data *ha = vha->hw;
@@ -1662,8 +1670,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                      */
                     sp_get(sp);
                     spin_unlock_irqrestore(&ha->hardware_lock, flags);
-                    qla2xxx_eh_abort(GET_CMD_SP(sp));
+                    status = qla2xxx_eh_abort(GET_CMD_SP(sp));
                     spin_lock_irqsave(&ha->hardware_lock, flags);
+                    /* Get rid of extra reference if immediate exit
+                     * from ql2xxx_eh_abort */
+                    if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
+                        atomic_dec(&sp->ref_count);
                 }
                 req->outstanding_cmds[cnt] = NULL;
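
The last qla_os.c hunk takes an extra reference on each outstanding command before dropping the hardware lock and, when qla2xxx_eh_abort() returns FAILED because the ISP registers are unreadable, drops that reference itself since the abort path never consumed it. A generic sketch of that ownership rule follows; struct cmd, issue_abort() and hw_unusable() are illustrative stand-ins, not qla2xxx API.

    /* Sketch of the reference hand-off around the unlocked abort call. */
    #include <linux/atomic.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct cmd {
        atomic_t ref_count;
    };

    int issue_abort(struct cmd *sp);    /* non-zero means it bailed out early */
    bool hw_unusable(void);             /* e.g. adapter registers unreadable */

    static void abort_outstanding(struct cmd *sp, spinlock_t *hw_lock)
    {
        unsigned long flags;
        int failed;

        spin_lock_irqsave(hw_lock, flags);
        atomic_inc(&sp->ref_count);     /* keep sp alive across the unlock */
        spin_unlock_irqrestore(hw_lock, flags);

        failed = issue_abort(sp);       /* may exit before using the reference */

        spin_lock_irqsave(hw_lock, flags);
        /* An early exit never consumed the extra reference; drop it here so
         * the command is not leaked. */
        if (failed && hw_unusable())
            atomic_dec(&sp->ref_count);
        spin_unlock_irqrestore(hw_lock, flags);
    }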