Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 315938 Details for
Bug 461330
Update realtime kernel's lpfc version to 8.2.0.29
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
Patch to update Emulex lpfc driver 8.2.2 to 8.2.0.29
lpfc-8.2.2-to-8.2.0.29.diff (text/plain), 630.77 KB, created by
Jamie Wellnitz
on 2008-09-05 22:02:46 UTC
(
hide
)
Description:
Patch to update Emulex lpfc driver 8.2.2 to 8.2.0.29
Filename:
MIME Type:
Creator:
Jamie Wellnitz
Created:
2008-09-05 22:02:46 UTC
Size:
630.77 KB
patch
obsolete
>diff -urpN a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c >--- a/drivers/scsi/lpfc/lpfc_attr.c 2008-09-05 17:47:41.627244000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_attr.c 2008-09-05 17:47:49.638880000 -0400 >@@ -1,10 +1,10 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * >- * Portions Copyright (C) 2004-2005 Christoph Hellwig * >+ * Portions Copyright (C) 2004-2008 Christoph Hellwig * > * * > * This program is free software; you can redistribute it and/or * > * modify it under the terms of version 2 of the GNU General * >@@ -40,11 +40,25 @@ > #include "lpfc_compat.h" > #include "lpfc_crtn.h" > #include "lpfc_vport.h" >+#include "lpfc_auth_access.h" > > #define LPFC_DEF_DEVLOSS_TMO 30 > #define LPFC_MIN_DEVLOSS_TMO 1 > #define LPFC_MAX_DEVLOSS_TMO 255 > >+#define LPFC_MAX_LINK_SPEED 8 >+#define LPFC_LINK_SPEED_BITMAP 0x00000117 >+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8" >+ >+extern struct bin_attribute sysfs_menlo_attr; >+ >+/* >+ * Write key size should be multiple of 4. If write key is changed >+ * make sure that library write key is also changed. 
>+ */ >+#define LPFC_REG_WRITE_KEY_SIZE 4 >+#define LPFC_REG_WRITE_KEY "EMLX" >+ > static void > lpfc_jedec_to_ascii(int incr, char hdw[]) > { >@@ -68,6 +82,12 @@ lpfc_drvr_version_show(struct class_devi > } > > static ssize_t >+management_version_show(struct class_device *cdev, char *buf) >+{ >+ return snprintf(buf, PAGE_SIZE, DFC_API_VERSION "\n"); >+} >+ >+static ssize_t > lpfc_info_show(struct class_device *cdev, char *buf) > { > struct Scsi_Host *host = class_to_shost(cdev); >@@ -86,6 +106,15 @@ lpfc_serialnum_show(struct class_device > } > > static ssize_t >+lpfc_temp_sensor_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ struct lpfc_hba *phba = vport->phba; >+ return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); >+} >+ >+static ssize_t > lpfc_modeldesc_show(struct class_device *cdev, char *buf) > { > struct Scsi_Host *shost = class_to_shost(cdev); >@@ -116,6 +145,16 @@ lpfc_programtype_show(struct class_devic > } > > static ssize_t >+lpfc_mlomgmt_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ struct lpfc_hba *phba = vport->phba; >+ >+ return snprintf(buf, PAGE_SIZE, "%d\n", >+ (phba->sli.sli_flag & LPFC_MENLO_MAINT)); >+} >+static ssize_t > lpfc_vportnum_show(struct class_device *cdev, char *buf) > { > struct Scsi_Host *shost = class_to_shost(cdev); >@@ -178,12 +217,9 @@ lpfc_state_show(struct class_device *cde > case LPFC_LINK_UP: > case LPFC_CLEAR_LA: > case LPFC_HBA_READY: >- len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n"); >+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - "); > > switch (vport->port_state) { >- len += snprintf(buf + len, PAGE_SIZE-len, >- "initializing\n"); >- break; > case LPFC_LOCAL_CFG_LINK: > len += snprintf(buf + len, PAGE_SIZE-len, > "Configuring Link\n"); >@@ 
-211,8 +247,10 @@ lpfc_state_show(struct class_device *cde > "Unknown\n"); > break; > } >- >- if (phba->fc_topology == TOPOLOGY_LOOP) { >+ if (phba->sli.sli_flag & LPFC_MENLO_MAINT) >+ len += snprintf(buf + len, PAGE_SIZE-len, >+ " Menlo Maint Mode\n"); >+ else if (phba->fc_topology == TOPOLOGY_LOOP) { > if (vport->fc_flag & FC_PUBLIC_LOOP) > len += snprintf(buf + len, PAGE_SIZE-len, > " Public Loop\n"); >@@ -252,8 +290,7 @@ lpfc_issue_lip(struct Scsi_Host *shost) > int mbxstatus = MBXERR_ERROR; > > if ((vport->fc_flag & FC_OFFLINE_MODE) || >- (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) || >- (vport->port_state != LPFC_VPORT_READY)) >+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) > return -EPERM; > > pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); >@@ -305,12 +342,14 @@ lpfc_do_offline(struct lpfc_hba *phba, u > > psli = &phba->sli; > >+ /* Wait a little for things to settle down, but not >+ * long enough for dev loss timeout to expire. >+ */ > for (i = 0; i < psli->num_rings; i++) { > pring = &psli->ring[i]; >- /* The linkdown event takes 30 seconds to timeout. 
*/ > while (pring->txcmplq_cnt) { > msleep(10); >- if (cnt++ > 3000) { >+ if (cnt++ > 500) { /* 5 secs */ > lpfc_printf_log(phba, > KERN_WARNING, LOG_INIT, > "0466 Outstanding IO when " >@@ -330,20 +369,22 @@ lpfc_do_offline(struct lpfc_hba *phba, u > return 0; > } > >-static int >+int > lpfc_selective_reset(struct lpfc_hba *phba) > { > struct completion online_compl; > int status = 0; > >+ if (!phba->cfg_enable_hba_reset) >+ return -EIO; >+ > status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); > > if (status != 0) > return status; > > init_completion(&online_compl); >- lpfc_workq_post_event(phba, &status, &online_compl, >- LPFC_EVT_ONLINE); >+ lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); > wait_for_completion(&online_compl); > > if (status != 0) >@@ -409,6 +450,8 @@ lpfc_board_mode_store(struct class_devic > struct completion online_compl; > int status=0; > >+ if (!phba->cfg_enable_hba_reset) >+ return -EACCES; > init_completion(&online_compl); > > if(strncmp(buf, "online", sizeof("online") - 1) == 0) { >@@ -652,6 +695,134 @@ lpfc_poll_store(struct class_device *cde > return strlen(buf); > } > >+static ssize_t >+lpfc_auth_state_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ switch (vport->auth.auth_state) { >+ case LPFC_AUTH_UNKNOWN: >+ if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE || >+ vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE || >+ vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY || >+ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY) >+ return snprintf(buf, PAGE_SIZE, "Authenticating\n"); >+ else >+ return snprintf(buf, PAGE_SIZE, "Not Authenticated\n"); >+ case LPFC_AUTH_FAIL: >+ return snprintf(buf, PAGE_SIZE, "Failed\n"); >+ case LPFC_AUTH_SUCCESS: >+ if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE || >+ vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE || >+ 
vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY || >+ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY) >+ return snprintf(buf, PAGE_SIZE, "Authenticating\n"); >+ else if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS) >+ return snprintf(buf, PAGE_SIZE, "Authenticated\n"); >+ } >+ return snprintf(buf, PAGE_SIZE, "Unknown\n"); >+} >+ >+static ssize_t >+lpfc_auth_dir_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ if (!vport->cfg_enable_auth || >+ vport->auth.auth_state != LPFC_AUTH_SUCCESS) >+ return snprintf(buf, PAGE_SIZE, "Unknown\n"); >+ if (vport->auth.direction == AUTH_DIRECTION_LOCAL) >+ return snprintf(buf, PAGE_SIZE, "Local Authenticated\n"); >+ else if (vport->auth.direction == AUTH_DIRECTION_REMOTE) >+ return snprintf(buf, PAGE_SIZE, "Remote Authenticated\n"); >+ else if (vport->auth.direction == AUTH_DIRECTION_BIDI) >+ return snprintf(buf, PAGE_SIZE, "Bidi Authentication\n"); >+ return snprintf(buf, PAGE_SIZE, "Unknown\n"); >+} >+ >+static ssize_t >+lpfc_auth_protocol_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ if (vport->cfg_enable_auth && >+ vport->auth.auth_state == LPFC_AUTH_SUCCESS) >+ return snprintf(buf, PAGE_SIZE, "1 (DH-CHAP)\n"); >+ else >+ return snprintf(buf, PAGE_SIZE, "Unknown\n"); >+} >+ >+static ssize_t >+lpfc_auth_dhgroup_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ if (!vport->cfg_enable_auth || >+ vport->auth.auth_state != LPFC_AUTH_SUCCESS) >+ return snprintf(buf, PAGE_SIZE, "Unknown\n"); >+ switch (vport->auth.group_id) { >+ case DH_GROUP_NULL: >+ return snprintf(buf, PAGE_SIZE, "0 (NULL)\n"); >+ case DH_GROUP_1024: >+ return snprintf(buf, PAGE_SIZE, "1 
(1024)\n"); >+ case DH_GROUP_1280: >+ return snprintf(buf, PAGE_SIZE, "2 (1280)\n"); >+ case DH_GROUP_1536: >+ return snprintf(buf, PAGE_SIZE, "3 (1536)\n"); >+ case DH_GROUP_2048: >+ return snprintf(buf, PAGE_SIZE, "4 (2048)\n"); >+ } >+ return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n", >+ vport->auth.group_id); >+} >+ >+static ssize_t >+lpfc_auth_hash_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ if (!vport->cfg_enable_auth || >+ vport->auth.auth_state != LPFC_AUTH_SUCCESS) >+ return snprintf(buf, PAGE_SIZE, "Unknown\n"); >+ switch (vport->auth.hash_id) { >+ case FC_SP_HASH_MD5: >+ return snprintf(buf, PAGE_SIZE, "5 (MD5)\n"); >+ case FC_SP_HASH_SHA1: >+ return snprintf(buf, PAGE_SIZE, "6 (SHA1)\n"); >+ } >+ return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n", >+ vport->auth.hash_id); >+} >+static ssize_t >+lpfc_auth_last_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ struct timeval last_time; >+ if (!vport->cfg_enable_auth || vport->auth.last_auth == 0) >+ return snprintf(buf, PAGE_SIZE, "%d\n", -1); >+ jiffies_to_timeval((jiffies - vport->auth.last_auth), &last_time); >+ return snprintf(buf, PAGE_SIZE, "%ld\n", last_time.tv_sec); >+} >+ >+static ssize_t >+lpfc_auth_next_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ unsigned long next_jiff; >+ struct timeval next_time; >+ if (!vport->cfg_enable_auth || >+ vport->auth.last_auth == 0 || >+ vport->auth.reauth_interval == 0) >+ return snprintf(buf, PAGE_SIZE, "%d\n", -1); >+ /* calculate the amount of time left until next auth */ >+ next_jiff = (msecs_to_jiffies(vport->auth.reauth_interval * 60000) + >+ vport->auth.last_auth) - jiffies; >+ 
jiffies_to_timeval(next_jiff, &next_time); >+ return snprintf(buf, PAGE_SIZE, "%ld\n", next_time.tv_sec); >+} >+ > #define lpfc_param_show(attr) \ > static ssize_t \ > lpfc_##attr##_show(struct class_device *cdev, char *buf) \ >@@ -756,7 +927,7 @@ lpfc_##attr##_init(struct lpfc_vport *vp > return 0;\ > }\ > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ >- "0449 lpfc_"#attr" attribute cannot be set to %d, "\ >+ "0423 lpfc_"#attr" attribute cannot be set to %d, "\ > "allowed range is ["#minval", "#maxval"]\n", val); \ > vport->cfg_##attr = default;\ > return -EINVAL;\ >@@ -771,7 +942,7 @@ lpfc_##attr##_set(struct lpfc_vport *vpo > return 0;\ > }\ > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ >- "0450 lpfc_"#attr" attribute cannot be set to %d, "\ >+ "0424 lpfc_"#attr" attribute cannot be set to %d, "\ > "allowed range is ["#minval", "#maxval"]\n", val); \ > return -EINVAL;\ > } >@@ -895,9 +1066,12 @@ static CLASS_DEVICE_ATTR(option_rom_vers > lpfc_option_rom_version_show, NULL); > static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO, > lpfc_num_discovered_ports_show, NULL); >+static CLASS_DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); > static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); > static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, > NULL); >+static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, >+ NULL); > static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, > lpfc_board_mode_show, lpfc_board_mode_store); > static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); >@@ -908,6 +1082,40 @@ static CLASS_DEVICE_ATTR(used_rpi, S_IRU > static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); > static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); > static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); >+static CLASS_DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, >+ NULL); >+static 
CLASS_DEVICE_ATTR(auth_state, S_IRUGO, lpfc_auth_state_show, NULL); >+static CLASS_DEVICE_ATTR(auth_dir, S_IRUGO, lpfc_auth_dir_show, NULL); >+static CLASS_DEVICE_ATTR(auth_protocol, S_IRUGO, lpfc_auth_protocol_show, NULL); >+static CLASS_DEVICE_ATTR(auth_dhgroup, S_IRUGO, lpfc_auth_dhgroup_show, NULL); >+static CLASS_DEVICE_ATTR(auth_hash, S_IRUGO, lpfc_auth_hash_show, NULL); >+static CLASS_DEVICE_ATTR(auth_last, S_IRUGO, lpfc_auth_last_show, NULL); >+static CLASS_DEVICE_ATTR(auth_next, S_IRUGO, lpfc_auth_next_show, NULL); >+ >+static int >+lpfc_parse_wwn(const char *ns, uint8_t *nm) >+{ >+ unsigned int i, j; >+ memset(nm, 0, 8); >+ >+ /* Validate and store the new name */ >+ for (i=0, j=0; i < 16; i++) { >+ if ((*ns >= 'a') && (*ns <= 'f')) >+ j = ((j << 4) | ((*ns++ -'a') + 10)); >+ else if ((*ns >= 'A') && (*ns <= 'F')) >+ j = ((j << 4) | ((*ns++ -'A') + 10)); >+ else if ((*ns >= '0') && (*ns <= '9')) >+ j = ((j << 4) | (*ns++ -'0')); >+ else >+ return -EINVAL; >+ if (i % 2) { >+ nm[i/2] = j & 0xff; >+ j = 0; >+ } >+ } >+ >+ return 0; >+} > > > static char *lpfc_soft_wwn_key = "C99G71SL8032A"; >@@ -971,6 +1179,14 @@ lpfc_soft_wwpn_store(struct class_device > unsigned int i, j, cnt=count; > u8 wwpn[8]; > >+ if (!phba->cfg_enable_hba_reset) >+ return -EACCES; >+ spin_lock_irq(&phba->hbalock); >+ if (phba->over_temp_state == HBA_OVER_TEMP) { >+ spin_unlock_irq(&phba->hbalock); >+ return -EACCES; >+ } >+ spin_unlock_irq(&phba->hbalock); > /* count may include a LF at end of string */ > if (buf[cnt-1] == '\n') > cnt--; >@@ -1102,7 +1318,13 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mod > " 2 - select SLI-2 even on SLI-3 capable HBAs," > " 3 - select SLI-3"); > >-LPFC_ATTR_R(enable_npiv, 0, 0, 1, "Enable NPIV functionality"); >+int lpfc_enable_npiv = 0; >+module_param(lpfc_enable_npiv, int, 0); >+MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); >+lpfc_param_show(enable_npiv); >+lpfc_param_init(enable_npiv, 0, 0, 1); >+static 
CLASS_DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, >+ lpfc_enable_npiv_show, NULL); > > /* > # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear >@@ -1131,7 +1353,7 @@ lpfc_nodev_tmo_init(struct lpfc_vport *v > vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; > if (val != LPFC_DEF_DEVLOSS_TMO) > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0402 Ignoring nodev_tmo module " >+ "0407 Ignoring nodev_tmo module " > "parameter because devloss_tmo is " > "set.\n"); > return 0; >@@ -1159,7 +1381,7 @@ lpfc_update_rport_devloss_tmo(struct lpf > shost = lpfc_shost_from_vport(vport); > spin_lock_irq(shost->host_lock); > list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) >- if (ndlp->rport) >+ if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport) > ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; > spin_unlock_irq(shost->host_lock); > } >@@ -1186,12 +1408,91 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vp > val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); > return -EINVAL; > } >- > lpfc_vport_param_store(nodev_tmo) >- > static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR, > lpfc_nodev_tmo_show, lpfc_nodev_tmo_store); > >+static ssize_t >+lpfc_authenticate (struct class_device *cdev, const char *buf, size_t count) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; >+ struct lpfc_hba *phba = vport->phba; >+ struct lpfc_nodelist *ndlp; >+ int status; >+ struct lpfc_name wwpn; >+ >+ if (lpfc_parse_wwn(buf, wwpn.u.wwn)) >+ return -EINVAL; >+ >+ if (vport->port_state == LPFC_VPORT_FAILED) { >+ lpfc_issue_lip(shost); >+ return strlen(buf); >+ } >+ if ((vport->fc_flag & FC_OFFLINE_MODE) || >+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) || >+ (!vport->cfg_enable_auth)) >+ return -EPERM; >+ >+ /* If vport already in the middle of authentication do not restart */ >+ if ((vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE) || >+ (vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE) || >+ 
(vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY)) >+ return -EAGAIN; >+ >+ if (wwn_to_u64(wwpn.u.wwn) == AUTH_FABRIC_WWN) >+ ndlp = lpfc_findnode_did(vport, Fabric_DID); >+ else >+ ndlp = lpfc_findnode_wwnn(vport, &wwpn); >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) >+ return -EPERM; >+ status = lpfc_start_node_authentication(ndlp); >+ if (status) >+ return status; >+ return strlen(buf); >+} >+static CLASS_DEVICE_ATTR(lpfc_authenticate, S_IRUGO | S_IWUSR, >+ NULL, lpfc_authenticate); >+ >+static ssize_t >+lpfc_update_auth_config (struct class_device *cdev, const char *buf, >+ size_t count) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; >+ struct lpfc_hba *phba = vport->phba; >+ struct lpfc_nodelist *ndlp; >+ struct lpfc_name wwpn; >+ int status; >+ >+ if (lpfc_parse_wwn(buf, wwpn.u.wwn)) >+ return -EINVAL; >+ >+ if ((vport->fc_flag & FC_OFFLINE_MODE) || >+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) || >+ (!vport->cfg_enable_auth)) >+ return -EPERM; >+ >+ /* If vport already in the middle of authentication do not restart */ >+ if ((vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE) || >+ (vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE) || >+ (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY)) >+ return -EAGAIN; >+ >+ if (wwn_to_u64(wwpn.u.wwn) == AUTH_FABRIC_WWN) >+ ndlp = lpfc_findnode_did(vport, Fabric_DID); >+ else >+ ndlp = lpfc_findnode_wwnn(vport, &wwpn); >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) >+ return -EPERM; >+ status = lpfc_get_auth_config(ndlp, &wwpn); >+ if (status) >+ return -EPERM; >+ return strlen(buf); >+} >+static CLASS_DEVICE_ATTR(lpfc_update_auth_config, S_IRUGO | S_IWUSR, >+ NULL, lpfc_update_auth_config); >+ > /* > # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that > # disappear until the timer expires. Value range is [0,255]. 
Default >@@ -1248,6 +1549,13 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, > "Verbose logging bit-mask"); > > /* >+# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters >+# objects that have been registered with the nameserver after login. >+*/ >+LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1, >+ "Deregister nameserver objects before LOGO"); >+ >+/* > # lun_queue_depth: This parameter is used to limit the number of outstanding > # commands per FCP LUN. Value range is [1,128]. Default value is 30. > */ >@@ -1299,7 +1607,7 @@ lpfc_restrict_login_init(struct lpfc_vpo > { > if (val < 0 || val > 1) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0449 lpfc_restrict_login attribute cannot " >+ "0422 lpfc_restrict_login attribute cannot " > "be set to %d, allowed range is [0, 1]\n", > val); > vport->cfg_restrict_login = 1; >@@ -1318,7 +1626,7 @@ lpfc_restrict_login_set(struct lpfc_vpor > { > if (val < 0 || val > 1) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0450 lpfc_restrict_login attribute cannot " >+ "0425 lpfc_restrict_login attribute cannot " > "be set to %d, allowed range is [0, 1]\n", > val); > vport->cfg_restrict_login = 1; >@@ -1369,7 +1677,33 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, > # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. > # Default value is 0. 
> */ >-LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology"); >+static int >+lpfc_topology_set(struct lpfc_hba *phba, int val) >+{ >+ int err; >+ uint32_t prev_val; >+ if (val >= 0 && val <= 6) { >+ prev_val = phba->cfg_topology; >+ phba->cfg_topology = val; >+ err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); >+ if (err) >+ phba->cfg_topology = prev_val; >+ return err; >+ } >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "%d:0467 lpfc_topology attribute cannot be set to %d, " >+ "allowed range is [0, 6]\n", >+ phba->brd_no, val); >+ return -EINVAL; >+} >+static int lpfc_topology = 0; >+module_param(lpfc_topology, int, 0); >+MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology"); >+lpfc_param_show(topology) >+lpfc_param_init(topology, 0, 0, 6) >+lpfc_param_store(topology) >+static CLASS_DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, >+ lpfc_topology_show, lpfc_topology_store); > > /* > # lpfc_link_speed: Link speed selection for initializing the Fibre Channel >@@ -1381,7 +1715,59 @@ LPFC_ATTR_RW(topology, 0, 0, 6, "Select > # 8 = 8 Gigabaud > # Value range is [0,8]. Default value is 0. 
> */ >-LPFC_ATTR_R(link_speed, 0, 0, 8, "Select link speed"); >+static int >+lpfc_link_speed_set(struct lpfc_hba *phba, int val) >+{ >+ int err; >+ uint32_t prev_val; >+ >+ if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || >+ ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || >+ ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || >+ ((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || >+ ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb))) >+ return -EINVAL; >+ >+ if ((val >= 0 && val <= 8) >+ && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { >+ prev_val = phba->cfg_link_speed; >+ phba->cfg_link_speed = val; >+ err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); >+ if (err) >+ phba->cfg_link_speed = prev_val; >+ return err; >+ } >+ >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "%d:0469 lpfc_link_speed attribute cannot be set to %d, " >+ "allowed range is [0, 8]\n", >+ phba->brd_no, val); >+ return -EINVAL; >+} >+ >+static int lpfc_link_speed = 0; >+module_param(lpfc_link_speed, int, 0); >+MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); >+lpfc_param_show(link_speed) >+static int >+lpfc_link_speed_init(struct lpfc_hba *phba, int val) >+{ >+ if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED) >+ && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { >+ phba->cfg_link_speed = val; >+ return 0; >+ } >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "0405 lpfc_link_speed attribute cannot " >+ "be set to %d, allowed values are " >+ "["LPFC_LINK_SPEED_STRING"]\n", val); >+ phba->cfg_link_speed = 0; >+ return -EINVAL; >+} >+ >+lpfc_param_store(link_speed) >+static CLASS_DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, >+ lpfc_link_speed_show, lpfc_link_speed_store); > > /* > # lpfc_fcp_class: Determines FC class to use for the FCP protocol. >@@ -1475,10 +1861,127 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, > # support this feature > # 0 = MSI disabled (default) > # 1 = MSI enabled >-# Value range is [0,1]. Default value is 0. >+# 2 = MSI-X enabled >+# Value range is [0,2]. 
Default value is 0. > */ >-LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); >+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " >+ "MSI-X (2), if possible"); > >+/* >+# lpfc_enable_auth: controls FC Authentication. >+# 0 = Authentication OFF >+# 1 = Authentication ON >+# Value range [0,1]. Default value is 0. >+*/ >+static int lpfc_enable_auth = 0; >+module_param(lpfc_enable_auth, int, 0); >+MODULE_PARM_DESC(lpfc_enable_auth, "Enable FC Authentication"); >+lpfc_vport_param_show(enable_auth); >+lpfc_vport_param_init(enable_auth, 0, 0, 1); >+static int >+lpfc_enable_auth_set(struct lpfc_vport *vport, int val) >+{ >+ if (val == vport->cfg_enable_auth) >+ return 0; >+ if (val == 0) { >+ spin_lock_irq(&fc_security_user_lock); >+ list_del(&vport->sc_users); >+ spin_unlock_irq(&fc_security_user_lock); >+ vport->cfg_enable_auth = val; >+ lpfc_fc_queue_security_work(vport, >+ &vport->sc_offline_work); >+ return 0; >+ } else if (val == 1) { >+ spin_lock_irq(&fc_security_user_lock); >+ list_add_tail(&vport->sc_users, &fc_security_user_list); >+ spin_unlock_irq(&fc_security_user_lock); >+ vport->cfg_enable_auth = val; >+ lpfc_fc_queue_security_work(vport, >+ &vport->sc_online_work); >+ return 0; >+ } >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >+ "0426 lpfc_enable_auth attribute cannot be set to %d, " >+ "allowed range is [0, 1]\n", val); >+ return -EINVAL; >+} >+lpfc_vport_param_store(enable_auth); >+static CLASS_DEVICE_ATTR(lpfc_enable_auth, S_IRUGO | S_IWUSR, >+ lpfc_enable_auth_show, lpfc_enable_auth_store); >+ >+/* >+# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. >+# 0 = HBA resets disabled >+# 1 = HBA resets enabled (default) >+# Value range is [0,1]. Default value is 1. >+*/ >+LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); >+ >+/* >+# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer.. 
>+# 0 = HBA Heartbeat disabled >+# 1 = HBA Heartbeat enabled (default) >+# Value range is [0,1]. Default value is 1. >+*/ >+LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); >+ >+/* >+ * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count >+ * This value can be set to values between 64 and 256. The default value is >+ * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer >+ * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). >+ */ >+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, >+ LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); >+ >+/* >+# lpfc_pci_max_read: Maximum DMA read byte count. This parameter can have >+# values 512, 1024, 2048, 4096. Default value is 2048. >+*/ >+static int lpfc_pci_max_read = 2048; >+module_param(lpfc_pci_max_read, int, 0); >+MODULE_PARM_DESC(lpfc_pci_max_read, >+ "Maximum DMA read byte count. Allowed values:" >+ " 512,1024,2048,4096." ); >+static ssize_t >+lpfc_pci_max_read_show(struct class_device *cdev, char *buf) >+{ >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; >+ uint32_t val = 0; >+ val = phba->cfg_pci_max_read; >+ return snprintf(buf, PAGE_SIZE, "%d\n", val); >+} >+ >+static int >+lpfc_pci_max_read_init(struct lpfc_hba *phba, int val) >+{ >+ phba->cfg_pci_max_read = 2048; >+ if ((val == 512) || (val == 1024) || (val == 2048) >+ || (val == 4096)) >+ phba->cfg_pci_max_read = val; >+ return 0; >+} >+ >+static int >+lpfc_pci_max_read_set(struct lpfc_hba *phba, int val) >+{ >+ uint32_t prev_val; >+ int ret; >+ >+ prev_val = phba->cfg_pci_max_read; >+ phba->cfg_pci_max_read = val; >+ if ((ret = lpfc_sli_set_dma_length(phba, 0))) { >+ phba->cfg_pci_max_read = prev_val; >+ return ret; >+ } else >+ return 0; >+} >+ >+lpfc_param_store(pci_max_read) >+ >+static CLASS_DEVICE_ATTR(lpfc_pci_max_read, S_IRUGO | S_IWUSR, >+ lpfc_pci_max_read_show, 
lpfc_pci_max_read_store); > > > struct class_device_attribute *lpfc_hba_attrs[] = { >@@ -1493,10 +1996,13 @@ struct class_device_attribute *lpfc_hba_ > &class_device_attr_option_rom_version, > &class_device_attr_state, > &class_device_attr_num_discovered_ports, >+ &class_device_attr_menlo_mgmt_mode, > &class_device_attr_lpfc_drvr_version, >+ &class_device_attr_lpfc_temp_sensor, > &class_device_attr_lpfc_log_verbose, > &class_device_attr_lpfc_lun_queue_depth, > &class_device_attr_lpfc_hba_queue_depth, >+ &class_device_attr_lpfc_pci_max_read, > &class_device_attr_lpfc_peer_port_login, > &class_device_attr_lpfc_nodev_tmo, > &class_device_attr_lpfc_devloss_tmo, >@@ -1515,6 +2021,7 @@ struct class_device_attribute *lpfc_hba_ > &class_device_attr_lpfc_max_luns, > &class_device_attr_lpfc_enable_npiv, > &class_device_attr_nport_evt_cnt, >+ &class_device_attr_management_version, > &class_device_attr_board_mode, > &class_device_attr_max_vpi, > &class_device_attr_used_vpi, >@@ -1527,18 +2034,25 @@ struct class_device_attribute *lpfc_hba_ > &class_device_attr_lpfc_poll, > &class_device_attr_lpfc_poll_tmo, > &class_device_attr_lpfc_use_msi, >+ &class_device_attr_lpfc_enable_auth, >+ &class_device_attr_lpfc_authenticate, >+ &class_device_attr_lpfc_update_auth_config, > &class_device_attr_lpfc_soft_wwnn, > &class_device_attr_lpfc_soft_wwpn, > &class_device_attr_lpfc_soft_wwn_enable, >+ &class_device_attr_lpfc_enable_hba_reset, >+ &class_device_attr_lpfc_enable_hba_heartbeat, >+ &class_device_attr_lpfc_sg_seg_cnt, > NULL, > }; > >+ > struct class_device_attribute *lpfc_vport_attrs[] = { > &class_device_attr_info, > &class_device_attr_state, > &class_device_attr_num_discovered_ports, > &class_device_attr_lpfc_drvr_version, >- >+ &class_device_attr_lpfc_enable_auth, > &class_device_attr_lpfc_log_verbose, > &class_device_attr_lpfc_lun_queue_depth, > &class_device_attr_lpfc_nodev_tmo, >@@ -1551,7 +2065,9 @@ struct class_device_attribute *lpfc_vpor > &class_device_attr_lpfc_fdmi_on, > 
&class_device_attr_lpfc_max_luns, > &class_device_attr_nport_evt_cnt, >+ &class_device_attr_management_version, > &class_device_attr_npiv_info, >+ &class_device_attr_lpfc_enable_da_id, > NULL, > }; > >@@ -1566,21 +2082,23 @@ sysfs_ctlreg_write(struct kobject *kobj, > struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; > struct lpfc_hba *phba = vport->phba; > >- if ((off + count) > FF_REG_AREA_SIZE) >+ if ((off + count) > FF_REG_AREA_SIZE + LPFC_REG_WRITE_KEY_SIZE) > return -ERANGE; > >- if (count == 0) return 0; >+ if (count <= LPFC_REG_WRITE_KEY_SIZE) >+ return 0; > > if (off % 4 || count % 4 || (unsigned long)buf % 4) > return -EINVAL; > >- if (!(vport->fc_flag & FC_OFFLINE_MODE)) { >- return -EPERM; >- } >+ /* This is to protect HBA registers from accidental writes. */ >+ if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE)) >+ return -EINVAL; > > spin_lock_irq(&phba->hbalock); >- for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) >- writel(*((uint32_t *)(buf + buf_off)), >+ for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE; >+ buf_off += sizeof(uint32_t)) >+ writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)), > phba->ctrl_regs_memmap_p + off + buf_off); > > spin_unlock_irq(&phba->hbalock); >@@ -1627,24 +2145,215 @@ static struct bin_attribute sysfs_ctlreg > .attr = { > .name = "ctlreg", > .mode = S_IRUSR | S_IWUSR, >+ .owner = THIS_MODULE, > }, > .size = 256, > .read = sysfs_ctlreg_read, > .write = sysfs_ctlreg_write, > }; > >+static struct lpfc_sysfs_mbox * >+lpfc_get_sysfs_mbox(struct lpfc_hba *phba, uint8_t create) >+{ >+ struct lpfc_sysfs_mbox *sysfs_mbox; >+ pid_t pid; >+ >+ pid = current->pid; >+ >+ spin_lock_irq(&phba->hbalock); >+ list_for_each_entry(sysfs_mbox, &phba->sysfs_mbox_list, list) { >+ if (sysfs_mbox->pid == pid) { >+ spin_unlock_irq(&phba->hbalock); >+ return sysfs_mbox; >+ } >+ } >+ if (!create) { >+ spin_unlock_irq(&phba->hbalock); >+ return NULL; >+ } >+ 
spin_unlock_irq(&phba->hbalock); >+ sysfs_mbox = kzalloc(sizeof(struct lpfc_sysfs_mbox), >+ GFP_KERNEL); >+ if (!sysfs_mbox) >+ return NULL; >+ sysfs_mbox->state = SMBOX_IDLE; >+ sysfs_mbox->pid = pid; >+ spin_lock_irq(&phba->hbalock); >+ list_add_tail(&sysfs_mbox->list, &phba->sysfs_mbox_list); >+ >+ spin_unlock_irq(&phba->hbalock); >+ return sysfs_mbox; >+ >+} > > static void >-sysfs_mbox_idle(struct lpfc_hba *phba) >+sysfs_mbox_idle(struct lpfc_hba *phba, >+ struct lpfc_sysfs_mbox *sysfs_mbox) > { >- phba->sysfs_mbox.state = SMBOX_IDLE; >- phba->sysfs_mbox.offset = 0; >- >- if (phba->sysfs_mbox.mbox) { >- mempool_free(phba->sysfs_mbox.mbox, >+ list_del_init(&sysfs_mbox->list); >+ if (sysfs_mbox->mbox) { >+ mempool_free(sysfs_mbox->mbox, > phba->mbox_mem_pool); >- phba->sysfs_mbox.mbox = NULL; > } >+ >+ if (sysfs_mbox->mbext) >+ kfree(sysfs_mbox->mbext); >+ >+ /* If txmit buffer allocated free txmit buffer */ >+ if (sysfs_mbox->txmit_buff) { >+ if (sysfs_mbox->txmit_buff->virt) >+ __lpfc_mbuf_free(phba, >+ sysfs_mbox->txmit_buff->virt, >+ sysfs_mbox->txmit_buff->phys); >+ kfree(sysfs_mbox->txmit_buff); >+ } >+ >+ /* If rcv buffer allocated free txmit buffer */ >+ if (sysfs_mbox->rcv_buff) { >+ if (sysfs_mbox->rcv_buff->virt) >+ __lpfc_mbuf_free(phba, >+ sysfs_mbox->rcv_buff->virt, >+ sysfs_mbox->rcv_buff->phys); >+ kfree(sysfs_mbox->rcv_buff); >+ } >+ >+ kfree(sysfs_mbox); >+} >+ >+static size_t >+lpfc_syfs_mbox_copy_rcv_buff(struct lpfc_hba *phba, >+ struct lpfc_sysfs_mbox *sysfs_mbox, >+ char *buf, loff_t off, size_t count) >+{ >+ uint32_t size; >+ spin_lock_irq(&phba->hbalock); >+ if (!sysfs_mbox->mbox) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ if (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG) >+ size = sysfs_mbox->mbox->mb.un. >+ varRdEventLog.rcv_bde64.tus.f.bdeSize; >+ else >+ size = sysfs_mbox->mbox->mb.un. 
>+ varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize; >+ >+ >+ if ((count + off) > size) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ if (count > LPFC_BPL_SIZE) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ if (sysfs_mbox->extoff != off) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ memcpy(buf, (uint8_t *) sysfs_mbox->rcv_buff->virt + off, count); >+ sysfs_mbox->extoff = off + count; >+ >+ if (sysfs_mbox->extoff >= size) >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ >+ spin_unlock_irq(&phba->hbalock); >+ >+ return count; >+} >+ >+static size_t >+lpfc_syfs_mbox_copy_extdata(struct lpfc_hba *phba, >+ struct lpfc_sysfs_mbox * sysfs_mbox, >+ char *buf, loff_t off, size_t count) >+{ >+ uint32_t size; >+ >+ spin_lock_irq(&phba->hbalock); >+ if (!sysfs_mbox->mbox) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ size = sysfs_mbox->mbox_data.out_ext_wlen * sizeof(uint32_t); >+ >+ if ((count + off) > size) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ >+ if (size > MAILBOX_EXT_SIZE) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ >+ if (sysfs_mbox->extoff != off) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ memcpy(buf, (uint8_t *) sysfs_mbox->mbext + off, count); >+ sysfs_mbox->extoff = off + count; >+ >+ if (sysfs_mbox->extoff >= size) >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ >+ spin_unlock_irq(&phba->hbalock); >+ >+ return count; >+} >+ >+static size_t >+lpfc_syfs_mbox_copy_txmit_buff(struct lpfc_hba *phba, >+ struct lpfc_sysfs_mbox *sysfs_mbox, >+ char *buf, loff_t off, size_t count) >+{ >+ uint32_t size; >+ spin_lock_irq(&phba->hbalock); >+ if (!sysfs_mbox->mbox || >+ 
(sysfs_mbox->offset != sizeof(struct lpfc_sysfs_mbox_data))) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ size = sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.xmit_bde64. >+ tus.f.bdeSize; >+ >+ if ((count + off) > size) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ >+ if (size > LPFC_BPL_SIZE) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ >+ if (sysfs_mbox->extoff != off) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ memcpy((uint8_t *) sysfs_mbox->txmit_buff->virt + off, buf, count); >+ sysfs_mbox->extoff = off + count; >+ >+ spin_unlock_irq(&phba->hbalock); >+ >+ return count; > } > > static ssize_t >@@ -1657,9 +2366,9 @@ sysfs_mbox_write(struct kobject *kobj, s > struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; > struct lpfc_hba *phba = vport->phba; > struct lpfcMboxq *mbox = NULL; >- >- if ((count + off) > MAILBOX_CMD_SIZE) >- return -ERANGE; >+ struct lpfc_sysfs_mbox *sysfs_mbox; >+ uint8_t *ext; >+ uint32_t size; > > if (off % 4 || count % 4 || (unsigned long)buf % 4) > return -EINVAL; >@@ -1668,34 +2377,234 @@ sysfs_mbox_write(struct kobject *kobj, s > return 0; > > if (off == 0) { >+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 1); >+ if (sysfs_mbox == NULL) >+ return -ENOMEM; >+ /* >+ * If sysfs expect the reading of buffer and >+ * app doesnot know how to do it, use a different >+ * context. 
>+ */ >+ if (sysfs_mbox->state == SMBOX_READING_BUFF || >+ sysfs_mbox->state == SMBOX_READING_MBEXT) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 1); >+ if (sysfs_mbox == NULL) >+ return -ENOMEM; >+ } >+ } else { >+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 0); >+ if (sysfs_mbox == NULL) >+ return -EAGAIN; >+ } >+ >+ spin_lock_irq(&phba->hbalock); >+ >+ if (sysfs_mbox->state == SMBOX_WRITING_MBEXT) { >+ if (!sysfs_mbox->mbox || >+ (sysfs_mbox->offset != >+ sizeof(struct lpfc_sysfs_mbox_data))) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ size = sysfs_mbox->mbox_data.in_ext_wlen * sizeof(uint32_t); >+ >+ if ((count + sysfs_mbox->extoff) > size) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ >+ if (size > MAILBOX_EXT_SIZE) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ >+ if (!sysfs_mbox->mbext) { >+ spin_unlock_irq(&phba->hbalock); >+ >+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL); >+ if (!ext) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox->mbext = ext; >+ } >+ >+ if (sysfs_mbox->extoff != off) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EAGAIN; >+ } >+ >+ memcpy((uint8_t *) sysfs_mbox->mbext + off, buf, count); >+ sysfs_mbox->extoff = off + count; >+ >+ spin_unlock_irq(&phba->hbalock); >+ >+ return count; >+ } >+ >+ spin_unlock_irq(&phba->hbalock); >+ >+ if (sysfs_mbox->state == SMBOX_WRITING_BUFF) >+ return lpfc_syfs_mbox_copy_txmit_buff(phba, >+ sysfs_mbox, buf, off, count); >+ >+ if ((count + off) > sizeof(struct lpfc_sysfs_mbox_data)) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ 
spin_unlock_irq(&phba->hbalock); >+ return -ERANGE; >+ } >+ >+ if (off == 0) { > mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >- if (!mbox) >+ if (!mbox) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); > return -ENOMEM; >+ } > memset(mbox, 0, sizeof (LPFC_MBOXQ_t)); > } > > spin_lock_irq(&phba->hbalock); > > if (off == 0) { >- if (phba->sysfs_mbox.mbox) >+ if (sysfs_mbox->mbox) > mempool_free(mbox, phba->mbox_mem_pool); > else >- phba->sysfs_mbox.mbox = mbox; >- phba->sysfs_mbox.state = SMBOX_WRITING; >+ sysfs_mbox->mbox = mbox; >+ sysfs_mbox->state = SMBOX_WRITING; > } else { >- if (phba->sysfs_mbox.state != SMBOX_WRITING || >- phba->sysfs_mbox.offset != off || >- phba->sysfs_mbox.mbox == NULL) { >- sysfs_mbox_idle(phba); >+ if (sysfs_mbox->state != SMBOX_WRITING || >+ sysfs_mbox->offset != off || >+ sysfs_mbox->mbox == NULL) { >+ sysfs_mbox_idle(phba, sysfs_mbox); > spin_unlock_irq(&phba->hbalock); > return -EAGAIN; > } > } > >- memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, >+ memcpy((uint8_t *) & sysfs_mbox->mbox_data + off, > buf, count); > >- phba->sysfs_mbox.offset = off + count; >+ sysfs_mbox->offset = off + count; >+ >+ if (sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) { >+ memcpy((uint8_t *) & sysfs_mbox->mbox->mb, >+ (uint8_t *) &sysfs_mbox->mbox_data.mbox, >+ sizeof(MAILBOX_t)); >+ } >+ >+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) && >+ (sysfs_mbox->mbox_data.in_ext_wlen || >+ sysfs_mbox->mbox_data.out_ext_wlen)) { >+ >+ if (!sysfs_mbox->mbext) { >+ spin_unlock_irq(&phba->hbalock); >+ >+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL); >+ if (!ext) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox->mbext = ext; >+ } >+ } >+ >+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) && >+ 
(sysfs_mbox->mbox_data.in_ext_wlen)) { >+ sysfs_mbox->state = SMBOX_WRITING_MBEXT; >+ } >+ >+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) && >+ (sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64)) { >+ sysfs_mbox->state = SMBOX_WRITING_BUFF; >+ spin_unlock_irq(&phba->hbalock); >+ >+ /* Allocate txmit buffer */ >+ sysfs_mbox->txmit_buff = >+ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); >+ if (!sysfs_mbox->txmit_buff) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ INIT_LIST_HEAD(&sysfs_mbox->txmit_buff->list); >+ sysfs_mbox->txmit_buff->virt = >+ lpfc_mbuf_alloc(phba, 0, >+ &(sysfs_mbox->txmit_buff->phys)); >+ if (!sysfs_mbox->txmit_buff->virt) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ >+ /* Allocate rcv buffer */ >+ sysfs_mbox->rcv_buff = >+ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); >+ if (!sysfs_mbox->rcv_buff) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ INIT_LIST_HEAD(&sysfs_mbox->rcv_buff->list); >+ sysfs_mbox->rcv_buff->virt = >+ lpfc_mbuf_alloc(phba, 0, >+ &(sysfs_mbox->rcv_buff->phys)); >+ if (!sysfs_mbox->rcv_buff->virt) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ return count; >+ } >+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) && >+ (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG)) { >+ sysfs_mbox->state = SMBOX_WRITING; >+ spin_unlock_irq(&phba->hbalock); >+ >+ >+ /* Allocate rcv buffer */ >+ sysfs_mbox->rcv_buff = >+ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); >+ if (!sysfs_mbox->rcv_buff) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ 
INIT_LIST_HEAD(&sysfs_mbox->rcv_buff->list); >+ sysfs_mbox->rcv_buff->virt = >+ lpfc_mbuf_alloc(phba, 0, >+ &(sysfs_mbox->rcv_buff->phys)); >+ if (!sysfs_mbox->rcv_buff->virt) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -ENOMEM; >+ } >+ return count; >+ } > > spin_unlock_irq(&phba->hbalock); > >@@ -1712,6 +2621,42 @@ sysfs_mbox_read(struct kobject *kobj, st > struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; > struct lpfc_hba *phba = vport->phba; > int rc; >+ int wait_4_menlo_maint = 0; >+ struct lpfc_sysfs_mbox *sysfs_mbox; >+ ssize_t ret; >+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 0); >+ >+ if (!sysfs_mbox) >+ return -EPERM; >+ >+ /* >+ * If sysfs expect the writing of buffer and >+ * app doesnot know how to do it, fail the mailbox >+ * command. >+ */ >+ if ((sysfs_mbox->state == SMBOX_WRITING_BUFF) && >+ (sysfs_mbox->extoff == 0)) { >+ spin_lock_irq(&phba->hbalock); >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EINVAL; >+ } >+ if (sysfs_mbox->state == SMBOX_READING_BUFF) { >+ ret = lpfc_syfs_mbox_copy_rcv_buff(phba, sysfs_mbox, >+ buf, off, count); >+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, >+ "1254 mbox: cmd 0x%x, 0x%x ret %x\n", >+ sysfs_mbox->mbox->mb.mbxCommand, >+ sysfs_mbox->mbox->mb.un.varWords[0], >+ (uint32_t)ret); >+ return ret; >+ } >+ >+ if (sysfs_mbox->state == SMBOX_READING_MBEXT) { >+ ret = lpfc_syfs_mbox_copy_extdata(phba, sysfs_mbox, >+ buf, off, count); >+ return ret; >+ } > > if (off > MAILBOX_CMD_SIZE) > return -ERANGE; >@@ -1727,13 +2672,20 @@ sysfs_mbox_read(struct kobject *kobj, st > > spin_lock_irq(&phba->hbalock); > >+ if (phba->over_temp_state == HBA_OVER_TEMP) { >+ sysfs_mbox_idle(phba, sysfs_mbox); >+ spin_unlock_irq(&phba->hbalock); >+ return -EACCES; >+ } >+ > if (off == 0 && >- phba->sysfs_mbox.state == SMBOX_WRITING && >- phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { >+ ((sysfs_mbox->state == 
SMBOX_WRITING) || >+ (sysfs_mbox->state == SMBOX_WRITING_MBEXT) || >+ (sysfs_mbox->state == SMBOX_WRITING_BUFF) ) && >+ sysfs_mbox->offset >= 2 * sizeof(uint32_t)) { > >- switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { >+ switch (sysfs_mbox->mbox->mb.mbxCommand) { > /* Offline only */ >- case MBX_WRITE_NV: > case MBX_INIT_LINK: > case MBX_DOWN_LINK: > case MBX_CONFIG_LINK: >@@ -1744,18 +2696,17 @@ sysfs_mbox_read(struct kobject *kobj, st > case MBX_DUMP_CONTEXT: > case MBX_RUN_DIAGS: > case MBX_RESTART: >- case MBX_FLASH_WR_ULA: > case MBX_SET_MASK: >- case MBX_SET_SLIM: >- case MBX_SET_DEBUG: > if (!(vport->fc_flag & FC_OFFLINE_MODE)) { > printk(KERN_WARNING "mbox_read:Command 0x%x " > "is illegal in on-line state\n", >- phba->sysfs_mbox.mbox->mb.mbxCommand); >- sysfs_mbox_idle(phba); >+ sysfs_mbox->mbox->mb.mbxCommand); >+ sysfs_mbox_idle(phba,sysfs_mbox); > spin_unlock_irq(&phba->hbalock); > return -EPERM; > } >+ case MBX_WRITE_NV: >+ case MBX_WRITE_VPARMS: > case MBX_LOAD_SM: > case MBX_READ_NV: > case MBX_READ_CONFIG: >@@ -1766,13 +2717,70 @@ sysfs_mbox_read(struct kobject *kobj, st > case MBX_READ_LNK_STAT: > case MBX_DUMP_MEMORY: > case MBX_DOWN_LOAD: >- case MBX_UPDATE_CFG: > case MBX_KILL_BOARD: > case MBX_LOAD_AREA: > case MBX_LOAD_EXP_ROM: > case MBX_BEACON: > case MBX_DEL_LD_ENTRY: >+ case MBX_SET_DEBUG: >+ break; >+ case MBX_READ_EVENT_LOG_STATUS: >+ break; >+ case MBX_SET_VARIABLE: >+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, >+ "1226 mbox: set_variable 0x%x, 0x%x\n", >+ sysfs_mbox->mbox->mb.un.varWords[0], >+ sysfs_mbox->mbox->mb.un.varWords[1]); >+ if ((sysfs_mbox->mbox->mb.un.varWords[0] >+ == SETVAR_MLOMNT) >+ && (sysfs_mbox->mbox->mb.un.varWords[1] >+ == 1)) { >+ wait_4_menlo_maint = 1; >+ phba->wait_4_mlo_maint_flg = 1; >+ } >+ case MBX_WRITE_WWN: >+ case MBX_UPDATE_CFG: >+ break; >+ case MBX_RUN_BIU_DIAG64: >+ if (sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2. >+ xmit_bde64.tus.f.bdeSize) { >+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2. 
>+ xmit_bde64.addrHigh = >+ putPaddrHigh(sysfs_mbox-> >+ txmit_buff->phys); >+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2. >+ xmit_bde64.addrLow = >+ putPaddrLow(sysfs_mbox-> >+ txmit_buff->phys); >+ } >+ >+ if (sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2. >+ rcv_bde64.tus.f.bdeSize) { >+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2. >+ rcv_bde64.addrHigh = >+ putPaddrHigh(sysfs_mbox-> >+ rcv_buff->phys); >+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2. >+ rcv_bde64.addrLow = >+ putPaddrLow(sysfs_mbox->rcv_buff->phys); >+ } >+ break; >+ case MBX_WRITE_EVENT_LOG: > break; >+ case MBX_READ_EVENT_LOG: >+ >+ if (sysfs_mbox->mbox->mb.un.varRdEventLog. >+ rcv_bde64.tus.f.bdeSize) { >+ sysfs_mbox->mbox->mb.un.varRdEventLog. >+ rcv_bde64.addrHigh = >+ putPaddrHigh(sysfs_mbox-> >+ rcv_buff->phys); >+ sysfs_mbox->mbox->mb.un.varRdEventLog. >+ rcv_bde64.addrLow = >+ putPaddrLow(sysfs_mbox->rcv_buff->phys); >+ } >+ break; >+ > case MBX_READ_SPARM64: > case MBX_READ_LA: > case MBX_READ_LA64: >@@ -1781,22 +2789,46 @@ sysfs_mbox_read(struct kobject *kobj, st > case MBX_CONFIG_PORT: > case MBX_RUN_BIU_DIAG: > printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", >- phba->sysfs_mbox.mbox->mb.mbxCommand); >- sysfs_mbox_idle(phba); >+ sysfs_mbox->mbox->mb.mbxCommand); >+ sysfs_mbox_idle(phba,sysfs_mbox); > spin_unlock_irq(&phba->hbalock); > return -EPERM; > default: > printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", >- phba->sysfs_mbox.mbox->mb.mbxCommand); >- sysfs_mbox_idle(phba); >+ sysfs_mbox->mbox->mb.mbxCommand); >+ sysfs_mbox_idle(phba,sysfs_mbox); > spin_unlock_irq(&phba->hbalock); > return -EPERM; > } > >- phba->sysfs_mbox.mbox->vport = vport; >+ if (sysfs_mbox->mbox_data.in_ext_wlen || >+ sysfs_mbox->mbox_data.out_ext_wlen) { >+ sysfs_mbox->mbox->context2 = sysfs_mbox->mbext; >+ sysfs_mbox->mbox->in_ext_byte_len = >+ sysfs_mbox->mbox_data.in_ext_wlen * >+ sizeof(uint32_t); >+ sysfs_mbox->mbox->out_ext_byte_len = >+ sysfs_mbox->mbox_data.out_ext_wlen * >+ sizeof(uint32_t); >+ 
sysfs_mbox->mbox->mbox_offset_word = >+ sysfs_mbox->mbox_data.mboffset; >+ } > >+ /* If HBA encountered an error attention, allow only DUMP >+ * or RESTART mailbox commands until the HBA is restarted. >+ */ >+ if (phba->pport->stopped) >+ printk(KERN_WARNING "mbox_read: issued mailbox cmd " >+ "0x%x while in stopped state \n", >+ sysfs_mbox->mbox->mb.mbxCommand); >+ >+ sysfs_mbox->mbox->vport = vport; >+ >+ /* Don't allow mailbox commands to be sent when blocked >+ * or when in the middle of discovery >+ */ > if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { >- sysfs_mbox_idle(phba); >+ sysfs_mbox_idle(phba,sysfs_mbox); > spin_unlock_irq(&phba->hbalock); > return -EAGAIN; > } >@@ -1806,43 +2838,86 @@ sysfs_mbox_read(struct kobject *kobj, st > > spin_unlock_irq(&phba->hbalock); > rc = lpfc_sli_issue_mbox (phba, >- phba->sysfs_mbox.mbox, >+ sysfs_mbox->mbox, > MBX_POLL); > spin_lock_irq(&phba->hbalock); > > } else { > spin_unlock_irq(&phba->hbalock); > rc = lpfc_sli_issue_mbox_wait (phba, >- phba->sysfs_mbox.mbox, >+ sysfs_mbox->mbox, > lpfc_mbox_tmo_val(phba, >- phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); >+ sysfs_mbox->mbox->mb.mbxCommand) * HZ); > spin_lock_irq(&phba->hbalock); > } > > if (rc != MBX_SUCCESS) { > if (rc == MBX_TIMEOUT) { >- phba->sysfs_mbox.mbox = NULL; >+ sysfs_mbox->mbox = NULL; > } >- sysfs_mbox_idle(phba); >+ sysfs_mbox_idle(phba,sysfs_mbox); > spin_unlock_irq(&phba->hbalock); > return (rc == MBX_TIMEOUT) ? 
-ETIME : -ENODEV; > } >- phba->sysfs_mbox.state = SMBOX_READING; >+ if (wait_4_menlo_maint) { >+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, >+ "1229 waiting for menlo mnt\n"); >+ spin_unlock_irq(&phba->hbalock); >+ if (phba->wait_4_mlo_maint_flg) >+ wait_event_interruptible_timeout( >+ phba->wait_4_mlo_m_q, >+ phba->wait_4_mlo_maint_flg ==0, >+ 60 * HZ); >+ spin_lock_irq(&phba->hbalock); >+ if (phba->wait_4_mlo_maint_flg) { >+ sysfs_mbox_idle(phba,sysfs_mbox); >+ phba->wait_4_mlo_maint_flg = 0; >+ spin_unlock_irq(&phba->hbalock); >+ return -EINTR; >+ } else >+ spin_unlock_irq(&phba->hbalock); >+ >+ spin_lock_irq(&phba->hbalock); >+ if (phba->wait_4_mlo_maint_flg != 0) { >+ sysfs_mbox_idle(phba,sysfs_mbox); >+ phba->wait_4_mlo_maint_flg = 0; >+ spin_unlock_irq(&phba->hbalock); >+ return -ETIME; >+ } >+ >+ } >+ sysfs_mbox->state = SMBOX_READING; > } >- else if (phba->sysfs_mbox.offset != off || >- phba->sysfs_mbox.state != SMBOX_READING) { >- printk(KERN_WARNING "mbox_read: Bad State\n"); >- sysfs_mbox_idle(phba); >+ else if (sysfs_mbox->offset != off || >+ sysfs_mbox->state != SMBOX_READING) { >+ sysfs_mbox_idle(phba,sysfs_mbox); > spin_unlock_irq(&phba->hbalock); > return -EAGAIN; > } > >- memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); >+ memcpy(buf, (uint8_t *) & sysfs_mbox->mbox->mb + off, count); > >- phba->sysfs_mbox.offset = off + count; >+ sysfs_mbox->offset = off + count; > >- if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE) >- sysfs_mbox_idle(phba); >+ if ((sysfs_mbox->offset == MAILBOX_CMD_SIZE) && >+ ((sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64) || >+ (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG))) { >+ sysfs_mbox->state = SMBOX_READING_BUFF; >+ sysfs_mbox->extoff = 0; >+ spin_unlock_irq(&phba->hbalock); >+ return count; >+ } >+ >+ if ((sysfs_mbox->offset == MAILBOX_CMD_SIZE) && >+ sysfs_mbox->mbox_data.out_ext_wlen) { >+ sysfs_mbox->state = SMBOX_READING_MBEXT; >+ sysfs_mbox->extoff = 0; >+ 
spin_unlock_irq(&phba->hbalock); >+ return count; >+ } >+ >+ if (sysfs_mbox->offset == MAILBOX_CMD_SIZE) >+ sysfs_mbox_idle(phba,sysfs_mbox); > > spin_unlock_irq(&phba->hbalock); > >@@ -1853,8 +2928,9 @@ static struct bin_attribute sysfs_mbox_a > .attr = { > .name = "mbox", > .mode = S_IRUSR | S_IWUSR, >+ .owner = THIS_MODULE, > }, >- .size = MAILBOX_CMD_SIZE, >+ .size = MAILBOX_MAX_XMIT_SIZE, > .read = sysfs_mbox_read, > .write = sysfs_mbox_write, > }; >@@ -1875,6 +2951,11 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport > if (error) > goto out_remove_ctlreg_attr; > >+ error = sysfs_create_bin_file(&shost->shost_classdev.kobj, >+ &sysfs_menlo_attr); >+ if (error) >+ goto out_remove_ctlreg_attr; >+ > return 0; > out_remove_ctlreg_attr: > sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr); >@@ -1889,6 +2970,7 @@ lpfc_free_sysfs_attr(struct lpfc_vport * > > sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_mbox_attr); > sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr); >+ sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_menlo_attr); > } > > >@@ -1993,7 +3075,8 @@ lpfc_get_host_speed(struct Scsi_Host *sh > fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; > break; > } >- } >+ } else >+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; > > spin_unlock_irq(shost->host_lock); > } >@@ -2013,7 +3096,7 @@ lpfc_get_host_fabric_name (struct Scsi_H > node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); > else > /* fabric is local port if there is no F/FL_Port */ >- node_name = wwn_to_u64(vport->fc_nodename.u.wwn); >+ node_name = 0; > > spin_unlock_irq(shost->host_lock); > >@@ -2220,7 +3303,8 @@ lpfc_get_node_by_target(struct scsi_targ > spin_lock_irq(shost->host_lock); > /* Search for this, mapped, target ID */ > list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { >- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && >+ if (NLP_CHK_NODE_ACT(ndlp) && >+ ndlp->nlp_state == NLP_STE_MAPPED_NODE && > starget->id == ndlp->nlp_sid) { > 
spin_unlock_irq(shost->host_lock); > return ndlp; >@@ -2337,8 +3421,6 @@ struct fc_function_template lpfc_transpo > .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, > .terminate_rport_io = lpfc_terminate_rport_io, > >- .vport_create = lpfc_vport_create, >- .vport_delete = lpfc_vport_delete, > .dd_fcvport_size = sizeof(struct lpfc_vport *), > }; > >@@ -2410,25 +3492,28 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) > lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type); > lpfc_ack0_init(phba, lpfc_ack0); > lpfc_topology_init(phba, lpfc_topology); >+ lpfc_pci_max_read_init(phba, lpfc_pci_max_read); > lpfc_link_speed_init(phba, lpfc_link_speed); > lpfc_poll_tmo_init(phba, lpfc_poll_tmo); > lpfc_enable_npiv_init(phba, lpfc_enable_npiv); > lpfc_use_msi_init(phba, lpfc_use_msi); >+ lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); >+ lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); > phba->cfg_poll = lpfc_poll; > phba->cfg_soft_wwnn = 0L; > phba->cfg_soft_wwpn = 0L; >- /* >- * The total number of segments is the configuration value plus 2 >- * since the IOCB need a command and response bde. >- */ >- phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2; >+ lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); >+ /* Also reinitialize the host templates with new values. */ >+ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; >+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; > /* > * Since the sg_tablesize is module parameter, the sg_dma_buf_size >- * used to create the sg_dma_buf_pool must be dynamically calculated >+ * used to create the sg_dma_buf_pool must be dynamically calculated. >+ * 2 segments are added since the IOCB needs a command and response bde. 
> */ > phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + > sizeof(struct fcp_rsp) + >- (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64)); >+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); > lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); > return; > } >@@ -2448,5 +3533,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vpor > lpfc_discovery_threads_init(vport, lpfc_discovery_threads); > lpfc_max_luns_init(vport, lpfc_max_luns); > lpfc_scan_down_init(vport, lpfc_scan_down); >+ lpfc_enable_da_id_init(vport, lpfc_enable_da_id); >+ lpfc_enable_auth_init(vport, lpfc_enable_auth); > return; > } >diff -urpN a/drivers/scsi/lpfc/lpfc_auth_access.c b/drivers/scsi/lpfc/lpfc_auth_access.c >--- a/drivers/scsi/lpfc/lpfc_auth_access.c 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_auth_access.c 2008-09-05 17:47:49.775876000 -0400 >@@ -0,0 +1,743 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006-2007 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. * >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. 
* >+ *******************************************************************/ >+#include <linux/blkdev.h> >+#include <linux/pci.h> >+#include <linux/kthread.h> >+#include <linux/interrupt.h> >+ >+#include <linux/module.h> >+#include <linux/init.h> >+#include <linux/sched.h> /* workqueue stuff, HZ */ >+#include <scsi/scsi_device.h> >+#include <scsi/scsi_host.h> >+#include <scsi/scsi_transport.h> >+#include <scsi/scsi_transport_fc.h> >+#include <scsi/scsi_cmnd.h> >+#include <linux/time.h> >+#include <linux/jiffies.h> >+#include <linux/security.h> >+#include <net/sock.h> >+#include <net/netlink.h> >+ >+#include <scsi/scsi.h> >+ >+#include "lpfc_hw.h" >+#include "lpfc_sli.h" >+#include "lpfc_disc.h" >+#include "lpfc_scsi.h" >+#include "lpfc.h" >+#include "lpfc_logmsg.h" >+#include "lpfc_crtn.h" >+#include "lpfc_vport.h" >+#include "lpfc_auth_access.h" >+ >+/* fc security */ >+char security_work_q_name[KOBJ_NAME_LEN]; >+struct workqueue_struct *security_work_q = NULL; >+struct sock *fc_nl_sock; >+struct list_head fc_security_user_list; >+int fc_service_state = FC_SC_SERVICESTATE_UNKNOWN; >+static int fc_service_pid; >+DEFINE_SPINLOCK(fc_security_user_lock); >+ >+static inline struct lpfc_vport * >+lpfc_fc_find_vport(unsigned long host_no) >+{ >+ struct lpfc_vport *vport; >+ struct Scsi_Host *shost; >+ >+ list_for_each_entry(vport, &fc_security_user_list, sc_users) { >+ shost = lpfc_shost_from_vport(vport); >+ if (shost && (shost->host_no == host_no)) >+ return vport; >+ } >+ >+ return NULL; >+} >+ >+ >+/** >+ * lpfc_fc_sc_add_timer >+ * >+ * >+ **/ >+ >+void >+lpfc_fc_sc_add_timer(struct fc_security_request *req, int timeout, >+ void (*complete)(struct fc_security_request *)) >+{ >+ >+ init_timer(&req->timer); >+ >+ >+ req->timer.data = (unsigned long)req; >+ req->timer.expires = jiffies + timeout; >+ req->timer.function = (void (*)(unsigned long)) complete; >+ >+ add_timer(&req->timer); >+} >+/** >+ * lpfc_fc_sc_req_times_out >+ * >+ * >+ **/ >+ >+void 
>+lpfc_fc_sc_req_times_out(struct fc_security_request *req) >+{ >+ >+ unsigned long flags; >+ int found = 0; >+ struct fc_security_request *fc_sc_req; >+ struct lpfc_vport *vport = req->vport; >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ >+ if (!req) >+ return; >+ >+ spin_lock_irqsave(shost->host_lock, flags); >+ >+ /* To avoid a completion race check to see if request is on the list */ >+ >+ list_for_each_entry(fc_sc_req, &vport->sc_response_wait_queue, rlist) >+ if (fc_sc_req == req) { >+ found = 1; >+ break; >+ } >+ >+ if (!found) { >+ spin_unlock_irqrestore(shost->host_lock, flags); >+ return; >+ } >+ >+ list_del(&fc_sc_req->rlist); >+ >+ spin_unlock_irqrestore(shost->host_lock, flags); >+ >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY, >+ "1019 Request tranid %d timed out\n", >+ fc_sc_req->tran_id); >+ >+ switch (fc_sc_req->req_type) { >+ >+ case FC_NL_SC_GET_CONFIG_REQ: >+ lpfc_security_config(shost, -ETIMEDOUT, >+ fc_sc_req->data); >+ break; >+ >+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ: >+ lpfc_dhchap_make_challenge(shost, -ETIMEDOUT, >+ fc_sc_req->data, 0); >+ break; >+ >+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ: >+ lpfc_dhchap_make_response(shost, -ETIMEDOUT, >+ fc_sc_req->data, 0); >+ break; >+ >+ case FC_NL_SC_DHCHAP_AUTHENTICATE_REQ: >+ lpfc_dhchap_authenticate(shost, -ETIMEDOUT, fc_sc_req->data, 0); >+ break; >+ } >+ >+ kfree(fc_sc_req); >+ >+} >+ >+ >+static inline struct fc_security_request * >+lpfc_fc_find_sc_request(u32 tran_id, u32 type, struct lpfc_vport *vport) >+{ >+ struct fc_security_request *fc_sc_req; >+ >+ list_for_each_entry(fc_sc_req, &vport->sc_response_wait_queue, rlist) >+ if (fc_sc_req->tran_id == tran_id && >+ fc_sc_req->req_type == type) >+ return fc_sc_req; >+ return NULL; >+} >+ >+ >+ >+/** >+ * lpfc_fc_sc_request >+ * >+ * >+ **/ >+ >+int >+lpfc_fc_sc_request(struct lpfc_vport *vport, >+ u32 msg_type, >+ struct fc_auth_req *auth_req, >+ u32 auth_req_len, /* includes length of struct fc_auth_req */ >+ 
struct fc_auth_rsp *auth_rsp, >+ u32 auth_rsp_len) /* includes length of struct fc_auth_rsp */ >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ struct fc_security_request *fc_sc_req; >+ struct sk_buff *skb; >+ struct nlmsghdr *nlh; >+ struct fc_nl_sc_message *fc_nl_sc_msg; >+ const char *fn; >+ unsigned long flags; >+ u32 len; >+ int err = 0; >+ u32 seq = ++vport->sc_tran_id; >+ >+ if (fc_service_state != FC_SC_SERVICESTATE_ONLINE) >+ return -EINVAL; >+ >+ if (vport->port_state == FC_PORTSTATE_DELETED) >+ return -EINVAL; >+ >+ fc_sc_req = kzalloc(sizeof(struct fc_security_request), GFP_KERNEL); >+ >+ if (!fc_sc_req) >+ return -ENOMEM; >+ >+ fc_sc_req->req_type = msg_type; >+ fc_sc_req->data = auth_rsp; >+ fc_sc_req->data_len = auth_rsp_len; >+ fc_sc_req->vport = vport; >+ >+ len = NLMSG_SPACE(sizeof(struct fc_nl_sc_message) + auth_req_len); >+ >+ skb = alloc_skb(len, GFP_KERNEL); >+ if (!skb) { >+ err = -ENOBUFS; >+ fn = "alloc_skb"; >+ goto send_fail; >+ } >+ >+ nlh = nlmsg_put(skb, fc_service_pid, seq, FC_TRANSPORT_MSG, >+ len - sizeof(*nlh), 0); >+ if (!nlh) { >+ err = -ENOBUFS; >+ fn = "nlmsg_put"; >+ goto send_fail; >+ } >+ >+ fc_nl_sc_msg = NLMSG_DATA(nlh); >+ fc_nl_sc_msg->snlh.version = SCSI_NL_VERSION; >+ fc_nl_sc_msg->snlh.transport = SCSI_NL_TRANSPORT_FC; >+ fc_nl_sc_msg->snlh.magic = SCSI_NL_MAGIC; >+ fc_nl_sc_msg->snlh.msgtype = msg_type; >+ fc_nl_sc_msg->snlh.msglen = len; >+ fc_nl_sc_msg->data_len = auth_req_len; >+ if (auth_req_len) >+ memcpy(fc_nl_sc_msg->data, auth_req, auth_req_len); >+ >+ fc_nl_sc_msg->host_no = shost->host_no; >+ fc_nl_sc_msg->tran_id = seq; >+ fc_sc_req->tran_id = seq; >+ >+ spin_lock_irqsave(shost->host_lock, flags); >+ list_add_tail(&fc_sc_req->rlist, &vport->sc_response_wait_queue); >+ spin_unlock_irqrestore(shost->host_lock, flags); >+ >+ err = nlmsg_unicast(fc_nl_sock, skb, fc_service_pid); >+ if (err < 0) { >+ fn = "nlmsg_unicast"; >+ spin_lock_irqsave(shost->host_lock, flags); >+ 
list_del(&fc_sc_req->rlist); >+ spin_unlock_irqrestore(shost->host_lock, flags); >+ goto send_fail; >+ } >+ lpfc_fc_sc_add_timer(fc_sc_req, FC_SC_REQ_TIMEOUT, >+ lpfc_fc_sc_req_times_out); >+ >+ return 0; >+ >+send_fail: >+ >+ kfree(fc_sc_req); >+ >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY, >+ "1020 Dropped Message type %d to PID %d : %s : err " >+ "%d\n", msg_type, fc_service_pid, fn, err); >+ return err; >+ >+} >+ >+/** >+ * lpfc_fc_security_get_config >+ * >+ * >+ **/ >+ >+int >+lpfc_fc_security_get_config(struct Scsi_Host *shost, >+ struct fc_auth_req *auth_req, >+ u32 auth_req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 auth_rsp_len) >+{ >+ >+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata, >+ FC_NL_SC_GET_CONFIG_REQ, auth_req, >+ auth_req_len, auth_rsp, auth_rsp_len)); >+ >+} >+EXPORT_SYMBOL(lpfc_fc_security_get_config); >+ >+/** >+ * lpfc_fc_security_dhchap_make_challenge >+ * >+ * >+ **/ >+ >+int >+lpfc_fc_security_dhchap_make_challenge(struct Scsi_Host *shost, >+ struct fc_auth_req *auth_req, >+ u32 auth_req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 auth_rsp_len) >+{ >+ >+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata, >+ FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ, >+ auth_req, auth_req_len, auth_rsp, auth_rsp_len)); >+ >+} >+EXPORT_SYMBOL(lpfc_fc_security_dhchap_make_challenge); >+ >+/** >+ * lpfc_fc_security_dhchap_make_response >+ * >+ * >+ **/ >+ >+int >+lpfc_fc_security_dhchap_make_response(struct Scsi_Host *shost, >+ struct fc_auth_req *auth_req, >+ u32 auth_req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 auth_rsp_len) >+{ >+ >+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata, >+ FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ, >+ auth_req, auth_req_len, auth_rsp, auth_rsp_len)); >+ >+} >+EXPORT_SYMBOL(lpfc_fc_security_dhchap_make_response); >+ >+ >+/** >+ * lpfc_fc_security_dhchap_authenticate >+ * >+ * >+ **/ >+ >+int >+lpfc_fc_security_dhchap_authenticate(struct Scsi_Host *shost, >+ struct fc_auth_req 
*auth_req, >+ u32 auth_req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 auth_rsp_len) >+{ >+ >+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata, >+ FC_NL_SC_DHCHAP_AUTHENTICATE_REQ, >+ auth_req, auth_req_len, auth_rsp, auth_rsp_len)); >+ >+} >+EXPORT_SYMBOL(lpfc_fc_security_dhchap_authenticate); >+ >+/** >+ * lpfc_fc_queue_security_work - Queue work to the fc_host security workqueue. >+ * @shost: Pointer to Scsi_Host bound to fc_host. >+ * @work: Work to queue for execution. >+ * >+ * Return value: >+ * 1 - work queued for execution >+ * 0 - work is already queued >+ * -EINVAL - work queue doesn't exist >+ **/ >+int >+lpfc_fc_queue_security_work(struct lpfc_vport *vport, struct work_struct *work) >+{ >+ if (unlikely(!security_work_q)) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1021 ERROR: attempted to queue security work, " >+ "when no workqueue created.\n"); >+ dump_stack(); >+ >+ return -EINVAL; >+ } >+ >+ return queue_work(security_work_q, work); >+ >+} >+ >+ >+ >+ /** >+ * lpfc_fc_sc_schedule_notify_all >+ * >+ * >+ **/ >+ >+void >+lpfc_fc_sc_schedule_notify_all(int message) >+{ >+ struct lpfc_vport *vport; >+ unsigned long flags; >+ >+ spin_lock_irqsave(&fc_security_user_lock, flags); >+ >+ list_for_each_entry(vport, &fc_security_user_list, sc_users) { >+ >+ switch (message) { >+ >+ case FC_NL_SC_REG: >+ lpfc_fc_queue_security_work(vport, >+ &vport->sc_online_work); >+ break; >+ >+ case FC_NL_SC_DEREG: >+ lpfc_fc_queue_security_work(vport, >+ &vport->sc_offline_work); >+ break; >+ } >+ } >+ >+ spin_unlock_irqrestore(&fc_security_user_lock, flags); >+} >+ >+ >+ >+/** >+ * lpfc_fc_sc_security_online >+ * >+ * >+ **/ >+ >+void >+lpfc_fc_sc_security_online(struct work_struct *work) >+{ >+ struct lpfc_vport *vport = container_of(work, struct lpfc_vport, >+ sc_online_work); >+ lpfc_security_service_online(lpfc_shost_from_vport(vport)); >+ return; >+} >+ >+/** >+ * lpfc_fc_sc_security_offline >+ * >+ * >+ **/ >+void 
>+lpfc_fc_sc_security_offline(struct work_struct *work) >+{ >+ struct lpfc_vport *vport = container_of(work, struct lpfc_vport, >+ sc_offline_work); >+ lpfc_security_service_offline(lpfc_shost_from_vport(vport)); >+ return; >+} >+ >+ >+/** >+ * lpfc_fc_sc_process_msg >+ * >+ * >+ **/ >+static void >+lpfc_fc_sc_process_msg(struct work_struct *work) >+{ >+ struct fc_sc_msg_work_q_wrapper *wqw = >+ container_of(work, struct fc_sc_msg_work_q_wrapper, work); >+ >+ switch (wqw->msgtype) { >+ >+ case FC_NL_SC_GET_CONFIG_RSP: >+ lpfc_security_config(lpfc_shost_from_vport(wqw->fc_sc_req-> >+ vport), wqw->status, >+ wqw->fc_sc_req->data); >+ break; >+ >+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP: >+ lpfc_dhchap_make_challenge(lpfc_shost_from_vport(wqw-> >+ fc_sc_req->vport), wqw->status, >+ wqw->fc_sc_req->data, wqw->data_len); >+ break; >+ >+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP: >+ lpfc_dhchap_make_response(lpfc_shost_from_vport(wqw-> >+ fc_sc_req->vport), wqw->status, >+ wqw->fc_sc_req->data, wqw->data_len); >+ break; >+ >+ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP: >+ lpfc_dhchap_authenticate(lpfc_shost_from_vport(wqw->fc_sc_req-> >+ vport), >+ wqw->status, >+ wqw->fc_sc_req->data, wqw->data_len); >+ break; >+ } >+ >+ kfree(wqw->fc_sc_req); >+ kfree(wqw); >+ >+ return; >+} >+ >+ >+/** >+ * lpfc_fc_sc_schedule_msg >+ * >+ * >+ **/ >+ >+int >+lpfc_fc_sc_schedule_msg(struct fc_nl_sc_message *fc_nl_sc_msg, int rcvlen) >+{ >+ struct fc_security_request *fc_sc_req; >+ u32 req_type; >+ struct lpfc_vport *vport = 0; >+ int err = 0; >+ struct fc_sc_msg_work_q_wrapper *wqw; >+ unsigned long flags; >+ struct Scsi_Host *shost; >+ >+ spin_lock_irqsave(&fc_security_user_lock, flags); >+ >+ vport = lpfc_fc_find_vport(fc_nl_sc_msg->host_no); >+ >+ spin_unlock_irqrestore(&fc_security_user_lock, flags); >+ if (!vport) { >+ printk(KERN_WARNING >+ "%s: Host does not exist for msg type %x.\n", >+ __FUNCTION__, fc_nl_sc_msg->snlh.msgtype); >+ return -EBADR; >+ } >+ shost = 
lpfc_shost_from_vport(vport); >+ >+ if (vport->port_state == FC_PORTSTATE_DELETED) { >+ printk(KERN_WARNING >+ "%s: Host being deleted.\n", __FUNCTION__); >+ return -EBADR; >+ } >+ >+ wqw = kzalloc(sizeof(struct fc_sc_msg_work_q_wrapper), GFP_KERNEL); >+ >+ if (!wqw) >+ return -ENOMEM; >+ >+ switch (fc_nl_sc_msg->snlh.msgtype) { >+ case FC_NL_SC_GET_CONFIG_RSP: >+ req_type = FC_NL_SC_GET_CONFIG_REQ; >+ break; >+ >+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP: >+ req_type = FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ; >+ break; >+ >+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP: >+ req_type = FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ; >+ break; >+ >+ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP: >+ req_type = FC_NL_SC_DHCHAP_AUTHENTICATE_REQ; >+ break; >+ >+ default: >+ kfree(wqw); >+ return -EINVAL; >+ } >+ >+ spin_lock_irqsave(shost->host_lock, flags); >+ >+ fc_sc_req = lpfc_fc_find_sc_request(fc_nl_sc_msg->tran_id, >+ req_type, vport); >+ >+ if (!fc_sc_req) { >+ spin_unlock_irqrestore(shost->host_lock, flags); >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY, >+ "1022 Security request does not exist.\n"); >+ kfree(wqw); >+ return -EBADR; >+ } >+ >+ list_del(&fc_sc_req->rlist); >+ >+ spin_unlock_irqrestore(shost->host_lock, flags); >+ >+ del_singleshot_timer_sync(&fc_sc_req->timer); >+ >+ wqw->status = 0; >+ wqw->fc_sc_req = fc_sc_req; >+ wqw->data_len = rcvlen; >+ wqw->msgtype = fc_nl_sc_msg->snlh.msgtype; >+ >+ if (!fc_sc_req->data || >+ (fc_sc_req->data_len < fc_nl_sc_msg->data_len)) { >+ wqw->status = -ENOBUFS; >+ wqw->data_len = 0; >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY, >+ "1023 Warning - data may have been truncated. 
" >+ "data:%p reqdl:%x mesdl:%x\n", >+ fc_sc_req->data, >+ fc_sc_req->data_len, fc_nl_sc_msg->data_len); >+ } else { >+ memcpy(fc_sc_req->data, fc_nl_sc_msg->data, >+ fc_nl_sc_msg->data_len); >+ } >+ >+ INIT_WORK(&wqw->work, lpfc_fc_sc_process_msg); >+ lpfc_fc_queue_security_work(vport, &wqw->work); >+ >+ return err; >+} >+ >+static int >+lpfc_fc_handle_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int rcvlen) >+{ >+ struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh); >+ int err = 0; >+ int pid; >+ >+ pid = nlh->nlmsg_pid; >+ >+ switch (snlh->msgtype) { >+ >+ case FC_NL_SC_REG: >+ >+ fc_service_pid = nlh->nlmsg_pid; >+ fc_service_state = FC_SC_SERVICESTATE_ONLINE; >+ if (nlh->nlmsg_flags & NLM_F_ACK) >+ netlink_ack(skb, nlh, err); >+ skb_pull(skb, rcvlen); >+ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_REG); >+ break; >+ >+ case FC_NL_SC_DEREG: >+ >+ fc_service_pid = nlh->nlmsg_pid; >+ fc_service_state = FC_SC_SERVICESTATE_OFFLINE; >+ if (nlh->nlmsg_flags & NLM_F_ACK) >+ netlink_ack(skb, nlh, err); >+ skb_pull(skb, rcvlen); >+ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_DEREG); >+ break; >+ >+ case FC_NL_SC_GET_CONFIG_RSP: >+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP: >+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP: >+ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP: >+ >+ err = lpfc_fc_sc_schedule_msg((struct fc_nl_sc_message *)snlh, >+ rcvlen); >+ >+ if ((nlh->nlmsg_flags & NLM_F_ACK) || err) >+ netlink_ack(skb, nlh, err); >+ >+ skb_pull(skb, rcvlen); >+ break; >+ >+ default: >+ printk(KERN_WARNING "%s: unknown msg type 0x%x len %d\n", >+ __FUNCTION__, snlh->msgtype, rcvlen); >+ netlink_ack(skb, nlh, -EBADR); >+ skb_pull(skb, rcvlen); >+ break; >+ } >+ >+ return err; >+} >+ >+void >+lpfc_fc_nl_rcv_msg(struct sk_buff *skb) >+{ >+ struct nlmsghdr *nlh; >+ struct scsi_nl_hdr *snlh; >+ uint32_t rlen; >+ int err; >+ >+ while (skb->len >= NLMSG_SPACE(0)) { >+ err = 0; >+ >+ nlh = (struct nlmsghdr *) skb->data; >+ >+ if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*snlh))) || >+ (skb->len < 
nlh->nlmsg_len)) { >+ printk(KERN_WARNING "%s: discarding partial skb\n", >+ __FUNCTION__); >+ break; >+ } >+ >+ rlen = NLMSG_ALIGN(nlh->nlmsg_len); >+ if (rlen > skb->len) { >+ printk(KERN_WARNING "%s: rlen > skb->len\n", >+ __FUNCTION__); >+ rlen = skb->len; >+ } >+ >+ if (nlh->nlmsg_type != FC_TRANSPORT_MSG) { >+ printk(KERN_WARNING "%s: Not FC_TRANSPORT_MSG\n", >+ __FUNCTION__); >+ err = -EBADMSG; >+ goto next_msg; >+ } >+ >+ snlh = NLMSG_DATA(nlh); >+ if ((snlh->version != SCSI_NL_VERSION) || >+ (snlh->magic != SCSI_NL_MAGIC)) { >+ printk(KERN_WARNING "%s: Bad Version or Magic number\n", >+ __FUNCTION__); >+ err = -EPROTOTYPE; >+ goto next_msg; >+ } >+ >+next_msg: >+ if (err) { >+ printk(KERN_WARNING "%s: err %d\n", __FUNCTION__, err); >+ netlink_ack(skb, nlh, err); >+ skb_pull(skb, rlen); >+ continue; >+ } >+ >+ lpfc_fc_handle_nl_rcv_msg(skb, nlh, rlen); >+ } >+} >+ >+ >+int >+lpfc_fc_nl_rcv_nl_event(struct notifier_block *this, >+ unsigned long event, >+ void *ptr) >+{ >+ struct netlink_notify *n = ptr; >+ >+ if ((event == NETLINK_URELEASE) && >+ (n->protocol == NETLINK_FCTRANSPORT) && (n->pid)) { >+ fc_service_state = FC_SC_SERVICESTATE_OFFLINE; >+ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_DEREG); >+ } >+ >+ return NOTIFY_DONE; >+} >+ >+struct notifier_block lpfc_fc_netlink_notifier = { >+ .notifier_call = lpfc_fc_nl_rcv_nl_event, >+}; >+ >+ >+ >diff -urpN a/drivers/scsi/lpfc/lpfc_auth_access.h b/drivers/scsi/lpfc/lpfc_auth_access.h >--- a/drivers/scsi/lpfc/lpfc_auth_access.h 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_auth_access.h 2008-09-05 17:47:49.814878000 -0400 >@@ -0,0 +1,245 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006-2007 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. 
* >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. * >+ *******************************************************************/ >+ >+#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) >+ >+/* scsi_nl_hdr->version value */ >+#define SCSI_NL_VERSION 1 >+ >+/* scsi_nl_hdr->magic value */ >+#define SCSI_NL_MAGIC 0xA1B2 >+ >+/* scsi_nl_hdr->transport value */ >+#define SCSI_NL_TRANSPORT 0 >+#define SCSI_NL_TRANSPORT_FC 1 >+#define SCSI_NL_MAX_TRANSPORTS 2 >+ >+#define FC_NL_GROUP_CNT 0 >+ >+ /* Note: when specifying vendor_id to fc_host_post_vendor_event() >+ * be sure to read the Vendor Type and ID formatting requirements >+ * specified in scsi_netlink.h >+ */ >+ >+#define FC_SC_REQ_TIMEOUT (60*HZ) >+ >+enum fc_sc_service_state { >+ FC_SC_SERVICESTATE_UNKNOWN, >+ FC_SC_SERVICESTATE_ONLINE, >+ FC_SC_SERVICESTATE_OFFLINE, >+ FC_SC_SERVICESTATE_ERROR, >+}; >+ >+struct fc_security_request { >+ struct list_head rlist; >+ int pid; >+ u32 tran_id; >+ u32 req_type; >+ struct timer_list timer; >+ struct lpfc_vport *vport; >+ u32 data_len; >+ void *data; >+}; >+ >+struct fc_sc_msg_work_q_wrapper { >+ struct work_struct work; >+ struct fc_security_request *fc_sc_req; >+ u32 data_len; >+ int status; >+ u32 msgtype; >+}; >+struct fc_sc_notify_work_q_wrapper { >+ struct work_struct 
work; >+ struct Scsi_Host *shost; >+ int msg; >+}; >+ >+#define FC_DHCHAP 1 >+#define FC_FCAP 2 >+#define FC_FCPAP 3 >+#define FC_KERBEROS 4 >+ >+#define FC_AUTHMODE_UNKNOWN 0 >+#define FC_AUTHMODE_NONE 1 >+#define FC_AUTHMODE_ACTIVE 2 >+#define FC_AUTHMODE_PASSIVE 3 >+ >+#define FC_SP_HASH_MD5 0x5 >+#define FC_SP_HASH_SHA1 0x6 >+ >+#define DH_GROUP_NULL 0x00 >+#define DH_GROUP_1024 0x01 >+#define DH_GROUP_1280 0x02 >+#define DH_GROUP_1536 0x03 >+#define DH_GROUP_2048 0x04 >+ >+#define MAX_AUTH_REQ_SIZE 1024 >+#define MAX_AUTH_RSP_SIZE 1024 >+ >+#define AUTH_FABRIC_WWN 0xFFFFFFFFFFFFFFFFLL >+ >+struct fc_auth_req { >+ uint64_t local_wwpn; >+ uint64_t remote_wwpn; >+ union { >+ struct dhchap_challenge_req { >+ uint32_t transaction_id; >+ uint32_t dh_group_id; >+ uint32_t hash_id; >+ } dhchap_challenge; >+ struct dhchap_reply_req { >+ uint32_t transaction_id; >+ uint32_t dh_group_id; >+ uint32_t hash_id; >+ uint32_t bidirectional; >+ uint32_t received_challenge_len; >+ uint32_t received_public_key_len; >+ uint8_t data[0]; >+ } dhchap_reply; >+ struct dhchap_success_req { >+ uint32_t transaction_id; >+ uint32_t dh_group_id; >+ uint32_t hash_id; >+ uint32_t our_challenge_len; >+ uint32_t received_response_len; >+ uint32_t received_public_key_len; >+ uint32_t received_challenge_len; >+ uint8_t data[0]; >+ } dhchap_success; >+ }u; >+} __attribute__ ((packed)); >+ >+struct fc_auth_rsp { >+ uint64_t local_wwpn; >+ uint64_t remote_wwpn; >+ union { >+ struct authinfo { >+ uint8_t auth_mode; >+ uint16_t auth_timeout; >+ uint8_t bidirectional; >+ uint8_t type_priority[4]; >+ uint16_t type_len; >+ uint8_t hash_priority[4]; >+ uint16_t hash_len; >+ uint8_t dh_group_priority[8]; >+ uint16_t dh_group_len; >+ uint32_t reauth_interval; >+ } dhchap_security_config; >+ struct dhchap_challenge_rsp { >+ uint32_t transaction_id; >+ uint32_t our_challenge_len; >+ uint32_t our_public_key_len; >+ uint8_t data[0]; >+ } dhchap_challenge; >+ struct dhchap_reply_rsp { >+ uint32_t 
transaction_id; >+ uint32_t our_challenge_rsp_len; >+ uint32_t our_public_key_len; >+ uint32_t our_challenge_len; >+ uint8_t data[0]; >+ } dhchap_reply; >+ struct dhchap_success_rsp { >+ uint32_t transaction_id; >+ uint32_t authenticated; >+ uint32_t response_len; >+ uint8_t data[0]; >+ } dhchap_success; >+ }u; >+}__attribute__ ((packed)); >+ >+int >+lpfc_fc_security_get_config(struct Scsi_Host *shost, >+ struct fc_auth_req *auth_req, >+ u32 req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 rsp_len); >+int >+lpfc_fc_security_dhchap_make_challenge(struct Scsi_Host *shost, >+ struct fc_auth_req *auth_req, >+ u32 req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 rsp_len); >+int >+lpfc_fc_security_dhchap_make_response(struct Scsi_Host *shost, >+ struct fc_auth_req *auth_req, >+ u32 req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 rsp_len); >+int >+lpfc_fc_security_dhchap_authenticate(struct Scsi_Host *shost, >+ struct fc_auth_req *auth_req, >+ u32 req_len, >+ struct fc_auth_rsp *auth_rsp, >+ u32 rsp_len); >+ >+int lpfc_fc_queue_security_work(struct lpfc_vport *, >+ struct work_struct *); >+ >+/* >+ * FC Transport Message Types >+ */ >+ /* user -> kernel */ >+#define FC_NL_EVENTS_REG 0x0001 >+#define FC_NL_EVENTS_DEREG 0x0002 >+#define FC_NL_SC_REG 0x0003 >+#define FC_NL_SC_DEREG 0x0004 >+#define FC_NL_SC_GET_CONFIG_RSP 0x0005 >+#define FC_NL_SC_SET_CONFIG_RSP 0x0006 >+#define FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP 0x0007 >+#define FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP 0x0008 >+#define FC_NL_SC_DHCHAP_AUTHENTICATE_RSP 0x0009 >+ /* kernel -> user */ >+#define FC_NL_ASYNC_EVENT 0x0010 >+#define FC_NL_SC_GET_CONFIG_REQ 0x0020 >+#define FC_NL_SC_SET_CONFIG_REQ 0x0030 >+#define FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ 0x0040 >+#define FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ 0x0050 >+#define FC_NL_SC_DHCHAP_AUTHENTICATE_REQ 0x0060 >+ >+/* >+ * Message Structures : >+ */ >+ >+/* macro to round up message lengths to 8byte boundary */ >+#define FC_NL_MSGALIGN(len) (((len) + 7) & ~7) >+ >+#define 
FC_NETLINK_API_VERSION 1 >+ >+/* Single Netlink Message type to send all FC Transport messages */ >+#define FC_TRANSPORT_MSG NLMSG_MIN_TYPE + 1 >+ >+/* SCSI_TRANSPORT_MSG event message header */ >+/* >+struct scsi_nl_hdr { >+ uint8_t version; >+ uint8_t transport; >+ uint16_t magic; >+ uint16_t msgtype; >+ uint16_t msglen; >+} __attribute__((aligned(sizeof(uint64_t)))); >+*/ >+struct fc_nl_sc_message { >+ struct scsi_nl_hdr snlh; /* must be 1st element ! */ >+ uint32_t host_no; >+ uint32_t tran_id; >+ uint32_t data_len; >+ uint8_t data[0]; >+} __attribute__((aligned(sizeof(uint64_t)))); >+ >diff -urpN a/drivers/scsi/lpfc/lpfc_auth.c b/drivers/scsi/lpfc/lpfc_auth.c >--- a/drivers/scsi/lpfc/lpfc_auth.c 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_auth.c 2008-09-05 17:47:49.624877000 -0400 >@@ -0,0 +1,837 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006-2008 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. * >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. 
* >+ *******************************************************************/ >+/* See Fibre Channel protocol T11 FC-SP for details */ >+#include <linux/pci.h> >+#include <linux/interrupt.h> >+ >+#include <scsi/scsi.h> >+#include <scsi/scsi_tcq.h> >+#include <scsi/scsi_transport_fc.h> >+ >+#include "lpfc_hw.h" >+#include "lpfc_sli.h" >+#include "lpfc_disc.h" >+#include "lpfc.h" >+#include "lpfc_crtn.h" >+#include "lpfc_logmsg.h" >+#include "lpfc_auth_access.h" >+#include "lpfc_auth.h" >+ >+void >+lpfc_start_authentication(struct lpfc_vport *vport, >+ struct lpfc_nodelist *ndlp) >+{ >+ uint32_t nego_payload_len; >+ uint8_t *nego_payload; >+ >+ nego_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL); >+ if (!nego_payload) >+ return; >+ vport->auth.trans_id++; >+ vport->auth.auth_msg_state = LPFC_AUTH_NEGOTIATE; >+ nego_payload_len = lpfc_build_auth_neg(vport, nego_payload); >+ lpfc_issue_els_auth(vport, ndlp, AUTH_NEGOTIATE, >+ nego_payload, nego_payload_len); >+ kfree(nego_payload); >+} >+ >+void >+lpfc_dhchap_make_challenge(struct Scsi_Host *shost, int status, >+ void *rsp, uint32_t rsp_len) >+{ >+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; >+ struct lpfc_nodelist *ndlp; >+ uint32_t chal_payload_len; >+ uint8_t *chal_payload; >+ struct fc_auth_rsp *auth_rsp = rsp; >+ >+ ndlp = lpfc_findnode_did(vport, Fabric_DID); >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { >+ kfree(rsp); >+ return; >+ } >+ >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY, >+ "1003 Send dhchap challenge local_wwpn " >+ "%llX remote_wwpn %llX \n", >+ (unsigned long long)auth_rsp->local_wwpn, >+ (unsigned long long)auth_rsp->remote_wwpn); >+ >+ chal_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL); >+ if (!chal_payload) { >+ kfree(rsp); >+ return; >+ } >+ vport->auth.auth_msg_state = LPFC_DHCHAP_CHALLENGE; >+ chal_payload_len = lpfc_build_dhchap_challenge(vport, >+ chal_payload, rsp); >+ lpfc_issue_els_auth(vport, ndlp, DHCHAP_CHALLENGE, >+ chal_payload, chal_payload_len); >+ 
kfree(chal_payload); >+ kfree(rsp); >+} >+ >+ >+void >+lpfc_dhchap_make_response(struct Scsi_Host *shost, int status, >+ void *rsp, uint32_t rsp_len) >+{ >+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; >+ struct lpfc_nodelist *ndlp; >+ uint32_t reply_payload_len; >+ uint8_t *reply_payload; >+ struct fc_auth_rsp *auth_rsp = rsp; >+ >+ ndlp = lpfc_findnode_did(vport, Fabric_DID); >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { >+ kfree(rsp); >+ return; >+ } >+ >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY, >+ "1004 Send dhchap reply local_wwpn " >+ "%llX remote_wwpn %llX \n", >+ (unsigned long long)auth_rsp->local_wwpn, >+ (unsigned long long)auth_rsp->remote_wwpn); >+ >+ reply_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL); >+ if (!reply_payload) { >+ kfree(rsp); >+ return; >+ } >+ >+ vport->auth.auth_msg_state = LPFC_DHCHAP_REPLY; >+ reply_payload_len = lpfc_build_dhchap_reply(vport, reply_payload, rsp); >+ lpfc_issue_els_auth(vport, ndlp, DHCHAP_REPLY, >+ reply_payload, reply_payload_len); >+ kfree(reply_payload); >+ kfree(rsp); >+ >+} >+ >+ >+void >+lpfc_dhchap_authenticate(struct Scsi_Host *shost, >+ int status, void *rsp, >+ uint32_t rsp_len) >+{ >+ struct fc_auth_rsp *auth_rsp = (struct fc_auth_rsp *)rsp; >+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; >+ struct lpfc_nodelist *ndlp; >+ >+ ndlp = lpfc_findnode_did(vport, Fabric_DID); >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { >+ kfree(rsp); >+ return; >+ } >+ if (status != 0) { >+ lpfc_issue_els_auth_reject(vport, ndlp, >+ AUTH_ERR, AUTHENTICATION_FAILED); >+ kfree(rsp); >+ return; >+ } >+ >+ if (auth_rsp->u.dhchap_success.authenticated) { >+ uint32_t suc_payload_len; >+ uint8_t *suc_payload; >+ >+ suc_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL); >+ if (!suc_payload) { >+ lpfc_issue_els_auth_reject(vport, ndlp, >+ AUTH_ERR, AUTHENTICATION_FAILED); >+ kfree(rsp); >+ return; >+ } >+ suc_payload_len = lpfc_build_dhchap_success(vport, >+ suc_payload, rsp); >+ if 
(suc_payload_len == sizeof(uint32_t)) { >+ /* Authentication is complete after sending this SUCCESS */ >+ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS; >+ } else { >+ /* Need to wait for SUCCESS from Auth Initiator */ >+ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS_REPLY; >+ } >+ lpfc_issue_els_auth(vport, ndlp, DHCHAP_SUCCESS, >+ suc_payload, suc_payload_len); >+ kfree(suc_payload); >+ vport->auth.direction |= AUTH_DIRECTION_LOCAL; >+ } else { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1005 AUTHENTICATION_FAILURE Nport:x%x\n", >+ ndlp->nlp_DID); >+ lpfc_issue_els_auth_reject(vport, ndlp, >+ AUTH_ERR, AUTHENTICATION_FAILED); >+ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS) { >+ lpfc_port_auth_failed(ndlp); >+ } >+ } >+ >+ kfree(rsp); >+} >+ >+int >+lpfc_unpack_auth_negotiate(struct lpfc_vport *vport, uint8_t *message, >+ uint8_t *reason, uint8_t *explanation) >+{ >+ uint32_t prot_len; >+ uint32_t param_len; >+ int i, j = 0; >+ >+ /* Following is the format of the message. Name Format. 
>+ * uint16_t nameTag; >+ * uint16_t nameLength; >+ * uint8_t name[8]; >+ * AUTH_Negotiate Message >+ * uint32_t NumberOfAuthProtocals >+ * uint32_t AuthProtParameter#1Len >+ * uint32_t AuthProtID#1 (DH-CHAP = 0x1) >+ * AUTH_Negotiate DH-CHAP >+ * uint16_t DH-CHAPParameterTag (HashList = 0x1) >+ * uint16_t DH-CHAPParameterWordCount (number of uint32_t entries) >+ * uint8_t DH-CHAPParameter[]; (uint32_t entries) >+ * uint16_t DH-CHAPParameterTag (DHglDList = 0x2) >+ * uint16_t DH-CHAPParameterWordCount (number of uint32_t entries) >+ * uint8_t DH-CHAPParameter[]; (uint32_t entries) >+ * DHCHAP_Challenge Message >+ * uint32_t hashIdentifier; >+ * uint32_t dhgroupIdentifier; >+ * uint32_t challengevalueLen; >+ * uint8_t challengeValue[]; >+ * uint32_t dhvalueLen; >+ * uint8_t dhvalue[]; >+ */ >+ >+ /* Name Tag */ >+ if (be16_to_cpu(*(uint16_t *)message) != NAME_TAG) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1006 Bad Name tag in auth message 0x%x\n", >+ be16_to_cpu(*(uint16_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint16_t); >+ >+ /* Name Length */ >+ if (be16_to_cpu(*(uint16_t *)message) != NAME_LEN) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1007 Bad Name length in auth message 0x%x\n", >+ be16_to_cpu(*(uint16_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint16_t); >+ >+ /* Skip over Remote Port Name */ >+ message += NAME_LEN; >+ >+ /* Number of Auth Protocols must be 1 DH-CHAP */ >+ if (be32_to_cpu(*(uint32_t *)message) != 1) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1008 Bad Number of Protocols 0x%x\n", >+ be32_to_cpu(*(uint32_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint32_t); >+ >+ /* Protocol Parameter Length */ >+ prot_len = be32_to_cpu(*(uint32_t *)message); >+ message += sizeof(uint32_t); >+ >+ /* Protocol Parameter type 
*/ >+ if (be32_to_cpu(*(uint32_t *)message) != FC_DHCHAP) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1009 Bad param type 0x%x\n", >+ be32_to_cpu(*(uint32_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint32_t); >+ >+ /* Parameter #1 Tag */ >+ if (be16_to_cpu(*(uint16_t *)message) != HASH_LIST_TAG) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1010 Bad Tag 1 0x%x\n", >+ be16_to_cpu(*(uint16_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint16_t); >+ >+ /* Parameter #1 Length */ >+ param_len = be16_to_cpu(*(uint16_t *)message); >+ message += sizeof(uint16_t); >+ >+ /* Choose a hash function */ >+ for (i = 0; i < vport->auth.hash_len; i++) { >+ for (j = 0; j < param_len; j++) { >+ if (vport->auth.hash_priority[i] == >+ be32_to_cpu(((uint32_t *)message)[j])) >+ break; >+ } >+ if (j != param_len) >+ break; >+ } >+ if (i == vport->auth.hash_len && j == param_len) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1011 Auth_neg no hash function chosen.\n"); >+ return 1; >+ } >+ vport->auth.hash_id = vport->auth.hash_priority[i]; >+ message += sizeof(uint32_t) * param_len; >+ >+ /* Parameter #2 Tag */ >+ if (be16_to_cpu(*(uint16_t *)message) != DHGID_LIST_TAG) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1012 Auth_negotiate Bad Tag 2 0x%x\n", >+ be16_to_cpu(*(uint16_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint16_t); >+ >+ /* Parameter #2 Length */ >+ param_len = be16_to_cpu(*(uint16_t *)message); >+ message += sizeof(uint16_t); >+ >+ /* Choose a DH Group */ >+ for (i = 0; i < vport->auth.dh_group_len; i++) { >+ for (j = 0; j < param_len; j++) { >+ if (vport->auth.dh_group_priority[i] == >+ be32_to_cpu(((uint32_t *)message)[j])) >+ break; >+ } >+ if (j != param_len) >+ break; >+ } >+ if (i 
== vport->auth.dh_group_len && j == param_len) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1013 Auth_negotiate no DH_group found. \n"); >+ return 1; >+ } >+ vport->auth.group_id = vport->auth.dh_group_priority[i]; >+ message += sizeof(uint32_t) * param_len; >+ >+ return 0; >+} >+ >+int >+lpfc_unpack_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message, >+ uint8_t *reason, uint8_t *explanation) >+{ >+ int i; >+ >+ /* Following is the format of the message DHCHAP_Challenge. >+ * uint16_t nameTag; >+ * uint16_t nameLength; >+ * uint8_t name[8]; >+ * uint32_t hashIdentifier; >+ * uint32_t dhgroupIdentifier; >+ * uint32_t challengevalueLen; >+ * uint8_t challengeValue[]; >+ * uint32_t dhvalueLen; >+ * uint8_t dhvalue[]; >+ */ >+ >+ /* Name Tag */ >+ if (be16_to_cpu(*(uint16_t *)message) != NAME_TAG) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1014 dhchap challenge bad name tag 0x%x. \n", >+ be16_to_cpu(*(uint16_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint16_t); >+ >+ /* Name Length */ >+ if (be16_to_cpu(*(uint16_t *)message) != NAME_LEN) { >+ *reason = AUTH_ERR; >+ *explanation = BAD_PAYLOAD; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1015 dhchap challenge bad name length " >+ "0x%x.\n", be16_to_cpu(*(uint16_t *)message)); >+ return 1; >+ } >+ message += sizeof(uint16_t); >+ >+ /* Remote Port Name */ >+ message += NAME_LEN; >+ >+ /* Hash ID */ >+ vport->auth.hash_id = be32_to_cpu(*(uint32_t *)message); /* Hash id */ >+ for (i = 0; i < vport->auth.hash_len; i++) { >+ if (vport->auth.hash_id == vport->auth.hash_priority[i]) >+ break; >+ } >+ if (i == vport->auth.hash_len) { >+ *reason = LOGIC_ERR; >+ *explanation = BAD_ALGORITHM; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1016 dhchap challenge Hash ID not Supported " >+ "0x%x. 
\n", vport->auth.hash_id); >+ return 1; >+ } >+ message += sizeof(uint32_t); >+ >+ vport->auth.group_id = >+ be32_to_cpu(*(uint32_t *)message); /* DH group id */ >+ for (i = 0; i < vport->auth.dh_group_len; i++) { >+ if (vport->auth.group_id == vport->auth.dh_group_priority[i]) >+ break; >+ } >+ if (i == vport->auth.dh_group_len) { >+ *reason = LOGIC_ERR; >+ *explanation = BAD_DHGROUP; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1017 dhchap challenge could not find DH " >+ "Group. \n"); >+ return 1; >+ } >+ message += sizeof(uint32_t); >+ >+ vport->auth.challenge_len = >+ be32_to_cpu(*(uint32_t *)message); /* Challenge Len */ >+ message += sizeof(uint32_t); >+ >+ /* copy challenge to vport */ >+ if (vport->auth.challenge != NULL) { >+ kfree(vport->auth.challenge); >+ } >+ vport->auth.challenge = kmalloc(vport->auth.challenge_len, GFP_KERNEL); >+ if (!vport->auth.challenge) { >+ *reason = AUTH_ERR; >+ return 1; >+ } >+ memcpy (vport->auth.challenge, message, vport->auth.challenge_len); >+ message += vport->auth.challenge_len; >+ >+ vport->auth.dh_pub_key_len = >+ be32_to_cpu(*(uint32_t *)message); /* DH Value Len */ >+ message += sizeof(uint32_t); >+ >+ if (vport->auth.dh_pub_key_len != 0) { >+ if (vport->auth.group_id == DH_GROUP_NULL) { >+ *reason = LOGIC_ERR; >+ *explanation = BAD_DHGROUP; >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1018 dhchap challenge No Public key " >+ "for non-NULL DH Group.\n"); >+ return 1; >+ } >+ >+ /* Copy to the vport to save for authentication */ >+ if (vport->auth.dh_pub_key != NULL) >+ kfree(vport->auth.dh_pub_key); >+ vport->auth.dh_pub_key = kmalloc(vport->auth.dh_pub_key_len, >+ GFP_KERNEL); >+ if (!vport->auth.dh_pub_key) { >+ *reason = AUTH_ERR; >+ return 1; >+ } >+ memcpy(vport->auth.dh_pub_key, message, >+ vport->auth.dh_pub_key_len); >+ } >+ return 0; >+} >+ >+int >+lpfc_unpack_dhchap_reply(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_req *fc_req) >+{ >+ uint32_t rsp_len; >+ uint32_t 
dh_len; >+ uint32_t challenge_len; >+ >+ /* Following is the format of the message DHCHAP_Reply. >+ * uint32_t Response Value Length; >+ * uint8_t Response Value[]; >+ * uint32_t DH Value Length; >+ * uint8_t DH Value[]; >+ * uint32_t Challenge Value Length; >+ * uint8_t Challenge Value[]; >+ */ >+ >+ rsp_len = be32_to_cpu(*(uint32_t *)message); /* Response Len */ >+ message += sizeof(uint32_t); >+ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len, >+ message, rsp_len); >+ fc_req->u.dhchap_success.received_response_len = rsp_len; >+ message += rsp_len; >+ >+ dh_len = be32_to_cpu(*(uint32_t *)message); /* DH Len */ >+ message += sizeof(uint32_t); >+ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len + >+ rsp_len, message, dh_len); >+ fc_req->u.dhchap_success.received_public_key_len = dh_len; >+ message += dh_len; >+ >+ challenge_len = be32_to_cpu(*(uint32_t *)message); /* Challenge Len */ >+ message += sizeof(uint32_t); >+ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len >+ + rsp_len + dh_len, >+ message, challenge_len); >+ fc_req->u.dhchap_success.received_challenge_len = challenge_len; >+ message += challenge_len; >+ >+ return (rsp_len + dh_len + challenge_len); >+} >+ >+int >+lpfc_unpack_dhchap_success(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_req *fc_req) >+{ >+ uint32_t rsp_len = 0; >+ >+ /* DHCHAP_Success. 
>+ * uint32_t responseValueLen; >+ * uint8_t response[]; >+ */ >+ >+ rsp_len = be32_to_cpu(*(uint32_t *)message); /* Response Len */ >+ message += sizeof(uint32_t); >+ memcpy(fc_req->u.dhchap_success.data + vport->auth.challenge_len, >+ message, rsp_len); >+ fc_req->u.dhchap_success.received_response_len = rsp_len; >+ >+ memcpy(fc_req->u.dhchap_success.data + >+ vport->auth.challenge_len + rsp_len, >+ vport->auth.dh_pub_key, vport->auth.dh_pub_key_len); >+ >+ fc_req->u.dhchap_success.received_public_key_len = >+ vport->auth.dh_pub_key_len; >+ >+ fc_req->u.dhchap_success.received_challenge_len = 0; >+ >+ return (vport->auth.challenge_len + rsp_len + >+ vport->auth.dh_pub_key_len); >+ return 0; >+} >+ >+int >+lpfc_build_auth_neg(struct lpfc_vport *vport, uint8_t *message) >+{ >+ uint8_t *message_start = message; >+ uint8_t *params_start; >+ uint32_t *params_len; >+ uint32_t len; >+ int i; >+ >+ /* Because some of the fields are not static in length >+ * and number we will pack on the fly.This will be expanded >+ * in the future to optionally offer DHCHAP or FCAP or both. >+ * The packing is done in Big Endian byte order DHCHAP_Reply. >+ * >+ * uint16_t nameTag; >+ * uint16_t nameLength; >+ * uint8_t name[8]; >+ * uint32_t available; For now we will only offer one >+ protocol ( DHCHAP ) for authentication. >+ * uint32_t potocolParamsLenId#1; >+ * uint32_t protocolId#1; 1 : DHCHAP. The protocol list is >+ * in order of preference. >+ * uint16_t parameter#1Tag 1 : HashList >+ * uint16_t parameter#1Len 2 : Count of how many parameter values >+ * follow in order of preference. >+ * uint16_t parameter#1value#1 5 : MD5 Hash Function >+ * uint16_t parameter#1value#2 6 : SHA-1 Hash Function >+ * uint16_t parameter#2Tag 2 : DHglDList >+ * uint16_t parameter#2Len 1 : Only One is supported now >+ * uint16_t parameter#2value#1 0 : NULL DH-CHAP Algorithm >+ * uint16_t parameter#2value#2 ... 
>+ * uint32_t protocolParamsLenId#2; >+ * uint32_t protocolId#2; 2 = FCAP >+ * uint16_t parameter#1Tag >+ * uint16_t parameter#1Len >+ * uint16_t parameter#1value#1 >+ * uint16_t parameter#1value#2 ... >+ * uint16_t parameter#2Tag >+ * uint16_t parameter#2Len >+ * uint16_t parameter#2value#1 >+ * uint16_t parameter#2value#2 ... >+ */ >+ >+ >+ /* Name Tag */ >+ *((uint16_t *)message) = cpu_to_be16(NAME_TAG); >+ message += sizeof(uint16_t); >+ >+ /* Name Len */ >+ *((uint16_t *)message) = cpu_to_be16(NAME_LEN); >+ message += sizeof(uint16_t); >+ >+ memcpy(message, vport->fc_portname.u.wwn, sizeof(uint64_t)); >+ >+ message += sizeof(uint64_t); >+ >+ /* Protocols Available */ >+ *((uint32_t *)message) = cpu_to_be32(PROTS_NUM); >+ message += sizeof(uint32_t); >+ >+ /* First Protocol Params Len */ >+ params_len = (uint32_t *)message; >+ message += sizeof(uint32_t); >+ >+ /* Start of first Param */ >+ params_start = message; >+ >+ /* Protocol Id */ >+ *((uint32_t *)message) = cpu_to_be32(FC_DHCHAP); >+ message += sizeof(uint32_t); >+ >+ /* Hash List Tag */ >+ *((uint16_t *)message) = cpu_to_be16(HASH_LIST_TAG); >+ message += sizeof(uint16_t); >+ >+ /* Hash Value Len */ >+ *((uint16_t *)message) = cpu_to_be16(vport->auth.hash_len); >+ message += sizeof(uint16_t); >+ >+ /* Hash Value each 4 byte words */ >+ for (i = 0; i < vport->auth.hash_len; i++) { >+ *((uint32_t *)message) = >+ cpu_to_be32(vport->auth.hash_priority[i]); >+ message += sizeof(uint32_t); >+ } >+ >+ /* DHgIDList Tag */ >+ *((uint16_t *)message) = cpu_to_be16(DHGID_LIST_TAG); >+ message += sizeof(uint16_t); >+ >+ /* DHgIDListValue Len */ >+ *((uint16_t *)message) = cpu_to_be16(vport->auth.dh_group_len); >+ >+ message += sizeof(uint16_t); >+ >+ /* DHgIDList each 4 byte words */ >+ >+ for (i = 0; i < vport->auth.dh_group_len; i++) { >+ *((uint32_t *)message) = >+ cpu_to_be32(vport->auth.dh_group_priority[i]); >+ message += sizeof(uint32_t); >+ } >+ >+ *params_len = cpu_to_be32(message - params_start); >+ >+ 
len = (uint32_t)(message - message_start); >+ >+ return len; >+} >+ >+int >+lpfc_build_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_rsp *fc_rsp) >+{ >+ uint8_t *message_start = message; >+ >+ /* Because some of the fields are not static in length and number >+ * we will pack on the fly. The packing is done in Big Endian byte >+ * order DHCHAP_Challenge. >+ * >+ * uint16_t nameTag; >+ * uint16_t nameLength; >+ * uint8_t name[8]; >+ * uint32_t Hash_Identifier; >+ * uint32_t DH_Group_Identifier; >+ * uint32_t Challenge_Value_Length; >+ * uint8_t Challenge_Value[]; >+ * uint32_t DH_Value_Length; >+ * uint8_t DH_Value[]; >+ */ >+ >+ /* Name Tag */ >+ *((uint16_t *)message) = cpu_to_be16(NAME_TAG); >+ message += sizeof(uint16_t); >+ >+ /* Name Len */ >+ *((uint16_t *)message) = cpu_to_be16(NAME_LEN); >+ message += sizeof(uint16_t); >+ >+ memcpy(message, vport->fc_portname.u.wwn, NAME_LEN); >+ message += NAME_LEN; >+ >+ /* Hash Value each 4 byte words */ >+ *((uint32_t *)message) = cpu_to_be32(vport->auth.hash_id); >+ message += sizeof(uint32_t); >+ >+ /* DH group id each 4 byte words */ >+ *((uint32_t *)message) = cpu_to_be32(vport->auth.group_id); >+ message += sizeof(uint32_t); >+ >+ /* Challenge Length */ >+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u. >+ dhchap_challenge.our_challenge_len); >+ message += sizeof(uint32_t); >+ >+ /* copy challenge to vport to save */ >+ if (vport->auth.challenge) >+ kfree(vport->auth.challenge); >+ vport->auth.challenge_len = fc_rsp->u. 
>+ dhchap_challenge.our_challenge_len; >+ vport->auth.challenge = kmalloc(vport->auth.challenge_len, GFP_KERNEL); >+ >+ if (!vport->auth.challenge) >+ return 0; >+ >+ memcpy(vport->auth.challenge, fc_rsp->u.dhchap_challenge.data, >+ fc_rsp->u.dhchap_challenge.our_challenge_len); >+ >+ /* Challenge */ >+ memcpy(message, fc_rsp->u.dhchap_challenge.data, >+ fc_rsp->u.dhchap_challenge.our_challenge_len); >+ message += fc_rsp->u.dhchap_challenge.our_challenge_len; >+ >+ /* Public Key length */ >+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u. >+ dhchap_challenge.our_public_key_len); >+ message += sizeof(uint32_t); >+ >+ /* Public Key */ >+ memcpy(message, fc_rsp->u.dhchap_challenge.data + >+ fc_rsp->u.dhchap_challenge.our_challenge_len, >+ fc_rsp->u.dhchap_challenge.our_public_key_len); >+ message += fc_rsp->u.dhchap_challenge.our_public_key_len; >+ >+ return ((uint32_t)(message - message_start)); >+ >+} >+ >+int >+lpfc_build_dhchap_reply(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_rsp *fc_rsp) >+ >+{ >+ uint8_t *message_start = message; >+ >+ /* >+ * Because some of the fields are not static in length and >+ * number we will pack on the fly. The packing is done in >+ * Big Endian byte order DHCHAP_Reply. >+ * >+ * uint32_t ResonseLength; >+ * uint8_t ResponseValue[]; >+ * uint32_t DHLength; >+ * uint8_t DHValue[]; Our Public key >+ * uint32_t ChallengeLength; Used for bi-directional authentication >+ * uint8_t ChallengeValue[]; >+ * >+ * The combined key ( g^x mod p )^y mod p is used as the last >+ * hash of the password. >+ * >+ * g is the base 2 or 5. >+ * y is our private key. >+ * ( g^y mod p ) is our public key which we send. >+ * ( g^x mod p ) is their public key which we received. >+ */ >+ /* Response Value Length */ >+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.dhchap_reply. 
>+ our_challenge_rsp_len); >+ >+ message += sizeof(uint32_t); >+ /* Response Value */ >+ memcpy(message, fc_rsp->u.dhchap_reply.data, >+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len); >+ >+ message += fc_rsp->u.dhchap_reply.our_challenge_rsp_len; >+ /* DH Value Length */ >+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.dhchap_reply. >+ our_public_key_len); >+ >+ message += sizeof(uint32_t); >+ /* DH Value */ >+ memcpy(message, fc_rsp->u.dhchap_reply.data + >+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len, >+ fc_rsp->u.dhchap_reply.our_public_key_len); >+ >+ message += fc_rsp->u.dhchap_reply.our_public_key_len; >+ >+ if (vport->auth.bidirectional) { >+ >+ /* copy to vport to save */ >+ if (vport->auth.challenge) >+ kfree(vport->auth.challenge); >+ vport->auth.challenge_len = fc_rsp->u.dhchap_reply. >+ our_challenge_len; >+ vport->auth.challenge = kmalloc(vport->auth.challenge_len, >+ GFP_KERNEL); >+ if (!vport->auth.challenge) >+ return 0; >+ >+ memcpy(vport->auth.challenge, fc_rsp->u.dhchap_reply.data + >+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len + >+ fc_rsp->u.dhchap_reply.our_public_key_len, >+ fc_rsp->u.dhchap_reply.our_challenge_len); >+ /* Challenge Value Length */ >+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u. 
>+ dhchap_reply.our_challenge_len); >+ message += sizeof(uint32_t); >+ /* Challenge Value */ >+ memcpy(message, fc_rsp->u.dhchap_reply.data + >+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len + >+ fc_rsp->u.dhchap_reply.our_public_key_len, >+ fc_rsp->u.dhchap_reply.our_challenge_len); >+ >+ message += fc_rsp->u.dhchap_reply.our_challenge_len; >+ >+ } else { >+ *((uint32_t *)message) = 0; /* Challenge Len for No >+ bidirectional authentication */ >+ message += sizeof(uint32_t); /* Challenge Value Not Present */ >+ } >+ >+ return ((uint32_t)(message - message_start)); >+ >+} >+ >+int >+lpfc_build_dhchap_success(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_rsp *fc_rsp) >+{ >+ uint8_t *message_start = message; >+ >+ /* >+ * Because some of the fields are not static in length and number >+ * we will pack on the fly. The packing is done in Big Endian byte >+ * order DHCHAP_Success. >+ * uint32_t responseValueLen; >+ * uint8_t response[];. >+ */ >+ >+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u. >+ dhchap_success.response_len); >+ message += sizeof(uint32_t); >+ >+ memcpy(message, fc_rsp->u.dhchap_success.data, >+ fc_rsp->u.dhchap_success.response_len); >+ message += fc_rsp->u.dhchap_success.response_len; >+ >+ return ((uint32_t)(message - message_start)); >+} >+ >diff -urpN a/drivers/scsi/lpfc/lpfc_auth.h b/drivers/scsi/lpfc/lpfc_auth.h >--- a/drivers/scsi/lpfc/lpfc_auth.h 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_auth.h 2008-09-05 17:47:49.627877000 -0400 >@@ -0,0 +1,92 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006-2007 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. 
* >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. * >+ *******************************************************************/ >+ >+#define N_DH_GROUP 4 >+#define ELS_CMD_AUTH_BYTE 0x90 >+ >+#define AUTH_REJECT 0xA >+#define AUTH_NEGOTIATE 0xB >+#define AUTH_DONE 0xC >+ >+#define DHCHAP_CHALLENGE 0x10 >+#define DHCHAP_REPLY 0x11 >+#define DHCHAP_SUCCESS 0x12 >+ >+#define FCAP_REQUEST 0x13 >+#define FCAP_ACK 0x14 >+#define FCAP_CONFIRM 0x15 >+ >+#define PROTS_NUM 0x01 >+ >+#define NAME_TAG 0x01 >+#define NAME_LEN 0x08 >+ >+#define HASH_LIST_TAG 0x01 >+ >+#define DHGID_LIST_TAG 0x02 >+ >+#define HBA_SECURITY 0x20 >+ >+#define AUTH_ERR 0x1 >+#define LOGIC_ERR 0x2 >+ >+#define BAD_DHGROUP 0x2 >+#define BAD_ALGORITHM 0x3 >+#define AUTHENTICATION_FAILED 0x5 >+#define BAD_PAYLOAD 0x6 >+#define BAD_PROTOCOL 0x7 >+#define RESTART 0x8 >+ >+#define AUTH_VERSION 0x1 >+ >+#define MAX_AUTH_MESSAGE_SIZE 1024 >+ >+struct lpfc_auth_reject { >+ uint8_t reason; >+ uint8_t explanation; >+ uint8_t reserved[2]; >+} __attribute__ ((packed)); >+ >+struct lpfc_auth_message { /* Structure is in Big Endian format */ >+ uint8_t command_code; >+ uint8_t flags; >+ uint8_t message_code; >+ uint8_t protocol_ver; >+ uint32_t message_len; >+ uint32_t trans_id; >+ uint8_t data[0]; >+} __attribute__ ((packed)); >+ >+int 
lpfc_build_auth_neg(struct lpfc_vport *vport, uint8_t *message); >+int lpfc_build_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_rsp *fc_rsp); >+int lpfc_build_dhchap_reply(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_rsp *fc_rsp); >+int lpfc_build_dhchap_success(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_rsp *fc_rsp); >+ >+int lpfc_unpack_auth_negotiate(struct lpfc_vport *vport, uint8_t *message, >+ uint8_t *reason, uint8_t *explanation); >+int lpfc_unpack_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message, >+ uint8_t *reason, uint8_t *explanation); >+int lpfc_unpack_dhchap_reply(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_req *fc_req); >+int lpfc_unpack_dhchap_success(struct lpfc_vport *vport, uint8_t *message, >+ struct fc_auth_req *fc_req); >diff -urpN a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h >--- a/drivers/scsi/lpfc/lpfc_compat.h 2008-09-05 17:47:41.633246000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_compat.h 2008-09-05 17:47:49.675879000 -0400 >@@ -32,6 +32,11 @@ using writel() and readl(). > *******************************************************************/ > #include <asm/byteorder.h> > >+/* >+ * This definition is to support older versions of scsi_transport_fc which >+ * do not have 8Gig speed definition. >+ */ >+ > #ifdef __BIG_ENDIAN > > static inline void >diff -urpN a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h >--- a/drivers/scsi/lpfc/lpfc_crtn.h 2008-09-05 17:47:41.636245000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_crtn.h 2008-09-05 17:47:49.643876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. 
* > * www.emulex.com * > * * >@@ -21,8 +21,16 @@ > typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param); > > struct fc_rport; >+int lpfc_issue_els_auth(struct lpfc_vport *, struct lpfc_nodelist *, >+ uint8_t message_code, uint8_t *payload, >+ uint32_t payload_len); >+int lpfc_issue_els_auth_reject(struct lpfc_vport *vport, >+ struct lpfc_nodelist *ndlp, >+ uint8_t reason, uint8_t explanation); > void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); > void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); >+void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); >+ > void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); > int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, > struct lpfc_dmabuf *mp); >@@ -34,6 +42,7 @@ void lpfc_read_config(struct lpfc_hba *, > void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); > int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, > LPFC_MBOXQ_t *, uint32_t); >+void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); > void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); > void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); > void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); >@@ -43,6 +52,7 @@ void lpfc_init_link(struct lpfc_hba *, L > struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); > void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove); > int lpfc_linkdown(struct lpfc_hba *); >+void lpfc_port_link_failure(struct lpfc_vport *); > void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); > > void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); >@@ -51,7 +61,11 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_ > void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); > void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); > void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); >+void 
lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); > void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); >+void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *); >+struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, >+ struct lpfc_nodelist *, int); > void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); > void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); > void lpfc_set_disctmo(struct lpfc_vport *); >@@ -66,16 +80,21 @@ int lpfc_check_sli_ndlp(struct lpfc_hba > void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t); > struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); > int lpfc_nlp_put(struct lpfc_nodelist *); >+int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); > struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); > void lpfc_disc_list_loopmap(struct lpfc_vport *); > void lpfc_disc_start(struct lpfc_vport *); > void lpfc_disc_flush_list(struct lpfc_vport *); > void lpfc_cleanup_discovery_resources(struct lpfc_vport *); >+void lpfc_cleanup(struct lpfc_vport *); > void lpfc_disc_timeout(unsigned long); > > struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); > struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); >+struct lpfc_nodelist *lpfc_findnode_wwnn(struct lpfc_vport *, >+ struct lpfc_name *); > >+void lpfc_port_auth_failed(struct lpfc_nodelist *); > void lpfc_worker_wake_up(struct lpfc_hba *); > int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); > int lpfc_do_work(void *); >@@ -88,7 +107,13 @@ void lpfc_do_scr_ns_plogi(struct lpfc_hb > int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, > struct serv_parm *, uint32_t); > int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); >+void lpfc_more_plogi(struct lpfc_vport *); >+void lpfc_more_adisc(struct lpfc_vport *); >+void lpfc_end_rscn(struct lpfc_vport *); > int lpfc_els_chk_latt(struct 
lpfc_vport *); >+struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t, >+ uint8_t, struct lpfc_nodelist *, uint32_t, >+ uint32_t); > int lpfc_els_abort_flogi(struct lpfc_hba *); > int lpfc_initial_flogi(struct lpfc_vport *); > int lpfc_initial_fdisc(struct lpfc_vport *); >@@ -112,6 +137,8 @@ int lpfc_els_rsp_prli_acc(struct lpfc_vp > void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); > void lpfc_els_retry_delay(unsigned long); > void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); >+void lpfc_reauth_node(unsigned long); >+void lpfc_reauthentication_handler(struct lpfc_nodelist *); > void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *); > void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, > struct lpfc_iocbq *); >@@ -149,6 +176,7 @@ void lpfc_offline(struct lpfc_hba *); > > int lpfc_sli_setup(struct lpfc_hba *); > int lpfc_sli_queue_setup(struct lpfc_hba *); >+int lpfc_sli_set_dma_length(struct lpfc_hba *, uint32_t); > > void lpfc_handle_eratt(struct lpfc_hba *); > void lpfc_handle_latt(struct lpfc_hba *); >@@ -204,6 +232,11 @@ int lpfc_sli_ringpostbuf_put(struct lpfc > struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, > struct lpfc_sli_ring *, > dma_addr_t); >+ >+uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *); >+struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *, >+ struct lpfc_sli_ring *, uint32_t ); >+ > int lpfc_sli_hbq_count(void); > int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t); > int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t); >@@ -260,18 +293,36 @@ extern struct scsi_host_template lpfc_vp > extern struct fc_function_template lpfc_transport_functions; > extern struct fc_function_template lpfc_vport_transport_functions; > extern int lpfc_sli_mode; >+extern int lpfc_enable_npiv; > > int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); >+int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); 
> void lpfc_terminate_rport_io(struct fc_rport *); > void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); > > struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *); > int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable); >-void lpfc_mbx_unreg_vpi(struct lpfc_vport *); >+int lpfc_mbx_unreg_vpi(struct lpfc_vport *); > void destroy_port(struct lpfc_vport *); > int lpfc_get_instance(void); > void lpfc_host_attrib_init(struct Scsi_Host *); > >+int lpfc_selective_reset(struct lpfc_hba *); >+int lpfc_security_wait(struct lpfc_hba *); >+int lpfc_get_security_enabled(struct Scsi_Host *); >+void lpfc_security_service_online(struct Scsi_Host *); >+void lpfc_security_service_offline(struct Scsi_Host *); >+void lpfc_security_config(struct Scsi_Host *, int status, void *); >+int lpfc_security_config_wait(struct lpfc_vport *vport); >+void lpfc_dhchap_make_challenge(struct Scsi_Host *, int , void *, uint32_t); >+void lpfc_dhchap_make_response(struct Scsi_Host *, int , void *, uint32_t); >+void lpfc_dhchap_authenticate(struct Scsi_Host *, int , void *, uint32_t); >+int lpfc_start_node_authentication(struct lpfc_nodelist *); >+int lpfc_get_auth_config(struct lpfc_nodelist *, struct lpfc_name *); >+void lpfc_start_discovery(struct lpfc_vport *vport); >+ >+void lpfc_start_authentication(struct lpfc_vport *, struct lpfc_nodelist *); >+ > extern void lpfc_debugfs_initialize(struct lpfc_vport *); > extern void lpfc_debugfs_terminate(struct lpfc_vport *); > extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t, >@@ -280,6 +331,11 @@ extern void lpfc_debugfs_slow_ring_trc(s > uint32_t, uint32_t); > extern struct lpfc_hbq_init *lpfc_hbq_defs[]; > >+extern uint8_t lpfc_security_service_state; >+extern spinlock_t fc_security_user_lock; >+extern struct list_head fc_security_user_list; >+extern int fc_service_state; >+ > /* Interface exported by fabric iocb scheduler */ > int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq 
*); > void lpfc_fabric_abort_vport(struct lpfc_vport *); >diff -urpN a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c >--- a/drivers/scsi/lpfc/lpfc_ct.c 2008-09-05 17:47:41.642303000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_ct.c 2008-09-05 17:47:49.744876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * * >@@ -19,7 +19,7 @@ > *******************************************************************/ > > /* >- * Fibre Channel SCSI LAN Device Driver CT support >+ * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS > */ > > #include <linux/blkdev.h> >@@ -57,45 +57,27 @@ > > static char *lpfc_release_version = LPFC_DRIVER_VERSION; > >-/* >- * lpfc_ct_unsol_event >- */ > static void >-lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, >- struct lpfc_dmabuf *mp, uint32_t size) >+lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, >+ struct lpfc_dmabuf *mp, uint32_t size) > { > if (!mp) { >- printk(KERN_ERR "%s (%d): Unsolited CT, no buffer, " >- "piocbq = %p, status = x%x, mp = %p, size = %d\n", >- __FUNCTION__, __LINE__, >- piocbq, piocbq->iocb.ulpStatus, mp, size); >- } >- >- printk(KERN_ERR "%s (%d): Ignoring unsolicted CT piocbq = %p, " >- "buffer = %p, size = %d, status = x%x\n", >- __FUNCTION__, __LINE__, >- piocbq, mp, size, >- piocbq->iocb.ulpStatus); >- >+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, >+ "0146 Ignoring unsolicited CT No HBQ " >+ "status = x%x\n", >+ piocbq->iocb.ulpStatus); >+ } >+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, >+ "0145 Ignoring unsolicted CT HBQ Size:%d " >+ "status = x%x\n", >+ size, piocbq->iocb.ulpStatus); > } > > static void 
>-lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, >- struct lpfc_dmabuf *mp, uint32_t size) >+lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, >+ struct lpfc_dmabuf *mp, uint32_t size) > { >- if (!mp) { >- printk(KERN_ERR "%s (%d): Unsolited CT, no " >- "HBQ buffer, piocbq = %p, status = x%x\n", >- __FUNCTION__, __LINE__, >- piocbq, piocbq->iocb.ulpStatus); >- } else { >- lpfc_ct_unsol_buffer(phba, piocbq, mp, size); >- printk(KERN_ERR "%s (%d): Ignoring unsolicted CT " >- "piocbq = %p, buffer = %p, size = %d, " >- "status = x%x\n", >- __FUNCTION__, __LINE__, >- piocbq, mp, size, piocbq->iocb.ulpStatus); >- } >+ lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size); > } > > void >@@ -109,11 +91,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phb > struct lpfc_iocbq *iocbq; > dma_addr_t paddr; > uint32_t size; >- struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; >- struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; >- >- piocbq->context2 = NULL; >- piocbq->context3 = NULL; >+ struct list_head head; >+ struct lpfc_dmabuf *bdeBuf; > > if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { > lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); >@@ -122,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phb > /* Not enough posted buffers; Try posting more buffers */ > phba->fc_stat.NoRcvBuf++; > if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) >- lpfc_post_buffer(phba, pring, 0, 1); >+ lpfc_post_buffer(phba, pring, 2, 1); > return; > } > >@@ -133,50 +112,46 @@ lpfc_ct_unsol_event(struct lpfc_hba *phb > return; > > if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { >- list_for_each_entry(iocbq, &piocbq->list, list) { >+ INIT_LIST_HEAD(&head); >+ list_add_tail(&head, &piocbq->list); >+ list_for_each_entry(iocbq, &head, list) { > icmd = &iocbq->iocb; >- if (icmd->ulpBdeCount == 0) { >- printk(KERN_ERR "%s (%d): Unsolited CT, no " >- "BDE, iocbq = %p, status = x%x\n", >- __FUNCTION__, __LINE__, >- iocbq, iocbq->iocb.ulpStatus); >+ if 
(icmd->ulpBdeCount == 0) > continue; >- } >- >+ bdeBuf = iocbq->context2; >+ iocbq->context2 = NULL; > size = icmd->un.cont64[0].tus.f.bdeSize; >- lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size); >- lpfc_in_buf_free(phba, bdeBuf1); >+ lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size); >+ lpfc_in_buf_free(phba, bdeBuf); > if (icmd->ulpBdeCount == 2) { >- lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2, >- size); >- lpfc_in_buf_free(phba, bdeBuf2); >+ bdeBuf = iocbq->context3; >+ iocbq->context3 = NULL; >+ size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize; >+ lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, >+ size); >+ lpfc_in_buf_free(phba, bdeBuf); > } > } >+ list_del(&head); > } else { >- struct lpfc_iocbq *next; >- >- list_for_each_entry_safe(iocbq, next, &piocbq->list, list) { >+ INIT_LIST_HEAD(&head); >+ list_add_tail(&head, &piocbq->list); >+ list_for_each_entry(iocbq, &head, list) { > icmd = &iocbq->iocb; >- if (icmd->ulpBdeCount == 0) { >- printk(KERN_ERR "%s (%d): Unsolited CT, no " >- "BDE, iocbq = %p, status = x%x\n", >- __FUNCTION__, __LINE__, >- iocbq, iocbq->iocb.ulpStatus); >- continue; >- } >- >+ if (icmd->ulpBdeCount == 0) >+ lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0); > for (i = 0; i < icmd->ulpBdeCount; i++) { > paddr = getPaddr(icmd->un.cont64[i].addrHigh, > icmd->un.cont64[i].addrLow); > mp = lpfc_sli_ringpostbuf_get(phba, pring, > paddr); > size = icmd->un.cont64[i].tus.f.bdeSize; >- lpfc_ct_unsol_buffer(phba, piocbq, mp, size); >+ lpfc_ct_unsol_buffer(phba, iocbq, mp, size); > lpfc_in_buf_free(phba, mp); > } >- list_del(&iocbq->list); >- lpfc_sli_release_iocbq(phba, iocbq); >+ lpfc_post_buffer(phba, pring, i, 1); > } >+ list_del(&head); > } > } > >@@ -203,7 +178,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, > struct lpfc_dmabuf *mp; > int cnt, i = 0; > >- /* We get chucks of FCELSSIZE */ >+ /* We get chunks of FCELSSIZE */ > cnt = size > FCELSSIZE ? 
FCELSSIZE: size; > > while (size) { >@@ -318,7 +293,7 @@ lpfc_gen_req(struct lpfc_vport *vport, s > /* Save for completion so we can release these resources */ > geniocb->context1 = (uint8_t *) inp; > geniocb->context2 = (uint8_t *) outp; >- geniocb->context_un.ndlp = ndlp; >+ geniocb->context_un.ndlp = lpfc_nlp_get(ndlp); > > /* Fill in payload, bp points to frame payload */ > icmd->ulpCommand = CMD_GEN_REQUEST64_CR; >@@ -426,6 +401,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, st > > lpfc_set_disctmo(vport); > vport->num_disc_nodes = 0; >+ vport->fc_ns_retry = 0; > > > list_add_tail(&head, &mp->list); >@@ -458,10 +434,10 @@ lpfc_ns_rsp(struct lpfc_vport *vport, st > ((lpfc_find_vport_by_did(phba, Did) == NULL) || > vport->cfg_peer_port_login)) { > if ((vport->port_type != LPFC_NPIV_PORT) || >- (vport->fc_flag & FC_RFF_NOT_SUPPORTED) || >+ (!(vport->ct_flags & FC_CT_RFF_ID)) || > (!vport->cfg_restrict_login)) { > ndlp = lpfc_setup_disc_node(vport, Did); >- if (ndlp) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { > lpfc_debugfs_disc_trc(vport, > LPFC_DISC_TRC_CT, > "Parse GID_FTrsp: " >@@ -506,7 +482,19 @@ lpfc_ns_rsp(struct lpfc_vport *vport, st > Did, vport->fc_flag, > vport->fc_rscn_id_cnt); > >- if (lpfc_ns_cmd(vport, >+ /* This NPortID was previously >+ * a FCP target, * Don't even >+ * bother to send GFF_ID. 
>+ */ >+ ndlp = lpfc_findnode_did(vport, >+ Did); >+ if (ndlp && >+ NLP_CHK_NODE_ACT(ndlp) >+ && (ndlp->nlp_type & >+ NLP_FCP_TARGET)) >+ lpfc_setup_disc_node >+ (vport, Did); >+ else if (lpfc_ns_cmd(vport, > SLI_CTNS_GFF_ID, > 0, Did) == 0) > vport->num_disc_nodes++; >@@ -574,20 +562,24 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba > if (vport->load_flag & FC_UNLOADING) > goto out; > >- >- if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) { >+ if (lpfc_els_chk_latt(vport)) { > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, > "0216 Link event during NS query\n"); > lpfc_vport_set_state(vport, FC_VPORT_FAILED); > goto out; > } >- >+ if (lpfc_error_lost_link(irsp)) { >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >+ "0226 NS query failed due to link event\n"); >+ goto out; >+ } > if (irsp->ulpStatus) { > /* Check for retry */ > if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { >- if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || >- (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)) >+ if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || >+ irsp->un.ulpWord[4] != IOERR_NO_RESOURCES) > vport->fc_ns_retry++; >+ > /* CT command is being retried */ > rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, > vport->fc_ns_retry, 0); >@@ -698,7 +690,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba > struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1; > struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2; > struct lpfc_sli_ct_request *CTrsp; >- int did; >+ int did, rc, retry; > uint8_t fbits; > struct lpfc_nodelist *ndlp; > >@@ -729,16 +721,49 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba > } > } > else { >+ /* Check for retry */ >+ if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { >+ retry = 1; >+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { >+ switch (irsp->un.ulpWord[4]) { >+ case IOERR_NO_RESOURCES: >+ /* We don't increment the retry >+ * count for this case. 
>+ */ >+ break; >+ case IOERR_LINK_DOWN: >+ case IOERR_SLI_ABORTED: >+ case IOERR_SLI_DOWN: >+ retry = 0; >+ break; >+ default: >+ cmdiocb->retry++; >+ } >+ } >+ else >+ cmdiocb->retry++; >+ >+ if (retry) { >+ /* CT command is being retried */ >+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, >+ cmdiocb->retry, did); >+ if (rc == 0) { >+ /* success */ >+ lpfc_ct_free_iocb(phba, cmdiocb); >+ return; >+ } >+ } >+ } > lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, > "0267 NameServer GFF Rsp " > "x%x Error (%d %d) Data: x%x x%x\n", > did, irsp->ulpStatus, irsp->un.ulpWord[4], >- vport->fc_flag, vport->fc_rscn_id_cnt) >+ vport->fc_flag, vport->fc_rscn_id_cnt); > } > > /* This is a target port, unregistered port, or the GFF_ID failed */ > ndlp = lpfc_setup_disc_node(vport, did); >- if (ndlp) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, > "0242 Process x%x GFF " > "NameServer Rsp Data: x%x x%x x%x\n", >@@ -778,8 +803,8 @@ out: > > > static void >-lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >- struct lpfc_iocbq *rspiocb) >+lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_iocbq *rspiocb) > { > struct lpfc_vport *vport = cmdiocb->vport; > struct lpfc_dmabuf *inp; >@@ -809,7 +834,7 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba > > /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >- "0209 RFT request completes, latt %d, " >+ "0209 CT Request completes, latt %d, " > "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n", > latt, irsp->ulpStatus, > CTrsp->CommandResponse.bits.CmdRsp, >@@ -835,7 +860,7 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba > > retry++; > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >- "0216 Retrying NS cmd %x\n", cmdcode); >+ "0250 Retrying NS cmd %x\n", cmdcode); > rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); > if (rc == 0) > goto out; >@@ -848,10 +873,44 @@ out: > } > > static void 
>+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_iocbq *rspiocb) >+{ >+ IOCB_t *irsp = &rspiocb->iocb; >+ struct lpfc_vport *vport = cmdiocb->vport; >+ >+ if (irsp->ulpStatus == IOSTAT_SUCCESS) { >+ struct lpfc_dmabuf *outp; >+ struct lpfc_sli_ct_request *CTrsp; >+ >+ outp = (struct lpfc_dmabuf *) cmdiocb->context2; >+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; >+ if (CTrsp->CommandResponse.bits.CmdRsp == >+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) >+ vport->ct_flags |= FC_CT_RFT_ID; >+ } >+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb); >+ return; >+} >+ >+static void > lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, > struct lpfc_iocbq *rspiocb) > { >- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); >+ IOCB_t *irsp = &rspiocb->iocb; >+ struct lpfc_vport *vport = cmdiocb->vport; >+ >+ if (irsp->ulpStatus == IOSTAT_SUCCESS) { >+ struct lpfc_dmabuf *outp; >+ struct lpfc_sli_ct_request *CTrsp; >+ >+ outp = (struct lpfc_dmabuf *) cmdiocb->context2; >+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; >+ if (CTrsp->CommandResponse.bits.CmdRsp == >+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) >+ vport->ct_flags |= FC_CT_RNN_ID; >+ } >+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb); > return; > } > >@@ -859,7 +918,20 @@ static void > lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, > struct lpfc_iocbq *rspiocb) > { >- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); >+ IOCB_t *irsp = &rspiocb->iocb; >+ struct lpfc_vport *vport = cmdiocb->vport; >+ >+ if (irsp->ulpStatus == IOSTAT_SUCCESS) { >+ struct lpfc_dmabuf *outp; >+ struct lpfc_sli_ct_request *CTrsp; >+ >+ outp = (struct lpfc_dmabuf *) cmdiocb->context2; >+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; >+ if (CTrsp->CommandResponse.bits.CmdRsp == >+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) >+ vport->ct_flags |= FC_CT_RSPN_ID; >+ } >+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb); > return; > } > >@@ -867,7 +939,32 @@ static void > 
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, > struct lpfc_iocbq *rspiocb) > { >- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); >+ IOCB_t *irsp = &rspiocb->iocb; >+ struct lpfc_vport *vport = cmdiocb->vport; >+ >+ if (irsp->ulpStatus == IOSTAT_SUCCESS) { >+ struct lpfc_dmabuf *outp; >+ struct lpfc_sli_ct_request *CTrsp; >+ >+ outp = (struct lpfc_dmabuf *) cmdiocb->context2; >+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; >+ if (CTrsp->CommandResponse.bits.CmdRsp == >+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) >+ vport->ct_flags |= FC_CT_RSNN_NN; >+ } >+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb); >+ return; >+} >+ >+static void >+lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_iocbq *rspiocb) >+{ >+ struct lpfc_vport *vport = cmdiocb->vport; >+ >+ /* even if it fails we will act as though it succeeded. */ >+ vport->ct_flags = 0; >+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb); > return; > } > >@@ -878,14 +975,21 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba > IOCB_t *irsp = &rspiocb->iocb; > struct lpfc_vport *vport = cmdiocb->vport; > >- if (irsp->ulpStatus != IOSTAT_SUCCESS) >- vport->fc_flag |= FC_RFF_NOT_SUPPORTED; >+ if (irsp->ulpStatus == IOSTAT_SUCCESS) { >+ struct lpfc_dmabuf *outp; >+ struct lpfc_sli_ct_request *CTrsp; > >- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); >+ outp = (struct lpfc_dmabuf *) cmdiocb->context2; >+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; >+ if (CTrsp->CommandResponse.bits.CmdRsp == >+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) >+ vport->ct_flags |= FC_CT_RFF_ID; >+ } >+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb); > return; > } > >-static int >+int > lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, > size_t size) > { >@@ -945,7 +1049,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, in > int rc = 0; > > ndlp = lpfc_findnode_did(vport, NameServer_DID); >- if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) >+ 
|| ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { > rc=1; > goto ns_cmd_exit; > } >@@ -1001,6 +1106,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, in > bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; > else if (cmdcode == SLI_CTNS_RSNN_NN) > bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; >+ else if (cmdcode == SLI_CTNS_DA_ID) >+ bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ; > else if (cmdcode == SLI_CTNS_RFF_ID) > bpl->tus.f.bdeSize = RFF_REQUEST_SZ; > else >@@ -1029,31 +1136,34 @@ lpfc_ns_cmd(struct lpfc_vport *vport, in > case SLI_CTNS_GFF_ID: > CtReq->CommandResponse.bits.CmdRsp = > be16_to_cpu(SLI_CTNS_GFF_ID); >- CtReq->un.gff.PortId = be32_to_cpu(context); >+ CtReq->un.gff.PortId = cpu_to_be32(context); > cmpl = lpfc_cmpl_ct_cmd_gff_id; > break; > > case SLI_CTNS_RFT_ID: >+ vport->ct_flags &= ~FC_CT_RFT_ID; > CtReq->CommandResponse.bits.CmdRsp = > be16_to_cpu(SLI_CTNS_RFT_ID); >- CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID); >+ CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID); > CtReq->un.rft.fcpReg = 1; > cmpl = lpfc_cmpl_ct_cmd_rft_id; > break; > > case SLI_CTNS_RNN_ID: >+ vport->ct_flags &= ~FC_CT_RNN_ID; > CtReq->CommandResponse.bits.CmdRsp = > be16_to_cpu(SLI_CTNS_RNN_ID); >- CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID); >+ CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); > memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, > sizeof (struct lpfc_name)); > cmpl = lpfc_cmpl_ct_cmd_rnn_id; > break; > > case SLI_CTNS_RSPN_ID: >+ vport->ct_flags &= ~FC_CT_RSPN_ID; > CtReq->CommandResponse.bits.CmdRsp = > be16_to_cpu(SLI_CTNS_RSPN_ID); >- CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID); >+ CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID); > size = sizeof(CtReq->un.rspn.symbname); > CtReq->un.rspn.len = > lpfc_vport_symbolic_port_name(vport, >@@ -1061,6 +1171,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, in > cmpl = lpfc_cmpl_ct_cmd_rspn_id; > break; > case SLI_CTNS_RSNN_NN: >+ vport->ct_flags &= ~FC_CT_RSNN_NN; > CtReq->CommandResponse.bits.CmdRsp = > 
be16_to_cpu(SLI_CTNS_RSNN_NN); > memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, >@@ -1071,18 +1182,26 @@ lpfc_ns_cmd(struct lpfc_vport *vport, in > CtReq->un.rsnn.symbname, size); > cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; > break; >+ case SLI_CTNS_DA_ID: >+ /* Implement DA_ID Nameserver request */ >+ CtReq->CommandResponse.bits.CmdRsp = >+ be16_to_cpu(SLI_CTNS_DA_ID); >+ CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID); >+ cmpl = lpfc_cmpl_ct_cmd_da_id; >+ break; > case SLI_CTNS_RFF_ID: >- vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED; >+ vport->ct_flags &= ~FC_CT_RFF_ID; > CtReq->CommandResponse.bits.CmdRsp = > be16_to_cpu(SLI_CTNS_RFF_ID); >- CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);; >+ CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);; > CtReq->un.rff.fbits = FC4_FEATURE_INIT; > CtReq->un.rff.type_code = FC_FCP_DATA; > cmpl = lpfc_cmpl_ct_cmd_rff_id; > break; > } >- lpfc_nlp_get(ndlp); >- >+ /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count >+ * to hold ndlp reference for the corresponding callback function. >+ */ > if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { > /* On success, The cmpl function will free the buffers */ > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, >@@ -1090,9 +1209,13 @@ lpfc_ns_cmd(struct lpfc_vport *vport, in > cmdcode, ndlp->nlp_DID, 0); > return 0; > } >- > rc=6; >+ >+ /* Decrement ndlp reference count to release ndlp reference held >+ * for the failed command's callback function. 
>+ */ > lpfc_nlp_put(ndlp); >+ > lpfc_mbuf_free(phba, bmp->virt, bmp->phys); > ns_cmd_free_bmp: > kfree(bmp); >@@ -1139,6 +1262,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *p > } > > ndlp = lpfc_findnode_did(vport, FDMI_DID); >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) >+ goto fail_out; >+ > if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { > /* FDMI rsp failed */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >@@ -1162,6 +1288,8 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *p > lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); > break; > } >+ >+fail_out: > lpfc_ct_free_iocb(phba, cmdiocb); > return; > } >@@ -1518,12 +1646,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, > bpl->tus.w = le32_to_cpu(bpl->tus.w); > > cmpl = lpfc_cmpl_ct_cmd_fdmi; >- lpfc_nlp_get(ndlp); > >+ /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count >+ * to hold ndlp reference for the corresponding callback function. >+ */ > if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) > return 0; > >+ /* Decrement ndlp reference count to release ndlp reference held >+ * for the failed command's callback function. 
>+ */ > lpfc_nlp_put(ndlp); >+ > lpfc_mbuf_free(phba, bmp->virt, bmp->phys); > fdmi_cmd_free_bmp: > kfree(bmp); >@@ -1544,20 +1678,18 @@ lpfc_fdmi_tmo(unsigned long ptr) > { > struct lpfc_vport *vport = (struct lpfc_vport *)ptr; > struct lpfc_hba *phba = vport->phba; >+ uint32_t tmo_posted; > unsigned long iflag; > > spin_lock_irqsave(&vport->work_port_lock, iflag); >- if (!(vport->work_port_events & WORKER_FDMI_TMO)) { >+ tmo_posted = vport->work_port_events & WORKER_FDMI_TMO; >+ if (!tmo_posted) > vport->work_port_events |= WORKER_FDMI_TMO; >- spin_unlock_irqrestore(&vport->work_port_lock, iflag); >+ spin_unlock_irqrestore(&vport->work_port_lock, iflag); > >- spin_lock_irqsave(&phba->hbalock, iflag); >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); >- spin_unlock_irqrestore(&phba->hbalock, iflag); >- } >- else >- spin_unlock_irqrestore(&vport->work_port_lock, iflag); >+ if (!tmo_posted) >+ lpfc_worker_wake_up(phba); >+ return; > } > > void >@@ -1566,7 +1698,7 @@ lpfc_fdmi_timeout_handler(struct lpfc_vp > struct lpfc_nodelist *ndlp; > > ndlp = lpfc_findnode_did(vport, FDMI_DID); >- if (ndlp) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { > if (init_utsname()->nodename[0] != '\0') > lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); > else >diff -urpN a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c >--- a/drivers/scsi/lpfc/lpfc_debugfs.c 2008-09-05 17:47:41.646245000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_debugfs.c 2008-09-05 17:47:49.652876000 -0400 >@@ -43,6 +43,7 @@ > #include "lpfc_crtn.h" > #include "lpfc_vport.h" > #include "lpfc_version.h" >+#include "lpfc_compat.h" > #include "lpfc_debugfs.h" > > #ifdef CONFIG_LPFC_DEBUG_FS >@@ -75,18 +76,18 @@ module_param(lpfc_debugfs_enable, int, 0 > MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); > > /* This MUST be a power of 2 */ >-static int lpfc_debugfs_max_disc_trc = 0; >+static int lpfc_debugfs_max_disc_trc; > module_param(lpfc_debugfs_max_disc_trc, int, 0); > 
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, > "Set debugfs discovery trace depth"); > > /* This MUST be a power of 2 */ >-static int lpfc_debugfs_max_slow_ring_trc = 0; >+static int lpfc_debugfs_max_slow_ring_trc; > module_param(lpfc_debugfs_max_slow_ring_trc, int, 0); > MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, > "Set debugfs slow ring trace depth"); > >-static int lpfc_debugfs_mask_disc_trc = 0; >+int lpfc_debugfs_mask_disc_trc; > module_param(lpfc_debugfs_mask_disc_trc, int, 0); > MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, > "Set debugfs discovery trace mask"); >@@ -100,8 +101,11 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_ > #define LPFC_NODELIST_SIZE 8192 > #define LPFC_NODELIST_ENTRY_SIZE 120 > >-/* dumpslim output buffer size */ >-#define LPFC_DUMPSLIM_SIZE 4096 >+/* dumpHBASlim output buffer size */ >+#define LPFC_DUMPHBASLIM_SIZE 4096 >+ >+/* dumpHostSlim output buffer size */ >+#define LPFC_DUMPHOSTSLIM_SIZE 4096 > > /* hbqinfo output buffer size */ > #define LPFC_HBQINFO_SIZE 8192 >@@ -243,16 +247,17 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hb > raw_index = phba->hbq_get[i]; > getidx = le32_to_cpu(raw_index); > len += snprintf(buf+len, size-len, >- "entrys:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", >- hbqs->entry_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx, >- hbqs->local_hbqGetIdx, getidx); >+ "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", >+ hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx, >+ hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx); > > hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt; > for (j=0; j<hbqs->entry_count; j++) { > len += snprintf(buf+len, size-len, > "%03d: %08x %04x %05x ", j, >- hbqe->bde.addrLow, hbqe->bde.tus.w, hbqe->buffer_tag); >- >+ le32_to_cpu(hbqe->bde.addrLow), >+ le32_to_cpu(hbqe->bde.tus.w), >+ le32_to_cpu(hbqe->buffer_tag)); > i = 0; > found = 0; > >@@ -276,7 +281,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hb > list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) { > 
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); > phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); >- if (phys == hbqe->bde.addrLow) { >+ if (phys == le32_to_cpu(hbqe->bde.addrLow)) { > len += snprintf(buf+len, size-len, > "Buf%d: %p %06x\n", i, > hbq_buf->dbuf.virt, hbq_buf->tag); >@@ -297,18 +302,58 @@ skipit: > return len; > } > >+static int lpfc_debugfs_last_hba_slim_off; >+ > static int >-lpfc_debugfs_dumpslim_data(struct lpfc_hba *phba, char *buf, int size) >+lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) > { > int len = 0; >- int cnt, i, off; >+ int i, off; >+ uint32_t *ptr; >+ char buffer[1024]; >+ >+ off = 0; >+ spin_lock_irq(&phba->hbalock); >+ >+ len += snprintf(buf+len, size-len, "HBA SLIM\n"); >+ lpfc_memcpy_from_slim(buffer, >+ ((uint8_t *)phba->MBslimaddr) + lpfc_debugfs_last_hba_slim_off, >+ 1024); >+ >+ ptr = (uint32_t *)&buffer[0]; >+ off = lpfc_debugfs_last_hba_slim_off; >+ >+ /* Set it up for the next time */ >+ lpfc_debugfs_last_hba_slim_off += 1024; >+ if (lpfc_debugfs_last_hba_slim_off >= 4096) >+ lpfc_debugfs_last_hba_slim_off = 0; >+ >+ i = 1024; >+ while (i > 0) { >+ len += snprintf(buf+len, size-len, >+ "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", >+ off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), >+ *(ptr+5), *(ptr+6), *(ptr+7)); >+ ptr += 8; >+ i -= (8 * sizeof(uint32_t)); >+ off += (8 * sizeof(uint32_t)); >+ } >+ >+ spin_unlock_irq(&phba->hbalock); >+ return len; >+} >+ >+static int >+lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) >+{ >+ int len = 0; >+ int i, off; > uint32_t word0, word1, word2, word3; > uint32_t *ptr; > struct lpfc_pgp *pgpp; > struct lpfc_sli *psli = &phba->sli; > struct lpfc_sli_ring *pring; > >- cnt = LPFC_DUMPSLIM_SIZE; > off = 0; > spin_lock_irq(&phba->hbalock); > >@@ -458,6 +503,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_v > ndlp->nlp_sid); > if (ndlp->nlp_type & NLP_FCP_INITIATOR) > len += snprintf(buf+len, size-len, "FCP_INITIATOR "); 
>+ len += snprintf(buf+len, size-len, "usgmap:%x ", >+ ndlp->nlp_usg_map); > len += snprintf(buf+len, size-len, "refcnt:%x", > atomic_read(&ndlp->kref.refcount)); > len += snprintf(buf+len, size-len, "\n"); >@@ -620,7 +667,7 @@ out: > } > > static int >-lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file) >+lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) > { > struct lpfc_hba *phba = inode->i_private; > struct lpfc_debug *debug; >@@ -631,14 +678,41 @@ lpfc_debugfs_dumpslim_open(struct inode > goto out; > > /* Round to page boundry */ >- debug->buffer = kmalloc(LPFC_DUMPSLIM_SIZE, GFP_KERNEL); >+ debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); > if (!debug->buffer) { > kfree(debug); > goto out; > } > >- debug->len = lpfc_debugfs_dumpslim_data(phba, debug->buffer, >- LPFC_DUMPSLIM_SIZE); >+ debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer, >+ LPFC_DUMPHBASLIM_SIZE); >+ file->private_data = debug; >+ >+ rc = 0; >+out: >+ return rc; >+} >+ >+static int >+lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) >+{ >+ struct lpfc_hba *phba = inode->i_private; >+ struct lpfc_debug *debug; >+ int rc = -ENOMEM; >+ >+ debug = kmalloc(sizeof(*debug), GFP_KERNEL); >+ if (!debug) >+ goto out; >+ >+ /* Round to page boundry */ >+ debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); >+ if (!debug->buffer) { >+ kfree(debug); >+ goto out; >+ } >+ >+ debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer, >+ LPFC_DUMPHOSTSLIM_SIZE); > file->private_data = debug; > > rc = 0; >@@ -741,10 +815,19 @@ static struct file_operations lpfc_debug > .release = lpfc_debugfs_release, > }; > >-#undef lpfc_debugfs_op_dumpslim >-static struct file_operations lpfc_debugfs_op_dumpslim = { >+#undef lpfc_debugfs_op_dumpHBASlim >+static struct file_operations lpfc_debugfs_op_dumpHBASlim = { > .owner = THIS_MODULE, >- .open = lpfc_debugfs_dumpslim_open, >+ .open = lpfc_debugfs_dumpHBASlim_open, >+ .llseek = 
lpfc_debugfs_lseek, >+ .read = lpfc_debugfs_read, >+ .release = lpfc_debugfs_release, >+}; >+ >+#undef lpfc_debugfs_op_dumpHostSlim >+static struct file_operations lpfc_debugfs_op_dumpHostSlim = { >+ .owner = THIS_MODULE, >+ .open = lpfc_debugfs_dumpHostSlim_open, > .llseek = lpfc_debugfs_lseek, > .read = lpfc_debugfs_read, > .release = lpfc_debugfs_release, >@@ -780,7 +863,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor > atomic_set(&lpfc_debugfs_hba_count, 0); > if (!lpfc_debugfs_root) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs root\n"); >+ "0408 Cannot create debugfs root\n"); > goto debug_failed; > } > } >@@ -794,7 +877,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor > debugfs_create_dir(name, lpfc_debugfs_root); > if (!phba->hba_debugfs_root) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs hba\n"); >+ "0412 Cannot create debugfs hba\n"); > goto debug_failed; > } > atomic_inc(&lpfc_debugfs_hba_count); >@@ -808,19 +891,31 @@ lpfc_debugfs_initialize(struct lpfc_vpor > phba, &lpfc_debugfs_op_hbqinfo); > if (!phba->debug_hbqinfo) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs hbqinfo\n"); >+ "0411 Cannot create debugfs hbqinfo\n"); >+ goto debug_failed; >+ } >+ >+ /* Setup dumpHBASlim */ >+ snprintf(name, sizeof(name), "dumpHBASlim"); >+ phba->debug_dumpHBASlim = >+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, >+ phba->hba_debugfs_root, >+ phba, &lpfc_debugfs_op_dumpHBASlim); >+ if (!phba->debug_dumpHBASlim) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >+ "0413 Cannot create debugfs dumpHBASlim\n"); > goto debug_failed; > } > >- /* Setup dumpslim */ >- snprintf(name, sizeof(name), "dumpslim"); >- phba->debug_dumpslim = >+ /* Setup dumpHostSlim */ >+ snprintf(name, sizeof(name), "dumpHostSlim"); >+ phba->debug_dumpHostSlim = > debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, > phba->hba_debugfs_root, >- phba, &lpfc_debugfs_op_dumpslim); >- if 
(!phba->debug_dumpslim) { >+ phba, &lpfc_debugfs_op_dumpHostSlim); >+ if (!phba->debug_dumpHostSlim) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs dumpslim\n"); >+ "0414 Cannot create debugfs dumpHostSlim\n"); > goto debug_failed; > } > >@@ -850,7 +945,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor > phba, &lpfc_debugfs_op_slow_ring_trc); > if (!phba->debug_slow_ring_trc) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs " >+ "0415 Cannot create debugfs " > "slow_ring_trace\n"); > goto debug_failed; > } >@@ -861,7 +956,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor > GFP_KERNEL); > if (!phba->slow_ring_trc) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs " >+ "0416 Cannot create debugfs " > "slow_ring buffer\n"); > goto debug_failed; > } >@@ -878,7 +973,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor > debugfs_create_dir(name, phba->hba_debugfs_root); > if (!vport->vport_debugfs_root) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cant create debugfs"); >+ "0417 Cant create debugfs"); > goto debug_failed; > } > atomic_inc(&phba->debugfs_vport_count); >@@ -907,7 +1002,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor > > if (!vport->disc_trc) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs disc trace " >+ "0418 Cannot create debugfs disc trace " > "buffer\n"); > goto debug_failed; > } >@@ -920,7 +1015,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor > vport, &lpfc_debugfs_op_disc_trc); > if (!vport->debug_disc_trc) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, >- "0409 Cannot create debugfs " >+ "0419 Cannot create debugfs " > "discovery_trace\n"); > goto debug_failed; > } >@@ -970,9 +1065,13 @@ lpfc_debugfs_terminate(struct lpfc_vport > debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ > phba->debug_hbqinfo = NULL; > } >- if (phba->debug_dumpslim) { >- debugfs_remove(phba->debug_dumpslim); /* dumpslim */ >- phba->debug_dumpslim = NULL; >+ if 
(phba->debug_dumpHBASlim) { >+ debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ >+ phba->debug_dumpHBASlim = NULL; >+ } >+ if (phba->debug_dumpHostSlim) { >+ debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ >+ phba->debug_dumpHostSlim = NULL; > } > if (phba->slow_ring_trc) { > kfree(phba->slow_ring_trc); >diff -urpN a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h >--- a/drivers/scsi/lpfc/lpfc_disc.h 2008-09-05 17:47:41.651247000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_disc.h 2008-09-05 17:47:49.817878000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * * >@@ -38,6 +38,7 @@ enum lpfc_work_type { > LPFC_EVT_ELS_RETRY, > LPFC_EVT_DEV_LOSS_DELAY, > LPFC_EVT_DEV_LOSS, >+ LPFC_EVT_REAUTH, > }; > > /* structure used to queue event to the discovery tasklet */ >@@ -74,36 +75,72 @@ struct lpfc_nodelist { > uint8_t nlp_fcp_info; /* class info, bits 0-3 */ > #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ > >+ uint16_t nlp_usg_map; /* ndlp management usage bitmap */ >+#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ >+#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */ >+#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */ >+#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */ >+ > struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ >+ struct timer_list nlp_reauth_tmr; /* Used for re-authentication */ > struct fc_rport *rport; /* Corresponding FC transport > port structure */ > struct lpfc_vport *vport; > struct lpfc_work_evt els_retry_evt; >+ struct lpfc_work_evt els_reauth_evt; > struct lpfc_work_evt dev_loss_evt; > unsigned long 
last_ramp_up_time; /* jiffy of last ramp up */ > unsigned long last_q_full_time; /* jiffy of last queue full */ > struct kref kref; >+ unsigned long unreg_time; /* Last time ndlp was unreged */ > }; > > /* Defines for nlp_flag (uint32) */ >-#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */ >-#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */ >-#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */ >-#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ >-#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ >-#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ >-#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ >-#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ >-#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ >-#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */ >-#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */ >-#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful >+#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ >+#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ >+#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ >+#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ >+#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ >+#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ >+#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ >+#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ >+#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ >+#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ >+#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ >+#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ >+#define 
NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful > ACC */ >-#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from >+#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from > NPR list */ >-#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */ >-#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ >+#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ >+#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ > #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ >+#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ >+ >+/* ndlp usage management macros */ >+#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ >+ & NLP_USG_NODE_ACT_BIT) \ >+ && \ >+ !((ndlp)->nlp_usg_map \ >+ & NLP_USG_FREE_ACK_BIT)) >+#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ >+ |= NLP_USG_NODE_ACT_BIT) >+#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ >+ = NLP_USG_NODE_ACT_BIT) >+#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ >+ &= ~NLP_USG_NODE_ACT_BIT) >+#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ >+ & NLP_USG_IACT_REQ_BIT) >+#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ >+ |= NLP_USG_IACT_REQ_BIT) >+#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ >+ & NLP_USG_FREE_REQ_BIT) >+#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ >+ |= NLP_USG_FREE_REQ_BIT) >+#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ >+ & NLP_USG_FREE_ACK_BIT) >+#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ >+ |= NLP_USG_FREE_ACK_BIT) > > /* There are 4 different double linked lists nodelist entries can reside on. 
> * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used >diff -urpN a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c >--- a/drivers/scsi/lpfc/lpfc_els.c 2008-09-05 17:47:41.666244000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_els.c 2008-09-05 17:47:49.791876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -18,7 +18,7 @@ > * more details, a copy of which can be found in the file COPYING * > * included with this package. * > *******************************************************************/ >- >+/* See Fibre Channel protocol T11 FC-LS for details */ > #include <linux/blkdev.h> > #include <linux/pci.h> > #include <linux/interrupt.h> >@@ -37,6 +37,9 @@ > #include "lpfc_crtn.h" > #include "lpfc_vport.h" > #include "lpfc_debugfs.h" >+#include "lpfc_auth_access.h" >+#include "lpfc_auth.h" >+#include "lpfc_security.h" > > static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, > struct lpfc_iocbq *); >@@ -84,7 +87,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vpo > return 1; > } > >-static struct lpfc_iocbq * >+struct lpfc_iocbq * > lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, > uint16_t cmdSize, uint8_t retry, > struct lpfc_nodelist *ndlp, uint32_t did, >@@ -105,18 +108,16 @@ lpfc_prep_els_iocb(struct lpfc_vport *vp > > if (elsiocb == NULL) > return NULL; >+ > icmd = &elsiocb->iocb; > > /* fill in BDEs for command */ > /* Allocate buffer for command payload */ >- if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) || >- ((pcmd->virt = lpfc_mbuf_alloc(phba, >- MEM_PRI, &(pcmd->phys))) == 0)) { >- kfree(pcmd); >- 
>- lpfc_sli_release_iocbq(phba, elsiocb); >- return NULL; >- } >+ pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); >+ if (pcmd) >+ pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); >+ if (!pcmd || !pcmd->virt) >+ goto els_iocb_free_pcmb_exit; > > INIT_LIST_HEAD(&pcmd->list); > >@@ -126,32 +127,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vp > if (prsp) > prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, > &prsp->phys); >- if (prsp == 0 || prsp->virt == 0) { >- kfree(prsp); >- lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); >- kfree(pcmd); >- lpfc_sli_release_iocbq(phba, elsiocb); >- return NULL; >- } >+ if (!prsp || !prsp->virt) >+ goto els_iocb_free_prsp_exit; > INIT_LIST_HEAD(&prsp->list); >- } else { >+ } else > prsp = NULL; >- } > > /* Allocate buffer for Buffer ptr list */ > pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); > if (pbuflist) > pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, > &pbuflist->phys); >- if (pbuflist == 0 || pbuflist->virt == 0) { >- lpfc_sli_release_iocbq(phba, elsiocb); >- lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); >- lpfc_mbuf_free(phba, prsp->virt, prsp->phys); >- kfree(pcmd); >- kfree(prsp); >- kfree(pbuflist); >- return NULL; >- } >+ if (!pbuflist || !pbuflist->virt) >+ goto els_iocb_free_pbuf_exit; > > INIT_LIST_HEAD(&pbuflist->list); > >@@ -196,7 +184,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vp > bpl->tus.w = le32_to_cpu(bpl->tus.w); > } > >+ /* prevent preparing iocb with NULL ndlp reference */ > elsiocb->context1 = lpfc_nlp_get(ndlp); >+ if (!elsiocb->context1) >+ goto els_iocb_free_pbuf_exit; > elsiocb->context2 = pcmd; > elsiocb->context3 = pbuflist; > elsiocb->retry = retry; >@@ -222,8 +213,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vp > cmdSize); > } > return elsiocb; >-} > >+els_iocb_free_pbuf_exit: >+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys); >+ kfree(pbuflist); >+ >+els_iocb_free_prsp_exit: >+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); >+ kfree(prsp); >+ >+els_iocb_free_pcmb_exit: >+ 
kfree(pcmd); >+ lpfc_sli_release_iocbq(phba, elsiocb); >+ return NULL; >+} > > static int > lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) >@@ -234,44 +237,63 @@ lpfc_issue_fabric_reglogin(struct lpfc_v > struct lpfc_nodelist *ndlp; > struct serv_parm *sp; > int rc; >+ int err = 0; > > sp = &phba->fc_fabparam; > ndlp = lpfc_findnode_did(vport, Fabric_DID); >- if (!ndlp) >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { >+ err = 1; > goto fail; >+ } > > mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >- if (!mbox) >+ if (!mbox) { >+ err = 2; > goto fail; >+ } > > vport->port_state = LPFC_FABRIC_CFG_LINK; > lpfc_config_link(phba, mbox); > mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; > mbox->vport = vport; > >- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); >- if (rc == MBX_NOT_FINISHED) >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); >+ if (rc == MBX_NOT_FINISHED) { >+ err = 3; > goto fail_free_mbox; >+ } > > mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >- if (!mbox) >+ if (!mbox) { >+ err = 4; > goto fail; >+ } > rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, > 0); >- if (rc) >+ if (rc) { >+ err = 5; > goto fail_free_mbox; >+ } > > mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; > mbox->vport = vport; >+ /* increment the reference count on ndlp to hold reference >+ * for the callback routine. >+ */ > mbox->context2 = lpfc_nlp_get(ndlp); > >- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); >- if (rc == MBX_NOT_FINISHED) >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); >+ if (rc == MBX_NOT_FINISHED) { >+ err = 6; > goto fail_issue_reg_login; >+ } > > return 0; > > fail_issue_reg_login: >+ /* decrement the reference count on ndlp just incremented >+ * for the failed mbox command. 
>+ */ > lpfc_nlp_put(ndlp); > mp = (struct lpfc_dmabuf *) mbox->context1; > lpfc_mbuf_free(phba, mp->virt, mp->phys); >@@ -282,7 +304,7 @@ fail_free_mbox: > fail: > lpfc_vport_set_state(vport, FC_VPORT_FAILED); > lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >- "0249 Cannot issue Register Fabric login\n"); >+ "0249 Cannot issue Register Fabric login: Err %d\n", err); > return -ENXIO; > } > >@@ -360,6 +382,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_v > */ > list_for_each_entry_safe(np, next_np, > &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(np)) >+ continue; > if ((np->nlp_state != NLP_STE_NPR_NODE) || > !(np->nlp_flag & NLP_NPR_ADISC)) > continue; >@@ -370,11 +394,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_v > } > if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { > lpfc_mbx_unreg_vpi(vport); >+ spin_lock_irq(shost->host_lock); > vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; >+ spin_unlock_irq(shost->host_lock); > } > } > >- ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID; > lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); > > if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && >@@ -429,12 +454,14 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vp > > mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; > mbox->vport = vport; >- rc = lpfc_sli_issue_mbox(phba, mbox, >- MBX_NOWAIT | MBX_STOP_IOCB); >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); > if (rc == MBX_NOT_FINISHED) { > mempool_free(mbox, phba->mbox_mem_pool); > goto fail; > } >+ /* Decrement ndlp reference count indicating that ndlp can be >+ * safely released when other references to it are done. 
>+ */ > lpfc_nlp_put(ndlp); > > ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); >@@ -446,22 +473,32 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vp > ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); > if (!ndlp) > goto fail; >- > lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, >+ NLP_STE_UNUSED_NODE); >+ if(!ndlp) >+ goto fail; > } > > memcpy(&ndlp->nlp_portname, &sp->portName, > sizeof(struct lpfc_name)); > memcpy(&ndlp->nlp_nodename, &sp->nodeName, > sizeof(struct lpfc_name)); >+ /* Set state will put ndlp onto node list if not already done */ > lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > spin_lock_irq(shost->host_lock); > ndlp->nlp_flag |= NLP_NPR_2B_DISC; > spin_unlock_irq(shost->host_lock); >- } else { >- /* This side will wait for the PLOGI */ >+ } else >+ /* This side will wait for the PLOGI, decrement ndlp reference >+ * count indicating that ndlp can be released when other >+ * references to it are done. >+ */ > lpfc_nlp_put(ndlp); >- } >+ >+ /* If we are pt2pt with another NPort, force NPIV off! 
*/ >+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; > > spin_lock_irq(shost->host_lock); > vport->fc_flag |= FC_PT2PT; >@@ -484,10 +521,14 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb > struct lpfc_nodelist *ndlp = cmdiocb->context1; > struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; > struct serv_parm *sp; >+ struct lpfc_name wwpn; > int rc; > > /* Check to see if link went down during discovery */ > if (lpfc_els_chk_latt(vport)) { >+ /* One additional decrement on node reference count to >+ * trigger the release of the node >+ */ > lpfc_nlp_put(ndlp); > goto out; > } >@@ -531,7 +572,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb > prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); > > sp = prsp->virt + sizeof(uint32_t); >- >+ if (sp->cmn.security) >+ ndlp->nlp_flag |= NLP_SC_REQ; >+ else >+ ndlp->nlp_flag &= ~NLP_SC_REQ; > /* FLOGI completes successfully */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, > "0101 FLOGI completes sucessfully " >@@ -539,6 +583,20 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb > irsp->un.ulpWord[4], sp->cmn.e_d_tov, > sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); > >+ if (vport->cfg_enable_auth) { >+ u64_to_wwn(AUTH_FABRIC_WWN, wwpn.u.wwn); >+ if (lpfc_get_auth_config(ndlp, &wwpn)) >+ goto flogifail; >+ } else { >+ vport->auth.security_active = 0; >+ if (sp->cmn.security) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1055 Authentication parameter is " >+ "disabled, but is required by " >+ "the fabric.\n"); >+ goto flogifail; >+ } >+ } > if (vport->port_state == LPFC_FLOGI) { > /* > * If Common Service Parameters indicate Nport >@@ -554,6 +612,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb > } > > flogifail: >+ /* One additional decrement to node reference count to trigger >+ * the release of the node >+ */ > lpfc_nlp_put(ndlp); > > if (!lpfc_error_lost_link(irsp)) { >@@ -562,8 +623,13 @@ flogifail: > > /* Start discovery */ > lpfc_disc_start(vport); >+ } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || >+ 
((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && >+ (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) && >+ (phba->link_state != LPFC_CLEAR_LA)) { >+ /* If FLOGI failed enable link interrupt. */ >+ lpfc_issue_clear_la(phba, vport); > } >- > out: > lpfc_els_free_iocb(phba, cmdiocb); > } >@@ -601,6 +667,10 @@ lpfc_issue_els_flogi(struct lpfc_vport * > sp = (struct serv_parm *) pcmd; > > /* Setup CSPs accordingly for Fabric */ >+ >+ if (vport->cfg_enable_auth) >+ sp->cmn.security = 1; >+ > sp->cmn.e_d_tov = 0; > sp->cmn.w2.r_a_tov = 0; > sp->cls1.classValid = 0; >@@ -669,7 +739,8 @@ lpfc_els_abort_flogi(struct lpfc_hba *ph > if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && > icmd->un.elsreq64.bdl.ulpIoTag32) { > ndlp = (struct lpfc_nodelist *)(iocb->context1); >- if (ndlp && (ndlp->nlp_DID == Fabric_DID)) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && >+ (ndlp->nlp_DID == Fabric_DID)) { > lpfc_sli_issue_abort_iotag(phba, pring, iocb); > } > } >@@ -685,6 +756,9 @@ lpfc_initial_flogi(struct lpfc_vport *vp > struct lpfc_hba *phba = vport->phba; > struct lpfc_nodelist *ndlp; > >+ vport->port_state = LPFC_FLOGI; >+ lpfc_set_disctmo(vport); >+ > /* First look for the Fabric ndlp */ > ndlp = lpfc_findnode_did(vport, Fabric_DID); > if (!ndlp) { >@@ -693,12 +767,21 @@ lpfc_initial_flogi(struct lpfc_vport *vp > if (!ndlp) > return 0; > lpfc_nlp_init(vport, ndlp, Fabric_DID); >- } else { >- lpfc_dequeue_node(vport, ndlp); >+ /* Put ndlp onto node list */ >+ lpfc_enqueue_node(vport, ndlp); >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ /* re-setup ndlp without removing from node list */ >+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ return 0; > } >- if (lpfc_issue_els_flogi(vport, ndlp, 0)) { >+ >+ if (lpfc_issue_els_flogi(vport, ndlp, 0)) >+ /* This decrement of reference count to node shall kick off >+ * the release of the node. 
>+ */ > lpfc_nlp_put(ndlp); >- } >+ > return 1; > } > >@@ -708,6 +791,17 @@ lpfc_initial_fdisc(struct lpfc_vport *vp > struct lpfc_hba *phba = vport->phba; > struct lpfc_nodelist *ndlp; > >+ if (vport->cfg_enable_auth) { >+ if (lpfc_security_wait(phba)) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1049 Authentication is enabled but " >+ "authentication service is not " >+ "running\n"); >+ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN; >+ return 0; >+ } >+ } >+ > /* First look for the Fabric ndlp */ > ndlp = lpfc_findnode_did(vport, Fabric_DID); > if (!ndlp) { >@@ -716,15 +810,26 @@ lpfc_initial_fdisc(struct lpfc_vport *vp > if (!ndlp) > return 0; > lpfc_nlp_init(vport, ndlp, Fabric_DID); >- } else { >- lpfc_dequeue_node(vport, ndlp); >+ /* Put ndlp onto node list */ >+ lpfc_enqueue_node(vport, ndlp); >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ /* re-setup ndlp without removing from node list */ >+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ return 0; > } >+ > if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { >+ /* decrement node reference count to trigger the release of >+ * the node. >+ */ > lpfc_nlp_put(ndlp); >+ return 0; > } > return 1; > } >-static void >+ >+void > lpfc_more_plogi(struct lpfc_vport *vport) > { > int sentplogi; >@@ -752,9 +857,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba > { > struct lpfc_vport *vport = ndlp->vport; > struct lpfc_nodelist *new_ndlp; >+ struct lpfc_rport_data *rdata; >+ struct fc_rport *rport; > struct serv_parm *sp; > uint8_t name[sizeof(struct lpfc_name)]; >- uint32_t rc; >+ uint32_t rc, keepDID = 0; > > /* Fabric nodes can have the same WWPN so we don't bother searching > * by WWPN. Just return the ndlp that was given to us. 
>@@ -770,7 +877,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba > */ > new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); > >- if (new_ndlp == ndlp) >+ if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) > return ndlp; > > if (!new_ndlp) { >@@ -781,26 +888,93 @@ lpfc_plogi_confirm_nport(struct lpfc_hba > new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); > if (!new_ndlp) > return ndlp; >- > lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); >- } >+ } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { >+ rc = memcmp(&ndlp->nlp_portname, name, >+ sizeof(struct lpfc_name)); >+ if (!rc) >+ return ndlp; >+ new_ndlp = lpfc_enable_node(vport, new_ndlp, >+ NLP_STE_UNUSED_NODE); >+ if (!new_ndlp) >+ return ndlp; >+ keepDID = new_ndlp->nlp_DID; >+ } else >+ keepDID = new_ndlp->nlp_DID; > > lpfc_unreg_rpi(vport, new_ndlp); > new_ndlp->nlp_DID = ndlp->nlp_DID; > new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; >+ >+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) >+ new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; >+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; >+ >+ /* Set state will put new_ndlp on to node list if not already done */ > lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); > > /* Move this back to NPR state */ >- if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) >+ if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { >+ /* The new_ndlp is replacing ndlp totally, so we need >+ * to put ndlp on UNUSED list and try to free it. >+ */ >+ >+ /* Fix up the rport accordingly */ >+ rport = ndlp->rport; >+ if (rport) { >+ rdata = rport->dd_data; >+ if (rdata->pnode == ndlp) { >+ lpfc_nlp_put(ndlp); >+ ndlp->rport = NULL; >+ rdata->pnode = lpfc_nlp_get(new_ndlp); >+ new_ndlp->rport = rport; >+ } >+ new_ndlp->nlp_type = ndlp->nlp_type; >+ } >+ /* We shall actually free the ndlp with both nlp_DID and >+ * nlp_portname fields equals 0 to avoid any ndlp on the >+ * nodelist never to be used. 
>+ */ >+ if (ndlp->nlp_DID == 0) { >+ spin_lock_irq(&phba->ndlp_lock); >+ NLP_SET_FREE_REQ(ndlp); >+ spin_unlock_irq(&phba->ndlp_lock); >+ } >+ >+ /* Two ndlps cannot have the same did on the nodelist */ >+ ndlp->nlp_DID = keepDID; > lpfc_drop_node(vport, ndlp); >+ } > else { > lpfc_unreg_rpi(vport, ndlp); >- ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ >+ /* Two ndlps cannot have the same did */ >+ ndlp->nlp_DID = keepDID; > lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > } > return new_ndlp; > } > >+void >+lpfc_end_rscn(struct lpfc_vport *vport) >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ >+ if (vport->fc_flag & FC_RSCN_MODE) { >+ /* >+ * Check to see if more RSCNs came in while we were >+ * processing this one. >+ */ >+ if (vport->fc_rscn_id_cnt || >+ (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) >+ lpfc_els_handle_rscn(vport); >+ else { >+ spin_lock_irq(shost->host_lock); >+ vport->fc_flag &= ~FC_RSCN_MODE; >+ spin_unlock_irq(shost->host_lock); >+ } >+ } >+} >+ > static void > lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, > struct lpfc_iocbq *rspiocb) >@@ -822,7 +996,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phb > irsp->un.elsreq64.remoteID); > > ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); >- if (!ndlp) { >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, > "0136 PLOGI completes to NPort x%x " > "with no ndlp. 
Data: x%x x%x x%x\n", >@@ -871,20 +1045,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phb > goto out; > } > /* PLOGI failed */ >- if (ndlp->nlp_DID == NameServer_DID) { >- lpfc_vport_set_state(vport, FC_VPORT_FAILED); >- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >- "0250 Nameserver login error: " >- "0x%x / 0x%x\n", >- irsp->ulpStatus, irsp->un.ulpWord[4]); >- } > /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ >- if (lpfc_error_lost_link(irsp)) { >+ if (lpfc_error_lost_link(irsp)) > rc = NLP_STE_FREED_NODE; >- } else { >+ else > rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, > NLP_EVT_CMPL_PLOGI); >- } > } else { > /* Good status, call state machine */ > prsp = list_entry(((struct lpfc_dmabuf *) >@@ -905,20 +1071,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phb > spin_unlock_irq(shost->host_lock); > > lpfc_can_disctmo(vport); >- if (vport->fc_flag & FC_RSCN_MODE) { >- /* >- * Check to see if more RSCNs came in while >- * we were processing this one. >- */ >- if ((vport->fc_rscn_id_cnt == 0) && >- (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { >- spin_lock_irq(shost->host_lock); >- vport->fc_flag &= ~FC_RSCN_MODE; >- spin_unlock_irq(shost->host_lock); >- } else { >- lpfc_els_handle_rscn(vport); >- } >- } >+ lpfc_end_rscn(vport); > } > } > >@@ -933,6 +1086,7 @@ lpfc_issue_els_plogi(struct lpfc_vport * > struct lpfc_hba *phba = vport->phba; > struct serv_parm *sp; > IOCB_t *icmd; >+ struct lpfc_nodelist *ndlp; > struct lpfc_iocbq *elsiocb; > struct lpfc_sli_ring *pring; > struct lpfc_sli *psli; >@@ -943,8 +1097,13 @@ lpfc_issue_els_plogi(struct lpfc_vport * > psli = &phba->sli; > pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ > >+ ndlp = lpfc_findnode_did(vport, did); >+ if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) >+ ndlp = NULL; >+ >+ /* If ndlp is not NULL, we will bump the reference count on it */ > cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); >- elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did, >+ elsiocb = lpfc_prep_els_iocb(vport, 1, 
cmdsize, retry, ndlp, did, > ELS_CMD_PLOGI); > if (!elsiocb) > return 1; >@@ -1023,18 +1182,15 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba > } > /* PRLI failed */ > /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ >- if (lpfc_error_lost_link(irsp)) { >+ if (lpfc_error_lost_link(irsp)) > goto out; >- } else { >+ else > lpfc_disc_state_machine(vport, ndlp, cmdiocb, > NLP_EVT_CMPL_PRLI); >- } >- } else { >+ } else > /* Good status, call state machine */ > lpfc_disc_state_machine(vport, ndlp, cmdiocb, > NLP_EVT_CMPL_PRLI); >- } >- > out: > lpfc_els_free_iocb(phba, cmdiocb); > return; >@@ -1109,7 +1265,7 @@ lpfc_issue_els_prli(struct lpfc_vport *v > return 0; > } > >-static void >+void > lpfc_more_adisc(struct lpfc_vport *vport) > { > int sentadisc; >@@ -1134,8 +1290,6 @@ lpfc_more_adisc(struct lpfc_vport *vport > static void > lpfc_rscn_disc(struct lpfc_vport *vport) > { >- struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >- > lpfc_can_disctmo(vport); > > /* RSCN discovery */ >@@ -1144,19 +1298,7 @@ lpfc_rscn_disc(struct lpfc_vport *vport) > if (lpfc_els_disc_plogi(vport)) > return; > >- if (vport->fc_flag & FC_RSCN_MODE) { >- /* Check to see if more RSCNs came in while we were >- * processing this one. 
>- */ >- if ((vport->fc_rscn_id_cnt == 0) && >- (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { >- spin_lock_irq(shost->host_lock); >- vport->fc_flag &= ~FC_RSCN_MODE; >- spin_unlock_irq(shost->host_lock); >- } else { >- lpfc_els_handle_rscn(vport); >- } >- } >+ lpfc_end_rscn(vport); > } > > static void >@@ -1215,15 +1357,13 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phb > } > /* ADISC failed */ > /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ >- if (!lpfc_error_lost_link(irsp)) { >+ if (!lpfc_error_lost_link(irsp)) > lpfc_disc_state_machine(vport, ndlp, cmdiocb, > NLP_EVT_CMPL_ADISC); >- } >- } else { >+ } else > /* Good status, call state machine */ > lpfc_disc_state_machine(vport, ndlp, cmdiocb, > NLP_EVT_CMPL_ADISC); >- } > > if (disc && vport->num_disc_nodes) { > /* Check to see if there are more ADISCs to be sent */ >@@ -1383,14 +1523,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba > else > lpfc_disc_state_machine(vport, ndlp, cmdiocb, > NLP_EVT_CMPL_LOGO); >- } else { >+ } else > /* Good status, call state machine. > * This will unregister the rpi if needed. 
> */ > lpfc_disc_state_machine(vport, ndlp, cmdiocb, > NLP_EVT_CMPL_LOGO); >- } >- > out: > lpfc_els_free_iocb(phba, cmdiocb); > return; >@@ -1413,6 +1551,13 @@ lpfc_issue_els_logo(struct lpfc_vport *v > psli = &phba->sli; > pring = &psli->ring[LPFC_ELS_RING]; > >+ spin_lock_irq(shost->host_lock); >+ if (ndlp->nlp_flag & NLP_LOGO_SND) { >+ spin_unlock_irq(shost->host_lock); >+ return 0; >+ } >+ spin_unlock_irq(shost->host_lock); >+ > cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); > elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, > ndlp->nlp_DID, ELS_CMD_LOGO); >@@ -1489,16 +1634,27 @@ lpfc_issue_els_scr(struct lpfc_vport *vp > psli = &phba->sli; > pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ > cmdsize = (sizeof(uint32_t) + sizeof(SCR)); >- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >- if (!ndlp) >- return 1; > >- lpfc_nlp_init(vport, ndlp, nportid); >+ ndlp = lpfc_findnode_did(vport, nportid); >+ if (!ndlp) { >+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >+ if (!ndlp) >+ return 1; >+ lpfc_nlp_init(vport, ndlp, nportid); >+ lpfc_enqueue_node(vport, ndlp); >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ return 1; >+ } > > elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, > ndlp->nlp_DID, ELS_CMD_SCR); > > if (!elsiocb) { >+ /* This will trigger the release of the node just >+ * allocated >+ */ > lpfc_nlp_put(ndlp); > return 1; > } >@@ -1520,10 +1676,17 @@ lpfc_issue_els_scr(struct lpfc_vport *vp > phba->fc_stat.elsXmitSCR++; > elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; > if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { >+ /* The additional lpfc_nlp_put will cause the following >+ * lpfc_els_free_iocb routine to trigger the rlease of >+ * the node. 
>+ */ > lpfc_nlp_put(ndlp); > lpfc_els_free_iocb(phba, elsiocb); > return 1; > } >+ /* This will cause the callback-function lpfc_cmpl_els_cmd to >+ * trigger the release of node. >+ */ > lpfc_nlp_put(ndlp); > return 0; > } >@@ -1546,15 +1709,26 @@ lpfc_issue_els_farpr(struct lpfc_vport * > psli = &phba->sli; > pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ > cmdsize = (sizeof(uint32_t) + sizeof(FARP)); >- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >- if (!ndlp) >- return 1; > >- lpfc_nlp_init(vport, ndlp, nportid); >+ ndlp = lpfc_findnode_did(vport, nportid); >+ if (!ndlp) { >+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >+ if (!ndlp) >+ return 1; >+ lpfc_nlp_init(vport, ndlp, nportid); >+ lpfc_enqueue_node(vport, ndlp); >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ return 1; >+ } > > elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, > ndlp->nlp_DID, ELS_CMD_RNID); > if (!elsiocb) { >+ /* This will trigger the release of the node just >+ * allocated >+ */ > lpfc_nlp_put(ndlp); > return 1; > } >@@ -1577,7 +1751,7 @@ lpfc_issue_els_farpr(struct lpfc_vport * > memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); > memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); > ondlp = lpfc_findnode_did(vport, nportid); >- if (ondlp) { >+ if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { > memcpy(&fp->OportName, &ondlp->nlp_portname, > sizeof(struct lpfc_name)); > memcpy(&fp->OnodeName, &ondlp->nlp_nodename, >@@ -1591,59 +1765,55 @@ lpfc_issue_els_farpr(struct lpfc_vport * > phba->fc_stat.elsXmitFARPR++; > elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; > if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { >+ /* The additional lpfc_nlp_put will cause the following >+ * lpfc_els_free_iocb routine to trigger the release of >+ * the node. 
>+ */ > lpfc_nlp_put(ndlp); > lpfc_els_free_iocb(phba, elsiocb); > return 1; > } >+ /* This will cause the callback-function lpfc_cmpl_els_cmd to >+ * trigger the release of the node. >+ */ > lpfc_nlp_put(ndlp); > return 0; > } > >-static void >-lpfc_end_rscn(struct lpfc_vport *vport) >-{ >- struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >- >- if (vport->fc_flag & FC_RSCN_MODE) { >- /* >- * Check to see if more RSCNs came in while we were >- * processing this one. >- */ >- if (vport->fc_rscn_id_cnt || >- (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) >- lpfc_els_handle_rscn(vport); >- else { >- spin_lock_irq(shost->host_lock); >- vport->fc_flag &= ~FC_RSCN_MODE; >- spin_unlock_irq(shost->host_lock); >- } >- } >-} >- > void > lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) > { > struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ struct lpfc_work_evt *evtp; > >+ if (!(nlp->nlp_flag & NLP_DELAY_TMO)) >+ return; > spin_lock_irq(shost->host_lock); > nlp->nlp_flag &= ~NLP_DELAY_TMO; > spin_unlock_irq(shost->host_lock); > del_timer_sync(&nlp->nlp_delayfunc); > nlp->nlp_last_elscmd = 0; >- >- if (!list_empty(&nlp->els_retry_evt.evt_listp)) >+ if (!list_empty(&nlp->els_retry_evt.evt_listp)) { > list_del_init(&nlp->els_retry_evt.evt_listp); >- >+ /* Decrement nlp reference count held for the delayed retry */ >+ evtp = &nlp->els_retry_evt; >+ lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); >+ } > if (nlp->nlp_flag & NLP_NPR_2B_DISC) { > spin_lock_irq(shost->host_lock); > nlp->nlp_flag &= ~NLP_NPR_2B_DISC; > spin_unlock_irq(shost->host_lock); > if (vport->num_disc_nodes) { >- /* Check to see if there are more >- * PLOGIs to be sent >- */ >- lpfc_more_plogi(vport); >- >+ if (vport->port_state < LPFC_VPORT_READY) { >+ /* Check if there are more ADISCs to be sent */ >+ lpfc_more_adisc(vport); >+ if ((vport->num_disc_nodes == 0) && >+ (vport->fc_npr_cnt)) >+ lpfc_els_disc_plogi(vport); >+ } else { >+ /* Check if there are more 
PLOGIs to be sent */ >+ lpfc_more_plogi(vport); >+ } > if (vport->num_disc_nodes == 0) { > spin_lock_irq(shost->host_lock); > vport->fc_flag &= ~FC_NDISC_ACTIVE; >@@ -1665,22 +1835,21 @@ lpfc_els_retry_delay(unsigned long ptr) > unsigned long flags; > struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; > >- ndlp = (struct lpfc_nodelist *) ptr; >- phba = ndlp->vport->phba; >- evtp = &ndlp->els_retry_evt; >- > spin_lock_irqsave(&phba->hbalock, flags); > if (!list_empty(&evtp->evt_listp)) { > spin_unlock_irqrestore(&phba->hbalock, flags); > return; > } > >- evtp->evt_arg1 = ndlp; >- evtp->evt = LPFC_EVT_ELS_RETRY; >- list_add_tail(&evtp->evt_listp, &phba->work_list); >- if (phba->work_wait) >+ /* We need to hold the node by incrementing the reference >+ * count until the queued work is done >+ */ >+ evtp->evt_arg1 = lpfc_nlp_get(ndlp); >+ if (evtp->evt_arg1) { >+ evtp->evt = LPFC_EVT_ELS_RETRY; >+ list_add_tail(&evtp->evt_listp, &phba->work_list); > lpfc_worker_wake_up(phba); >- >+ } > spin_unlock_irqrestore(&phba->hbalock, flags); > return; > } >@@ -1759,6 +1928,7 @@ lpfc_els_retry(struct lpfc_hba *phba, st > uint32_t *elscmd; > struct ls_rjt stat; > int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; >+ int logerr = 0; > uint32_t cmd = 0; > uint32_t did; > >@@ -1772,13 +1942,14 @@ lpfc_els_retry(struct lpfc_hba *phba, st > cmd = *elscmd++; > } > >- if (ndlp) >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) > did = ndlp->nlp_DID; > else { > /* We should only hit this case for retrying PLOGI */ > did = irsp->un.elsreq64.remoteID; > ndlp = lpfc_findnode_did(vport, did); >- if (!ndlp && (cmd != ELS_CMD_PLOGI)) >+ if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) >+ && (cmd != ELS_CMD_PLOGI)) > return 1; > } > >@@ -1800,21 +1971,19 @@ lpfc_els_retry(struct lpfc_hba *phba, st > break; > > case IOERR_ILLEGAL_COMMAND: >- if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && >- (cmd == ELS_CMD_FDISC)) { >- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >- "0124 FDISC failed (3/6) " >- 
"retrying...\n"); >- lpfc_mbx_unreg_vpi(vport); >- retry = 1; >- /* FDISC retry policy */ >- maxretry = 48; >- if (cmdiocb->retry >= 32) >- delay = 1000; >- } >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "0124 Retry illegal cmd x%x " >+ "retry:x%x delay:x%x\n", >+ cmd, cmdiocb->retry, delay); >+ retry = 1; >+ /* All command's retry policy */ >+ maxretry = 8; >+ if (cmdiocb->retry > 2) >+ delay = 1000; > break; > > case IOERR_NO_RESOURCES: >+ logerr = 1; /* HBA out of resources */ > retry = 1; > if (cmdiocb->retry > 100) > delay = 100; >@@ -1843,6 +2012,7 @@ lpfc_els_retry(struct lpfc_hba *phba, st > > case IOSTAT_NPORT_BSY: > case IOSTAT_FABRIC_BSY: >+ logerr = 1; /* Fabric / Remote NPort out of resources */ > retry = 1; > break; > >@@ -1895,6 +2065,17 @@ lpfc_els_retry(struct lpfc_hba *phba, st > break; > > case LSRJT_LOGICAL_ERR: >+ /* There are some cases where switches return this >+ * error when they are not ready and should be returning >+ * Logical Busy. We should delay every time. >+ */ >+ if (cmd == ELS_CMD_FDISC && >+ stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { >+ maxretry = 3; >+ delay = 1000; >+ retry = 1; >+ break; >+ } > case LSRJT_PROTOCOL_ERR: > if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && > (cmd == ELS_CMD_FDISC) && >@@ -1902,7 +2083,7 @@ lpfc_els_retry(struct lpfc_hba *phba, st > (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) > ) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >- "0123 FDISC Failed (x%x). " >+ "0122 FDISC Failed (x%x). 
" > "Fabric Detected Bad WWN\n", > stat.un.lsRjtError); > lpfc_vport_set_state(vport, >@@ -1923,6 +2104,16 @@ lpfc_els_retry(struct lpfc_hba *phba, st > if (did == FDMI_DID) > retry = 1; > >+ if ((cmd == ELS_CMD_FLOGI) && >+ (phba->fc_topology != TOPOLOGY_LOOP) && >+ !lpfc_error_lost_link(irsp)) { >+ /* FLOGI retry policy */ >+ retry = 1; >+ maxretry = 48; >+ if (cmdiocb->retry >= 32) >+ delay = 1000; >+ } >+ > if ((++cmdiocb->retry) >= maxretry) { > phba->fc_stat.elsRetryExceeded++; > retry = 0; >@@ -1951,7 +2142,7 @@ lpfc_els_retry(struct lpfc_hba *phba, st > } > > phba->fc_stat.elsXmitRetry++; >- if (ndlp && delay) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { > phba->fc_stat.elsDelayRetry++; > ndlp->nlp_retry = cmdiocb->retry; > >@@ -1981,7 +2172,7 @@ lpfc_els_retry(struct lpfc_hba *phba, st > lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); > return 1; > case ELS_CMD_PLOGI: >- if (ndlp) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { > ndlp->nlp_prev_state = ndlp->nlp_state; > lpfc_nlp_set_state(vport, ndlp, > NLP_STE_PLOGI_ISSUE); >@@ -2003,56 +2194,136 @@ lpfc_els_retry(struct lpfc_hba *phba, st > lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); > return 1; >+ case ELS_CMD_AUTH_NEG: >+ case ELS_CMD_DH_CHA: >+ case ELS_CMD_DH_REP: >+ case ELS_CMD_DH_SUC: >+ ndlp->nlp_prev_state = ndlp->nlp_state; >+ ndlp->nlp_state = NLP_STE_NPR_NODE; >+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, >+ "0143 Authentication LS_RJT Logical " >+ "busy\n"); >+ lpfc_start_authentication(vport, ndlp); >+ return 1; > } > } > /* No retry ELS command <elsCmd> to remote NPORT <did> */ >- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ if (logerr) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "0137 No retry ELS command x%x to remote " >+ "NPORT x%x: Out of Resources: Error:x%x/%x\n", >+ cmd, did, irsp->ulpStatus, >+ irsp->un.ulpWord[4]); >+ } >+ else { >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, > "0108 No retry ELS command x%x 
to remote " > "NPORT x%x Retried:%d Error:x%x/%x\n", > cmd, did, cmdiocb->retry, irsp->ulpStatus, > irsp->un.ulpWord[4]); >+ } > return 0; > } > > int >-lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) >+lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) > { >- struct lpfc_dmabuf *buf_ptr, *buf_ptr1; >- >- if (elsiocb->context1) { >- lpfc_nlp_put(elsiocb->context1); >- elsiocb->context1 = NULL; >- } >- /* context2 = cmd, context2->next = rsp, context3 = bpl */ >- if (elsiocb->context2) { >- buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; >- /* Free the response before processing the command. */ >- if (!list_empty(&buf_ptr1->list)) { >- list_remove_head(&buf_ptr1->list, buf_ptr, >- struct lpfc_dmabuf, >- list); >- lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); >- kfree(buf_ptr); >- } >- lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); >- kfree(buf_ptr1); >- } >+ struct lpfc_dmabuf *buf_ptr; > >- if (elsiocb->context3) { >- buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; >+ /* Free the response before processing the command. 
*/ >+ if (!list_empty(&buf_ptr1->list)) { >+ list_remove_head(&buf_ptr1->list, buf_ptr, >+ struct lpfc_dmabuf, >+ list); > lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); > kfree(buf_ptr); > } >- lpfc_sli_release_iocbq(phba, elsiocb); >+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); >+ kfree(buf_ptr1); > return 0; > } > >-static void >-lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >- struct lpfc_iocbq *rspiocb) >+int >+lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) > { >- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; >- struct lpfc_vport *vport = cmdiocb->vport; >+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); >+ kfree(buf_ptr); >+ return 0; >+} >+ >+int >+lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) >+{ >+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1; >+ struct lpfc_nodelist *ndlp; >+ >+ ndlp = (struct lpfc_nodelist *)elsiocb->context1; >+ if (ndlp) { >+ if (ndlp->nlp_flag & NLP_DEFER_RM) { >+ lpfc_nlp_put(ndlp); >+ >+ /* If the ndlp is not being used by another discovery >+ * thread, free it. >+ */ >+ if (!lpfc_nlp_not_used(ndlp)) { >+ /* If ndlp is being used by another discovery >+ * thread, just clear NLP_DEFER_RM >+ */ >+ ndlp->nlp_flag &= ~NLP_DEFER_RM; >+ } >+ } >+ else >+ lpfc_nlp_put(ndlp); >+ elsiocb->context1 = NULL; >+ } >+ /* context2 = cmd, context2->next = rsp, context3 = bpl */ >+ if (elsiocb->context2) { >+ if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { >+ /* Firmware could still be in progress of DMAing >+ * payload, so don't free data buffer till after >+ * a hbeat. 
>+ */ >+ elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; >+ buf_ptr = elsiocb->context2; >+ elsiocb->context2 = NULL; >+ if (buf_ptr) { >+ buf_ptr1 = NULL; >+ spin_lock_irq(&phba->hbalock); >+ if (!list_empty(&buf_ptr->list)) { >+ list_remove_head(&buf_ptr->list, >+ buf_ptr1, struct lpfc_dmabuf, >+ list); >+ INIT_LIST_HEAD(&buf_ptr1->list); >+ list_add_tail(&buf_ptr1->list, >+ &phba->elsbuf); >+ phba->elsbuf_cnt++; >+ } >+ INIT_LIST_HEAD(&buf_ptr->list); >+ list_add_tail(&buf_ptr->list, &phba->elsbuf); >+ phba->elsbuf_cnt++; >+ spin_unlock_irq(&phba->hbalock); >+ } >+ } >+ else { >+ buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; >+ lpfc_els_free_data(phba, buf_ptr1); >+ } >+ } >+ >+ if (elsiocb->context3) { >+ buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; >+ lpfc_els_free_bpl(phba, buf_ptr); >+ } >+ lpfc_sli_release_iocbq(phba, elsiocb); >+ return 0; >+} >+ >+static void >+lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_iocbq *rspiocb) >+{ >+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; >+ struct lpfc_vport *vport = cmdiocb->vport; > IOCB_t *irsp; > > irsp = &rspiocb->iocb; >@@ -2065,15 +2336,20 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * > "Data: x%x x%x x%x\n", > ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, > ndlp->nlp_rpi); >- switch (ndlp->nlp_state) { >- case NLP_STE_UNUSED_NODE: /* node is just allocated */ >- lpfc_drop_node(vport, ndlp); >- break; >- case NLP_STE_NPR_NODE: /* NPort Recovery mode */ >- lpfc_unreg_rpi(vport, ndlp); >- break; >- default: >- break; >+ >+ if (ndlp->nlp_state == NLP_STE_NPR_NODE) { >+ /* NPort Recovery mode or node is just allocated */ >+ if (!lpfc_nlp_not_used(ndlp)) { >+ /* If the ndlp is being used by another discovery >+ * thread, just unregister the RPI. >+ */ >+ lpfc_unreg_rpi(vport, ndlp); >+ } else { >+ /* Indicate the node has already released, should >+ * not reference to it from within lpfc_els_free_iocb. 
>+ */ >+ cmdiocb->context1 = NULL; >+ } > } > lpfc_els_free_iocb(phba, cmdiocb); > return; >@@ -2089,7 +2365,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba * > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); > mempool_free(pmb, phba->mbox_mem_pool); >- lpfc_nlp_put(ndlp); >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { >+ lpfc_nlp_put(ndlp); >+ /* This is the end of the default RPI cleanup logic for this >+ * ndlp. If no other discovery threads are using this ndlp. >+ * we should free all resources associated with it. >+ */ >+ lpfc_nlp_not_used(ndlp); >+ } > return; > } > >@@ -2100,17 +2383,32 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, > struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; > struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; > struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; >- IOCB_t *irsp; >+ IOCB_t *irsp; >+ uint8_t *pcmd; > LPFC_MBOXQ_t *mbox = NULL; > struct lpfc_dmabuf *mp = NULL; >+ uint32_t ls_rjt = 0; > > irsp = &rspiocb->iocb; > > if (cmdiocb->context_un.mbox) > mbox = cmdiocb->context_un.mbox; > >+ /* First determine if this is a LS_RJT cmpl. Note, this callback >+ * function can have cmdiocb->contest1 (ndlp) field set to NULL. >+ */ >+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && >+ (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { >+ /* A LS_RJT associated with Default RPI cleanup has its own >+ * seperate code path. 
>+ */ >+ if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) >+ ls_rjt = 1; >+ } >+ > /* Check to see if link went down during discovery */ >- if (!ndlp || lpfc_els_chk_latt(vport)) { >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) { > if (mbox) { > mp = (struct lpfc_dmabuf *) mbox->context1; > if (mp) { >@@ -2119,6 +2417,16 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, > } > mempool_free(mbox, phba->mbox_mem_pool); > } >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && >+ (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) >+ if (lpfc_nlp_not_used(ndlp)) { >+ ndlp = NULL; >+ /* Indicate the node has already released, >+ * should not reference to it from within >+ * the routine lpfc_els_free_iocb. >+ */ >+ cmdiocb->context1 = NULL; >+ } > goto out; > } > >@@ -2138,6 +2446,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, > if ((rspiocb->iocb.ulpStatus == 0) > && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { > lpfc_unreg_rpi(vport, ndlp); >+ /* Increment reference count to ndlp to hold the >+ * reference to ndlp for the callback function. >+ */ > mbox->context2 = lpfc_nlp_get(ndlp); > mbox->vport = vport; > if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { >@@ -2150,20 +2461,43 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, > lpfc_nlp_set_state(vport, ndlp, > NLP_STE_REG_LOGIN_ISSUE); > } >- if (lpfc_sli_issue_mbox(phba, mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)) >- != MBX_NOT_FINISHED) { >+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) >+ != MBX_NOT_FINISHED) > goto out; >+ else >+ /* Decrement the ndlp reference count we >+ * set for this failed mailbox command. 
>+ */ >+ lpfc_nlp_put(ndlp); >+ >+ /* ELS rsp: Cannot issue reg_login for <NPortid> */ >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "0138 ELS rsp: Cannot issue reg_login for x%x " >+ "Data: x%x x%x x%x\n", >+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, >+ ndlp->nlp_rpi); >+ >+ if (lpfc_nlp_not_used(ndlp)) { >+ ndlp = NULL; >+ /* Indicate node has already been released, >+ * should not reference to it from within >+ * the routine lpfc_els_free_iocb. >+ */ >+ cmdiocb->context1 = NULL; > } >- lpfc_nlp_put(ndlp); >- /* NOTE: we should have messages for unsuccessful >- reglogin */ > } else { > /* Do not drop node for lpfc_els_abort'ed ELS cmds */ > if (!lpfc_error_lost_link(irsp) && > ndlp->nlp_flag & NLP_ACC_REGLOGIN) { >- lpfc_drop_node(vport, ndlp); >- ndlp = NULL; >+ if (lpfc_nlp_not_used(ndlp)) { >+ ndlp = NULL; >+ /* Indicate node has already been >+ * released, should not reference >+ * to it from within the routine >+ * lpfc_els_free_iocb. >+ */ >+ cmdiocb->context1 = NULL; >+ } > } > } > mp = (struct lpfc_dmabuf *) mbox->context1; >@@ -2174,11 +2508,25 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, > mempool_free(mbox, phba->mbox_mem_pool); > } > out: >- if (ndlp) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { > spin_lock_irq(shost->host_lock); > ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); > spin_unlock_irq(shost->host_lock); >+ >+ /* If the node is not being used by another discovery thread, >+ * and we are sending a reject, we are done with it. >+ * Release driver reference count here and free associated >+ * resources. >+ */ >+ if (ls_rjt) >+ if (lpfc_nlp_not_used(ndlp)) >+ /* Indicate node has already been released, >+ * should not reference to it from within >+ * the routine lpfc_els_free_iocb. 
>+ */ >+ cmdiocb->context1 = NULL; > } >+ > lpfc_els_free_iocb(phba, cmdiocb); > return; > } >@@ -2332,7 +2680,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *v > > if (mbox) > elsiocb->context_un.mbox = mbox; >- > /* Xmit ELS RJT <err> response tag <ulpIoTag> */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, > "0129 Xmit ELS RJT x%x response tag x%x " >@@ -2349,14 +2696,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *v > elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; > rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); > >- /* If the node is in the UNUSED state, and we are sending >- * a reject, we are done with it. Release driver reference >- * count here. The outstanding els will release its reference on >- * completion and the node can be freed then. >- */ >- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) >- lpfc_nlp_put(ndlp); >- > if (rc == IOCB_ERROR) { > lpfc_els_free_iocb(phba, elsiocb); > return 1; >@@ -2466,10 +2805,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport > npr = (PRLI *) pcmd; > vpd = &phba->vpd; > /* >- * If our firmware version is 3.20 or later, >- * set the following bits for FC-TAPE support. >+ * If the remote port is a target and our firmware version is 3.20 or >+ * later, set the following bits for FC-TAPE support. 
> */ >- if (vpd->rev.feaLevelHigh >= 0x02) { >+ if ((ndlp->nlp_type & NLP_FCP_TARGET) && >+ (vpd->rev.feaLevelHigh >= 0x02)) { > npr->ConfmComplAllowed = 1; > npr->Retry = 1; > npr->TaskRetryIdReq = 1; >@@ -2587,6 +2927,8 @@ lpfc_els_disc_adisc(struct lpfc_vport *v > > /* go thru NPR nodes and issue any remaining ELS ADISCs */ > list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state == NLP_STE_NPR_NODE && > (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && > (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { >@@ -2624,6 +2966,8 @@ lpfc_els_disc_plogi(struct lpfc_vport *v > > /* go thru NPR nodes and issue any remaining ELS PLOGIs */ > list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state == NLP_STE_NPR_NODE && > (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && > (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && >@@ -2642,7 +2986,10 @@ lpfc_els_disc_plogi(struct lpfc_vport *v > } > } > } >- if (sentplogi == 0) { >+ if (sentplogi) { >+ lpfc_set_disctmo(vport); >+ } >+ else { > spin_lock_irq(shost->host_lock); > vport->fc_flag &= ~FC_NLP_MORE; > spin_unlock_irq(shost->host_lock); >@@ -2657,6 +3004,16 @@ lpfc_els_flush_rscn(struct lpfc_vport *v > struct lpfc_hba *phba = vport->phba; > int i; > >+ spin_lock_irq(shost->host_lock); >+ if (vport->fc_rscn_flush) { >+ /* Another thread is walking fc_rscn_id_list on this vport */ >+ spin_unlock_irq(shost->host_lock); >+ return; >+ } >+ /* Indicate we are walking lpfc_els_flush_rscn on this vport */ >+ vport->fc_rscn_flush = 1; >+ spin_unlock_irq(shost->host_lock); >+ > for (i = 0; i < vport->fc_rscn_id_cnt; i++) { > lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); > vport->fc_rscn_id_list[i] = NULL; >@@ -2666,6 +3023,8 @@ lpfc_els_flush_rscn(struct lpfc_vport *v > vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); > spin_unlock_irq(shost->host_lock); > lpfc_can_disctmo(vport); >+ /* 
Indicate we are done walking this fc_rscn_id_list */ >+ vport->fc_rscn_flush = 0; > } > > int >@@ -2675,6 +3034,7 @@ lpfc_rscn_payload_check(struct lpfc_vpor > D_ID rscn_did; > uint32_t *lp; > uint32_t payload_len, i; >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > > ns_did.un.word = did; > >@@ -2686,6 +3046,15 @@ lpfc_rscn_payload_check(struct lpfc_vpor > if (vport->fc_flag & FC_RSCN_DISCOVERY) > return did; > >+ spin_lock_irq(shost->host_lock); >+ if (vport->fc_rscn_flush) { >+ /* Another thread is walking fc_rscn_id_list on this vport */ >+ spin_unlock_irq(shost->host_lock); >+ return 0; >+ } >+ /* Indicate we are walking fc_rscn_id_list on this vport */ >+ vport->fc_rscn_flush = 1; >+ spin_unlock_irq(shost->host_lock); > for (i = 0; i < vport->fc_rscn_id_cnt; i++) { > lp = vport->fc_rscn_id_list[i]->virt; > payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); >@@ -2696,16 +3065,16 @@ lpfc_rscn_payload_check(struct lpfc_vpor > switch (rscn_did.un.b.resv) { > case 0: /* Single N_Port ID effected */ > if (ns_did.un.word == rscn_did.un.word) >- return did; >+ goto return_did_out; > break; > case 1: /* Whole N_Port Area effected */ > if ((ns_did.un.b.domain == rscn_did.un.b.domain) > && (ns_did.un.b.area == rscn_did.un.b.area)) >- return did; >+ goto return_did_out; > break; > case 2: /* Whole N_Port Domain effected */ > if (ns_did.un.b.domain == rscn_did.un.b.domain) >- return did; >+ goto return_did_out; > break; > default: > /* Unknown Identifier in RSCN node */ >@@ -2714,11 +3083,17 @@ lpfc_rscn_payload_check(struct lpfc_vpor > "RSCN payload Data: x%x\n", > rscn_did.un.word); > case 3: /* Whole Fabric effected */ >- return did; >+ goto return_did_out; > } > } > } >+ /* Indicate we are done with walking fc_rscn_id_list on this vport */ >+ vport->fc_rscn_flush = 0; > return 0; >+return_did_out: >+ /* Indicate we are done with walking fc_rscn_id_list on this vport */ >+ vport->fc_rscn_flush = 0; >+ return did; > } > > static int >@@ -2726,26 +3101,16 @@ 
lpfc_rscn_recovery_check(struct lpfc_vpo > { > struct lpfc_nodelist *ndlp = NULL; > >- /* Look at all nodes effected by pending RSCNs and move >- * them to NPR state. >- */ >- >+ /* Move all affected nodes by pending RSCNs to NPR state. */ > list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { >- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || >- lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) >+ if (!NLP_CHK_NODE_ACT(ndlp) || >+ (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || >+ !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) > continue; >- > lpfc_disc_state_machine(vport, ndlp, NULL, >- NLP_EVT_DEVICE_RECOVERY); >- >- /* >- * Make sure NLP_DELAY_TMO is NOT running after a device >- * recovery event. >- */ >- if (ndlp->nlp_flag & NLP_DELAY_TMO) >- lpfc_cancel_retry_delay_tmo(vport, ndlp); >+ NLP_EVT_DEVICE_RECOVERY); >+ lpfc_cancel_retry_delay_tmo(vport, ndlp); > } >- > return 0; > } > >@@ -2759,7 +3124,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > uint32_t *lp, *datap; > IOCB_t *icmd; > uint32_t payload_len, length, nportid, *cmd; >- int rscn_cnt = vport->fc_rscn_id_cnt; >+ int rscn_cnt; > int rscn_id = 0, hba_id = 0; > int i; > >@@ -2772,7 +3137,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > /* RSCN received */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, > "0214 RSCN received Data: x%x x%x x%x x%x\n", >- vport->fc_flag, payload_len, *lp, rscn_cnt); >+ vport->fc_flag, payload_len, *lp, >+ vport->fc_rscn_id_cnt); > for (i = 0; i < payload_len/sizeof(uint32_t); i++) > fc_host_post_event(shost, fc_get_event_number(), > FCH_EVT_RSCN, lp[i]); >@@ -2807,10 +3173,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > if (rscn_id == hba_id) { > /* ALL NPortIDs in RSCN are on HBA */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >- "0214 Ignore RSCN " >+ "0219 Ignore RSCN " > "Data: x%x x%x x%x x%x\n", > vport->fc_flag, payload_len, >- *lp, rscn_cnt); >+ *lp, vport->fc_rscn_id_cnt); > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, > "RCV RSCN vport: 
did:x%x/ste:x%x flg:x%x", > ndlp->nlp_DID, vport->port_state, >@@ -2822,6 +3188,20 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > } > } > >+ spin_lock_irq(shost->host_lock); >+ if (vport->fc_rscn_flush) { >+ /* Another thread is walking fc_rscn_id_list on this vport */ >+ spin_unlock_irq(shost->host_lock); >+ vport->fc_flag |= FC_RSCN_DISCOVERY; >+ /* Send back ACC */ >+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >+ return 0; >+ } >+ /* Indicate we are walking fc_rscn_id_list on this vport */ >+ vport->fc_rscn_flush = 1; >+ spin_unlock_irq(shost->host_lock); >+ /* Get the array count after sucessfully have the token */ >+ rscn_cnt = vport->fc_rscn_id_cnt; > /* If we are already processing an RSCN, save the received > * RSCN payload buffer, cmdiocb->context2 to process later. > */ >@@ -2830,10 +3210,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", > ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); > >+ spin_lock_irq(shost->host_lock); > vport->fc_flag |= FC_RSCN_DEFERRED; > if ((rscn_cnt < FC_MAX_HOLD_RSCN) && > !(vport->fc_flag & FC_RSCN_DISCOVERY)) { >- spin_lock_irq(shost->host_lock); > vport->fc_flag |= FC_RSCN_MODE; > spin_unlock_irq(shost->host_lock); > if (rscn_cnt) { >@@ -2843,7 +3223,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > if ((rscn_cnt) && > (payload_len + length <= LPFC_BPL_SIZE)) { > *cmd &= ELS_CMD_MASK; >- *cmd |= be32_to_cpu(payload_len + length); >+ *cmd |= cpu_to_be32(payload_len + length); > memcpy(((uint8_t *)cmd) + length, lp, > payload_len); > } else { >@@ -2854,7 +3234,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > */ > cmdiocb->context2 = NULL; > } >- > /* Deferred RSCN */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, > "0235 Deferred RSCN " >@@ -2862,7 +3241,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > vport->fc_rscn_id_cnt, vport->fc_flag, > vport->port_state); > } else { >- spin_lock_irq(shost->host_lock); > vport->fc_flag |= FC_RSCN_DISCOVERY; > 
spin_unlock_irq(shost->host_lock); > /* ReDiscovery RSCN */ >@@ -2872,15 +3250,17 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > vport->fc_rscn_id_cnt, vport->fc_flag, > vport->port_state); > } >+ /* Indicate we are done walking fc_rscn_id_list on this vport */ >+ vport->fc_rscn_flush = 0; > /* Send back ACC */ > lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >- > /* send RECOVERY event for ALL nodes that match RSCN payload */ > lpfc_rscn_recovery_check(vport); >+ spin_lock_irq(shost->host_lock); > vport->fc_flag &= ~FC_RSCN_DEFERRED; >+ spin_unlock_irq(shost->host_lock); > return 0; > } >- > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, > "RCV RSCN: did:x%x/ste:x%x flg:x%x", > ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); >@@ -2889,20 +3269,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vpo > vport->fc_flag |= FC_RSCN_MODE; > spin_unlock_irq(shost->host_lock); > vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; >+ /* Indicate we are done walking fc_rscn_id_list on this vport */ >+ vport->fc_rscn_flush = 0; > /* > * If we zero, cmdiocb->context2, the calling routine will > * not try to free it. 
> */ > cmdiocb->context2 = NULL; >- > lpfc_set_disctmo(vport); >- > /* Send back ACC */ > lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >- > /* send RECOVERY event for ALL nodes that match RSCN payload */ > lpfc_rscn_recovery_check(vport); >- > return lpfc_els_handle_rscn(vport); > } > >@@ -2929,8 +3307,11 @@ lpfc_els_handle_rscn(struct lpfc_vport * > > /* To process RSCN, first compare RSCN data with NameServer */ > vport->fc_ns_retry = 0; >+ vport->num_disc_nodes = 0; >+ > ndlp = lpfc_findnode_did(vport, NameServer_DID); >- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) >+ && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { > /* Good ndlp, issue CT Request to NameServer */ > if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) > /* Wait for NameServer query cmpl before we can >@@ -2940,25 +3321,35 @@ lpfc_els_handle_rscn(struct lpfc_vport * > /* If login to NameServer does not exist, issue one */ > /* Good status, issue PLOGI to NameServer */ > ndlp = lpfc_findnode_did(vport, NameServer_DID); >- if (ndlp) >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) > /* Wait for NameServer login cmpl before we can > continue */ > return 1; > >- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >- if (!ndlp) { >- lpfc_els_flush_rscn(vport); >- return 0; >+ if (ndlp) { >+ ndlp = lpfc_enable_node(vport, ndlp, >+ NLP_STE_PLOGI_ISSUE); >+ if (!ndlp) { >+ lpfc_els_flush_rscn(vport); >+ return 0; >+ } >+ ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; > } else { >+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >+ if (!ndlp) { >+ lpfc_els_flush_rscn(vport); >+ return 0; >+ } > lpfc_nlp_init(vport, ndlp, NameServer_DID); >- ndlp->nlp_type |= NLP_FABRIC; > ndlp->nlp_prev_state = ndlp->nlp_state; > lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); >- lpfc_issue_els_plogi(vport, NameServer_DID, 0); >- /* Wait for NameServer login cmpl before we can >- continue */ >- return 1; > } >+ ndlp->nlp_type |= NLP_FABRIC; >+ 
lpfc_issue_els_plogi(vport, NameServer_DID, 0); >+ /* Wait for NameServer login cmpl before we can >+ * continue >+ */ >+ return 1; > } > > lpfc_els_flush_rscn(vport); >@@ -3022,8 +3413,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vp > mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; > mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; > mbox->vport = vport; >- rc = lpfc_sli_issue_mbox >- (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); > lpfc_set_loopback_flag(phba); > if (rc == MBX_NOT_FINISHED) { > mempool_free(mbox, phba->mbox_mem_pool); >@@ -3140,7 +3530,10 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *ph > elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, > lpfc_max_els_tries, ndlp, > ndlp->nlp_DID, ELS_CMD_ACC); >+ >+ /* Decrement the ndlp reference count from previous mbox command */ > lpfc_nlp_put(ndlp); >+ > if (!elsiocb) > return; > >@@ -3160,13 +3553,13 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *ph > status |= 0x4; > > rps_rsp->rsvd1 = 0; >- rps_rsp->portStatus = be16_to_cpu(status); >- rps_rsp->linkFailureCnt = be32_to_cpu(mb->un.varRdLnk.linkFailureCnt); >- rps_rsp->lossSyncCnt = be32_to_cpu(mb->un.varRdLnk.lossSyncCnt); >- rps_rsp->lossSignalCnt = be32_to_cpu(mb->un.varRdLnk.lossSignalCnt); >- rps_rsp->primSeqErrCnt = be32_to_cpu(mb->un.varRdLnk.primSeqErrCnt); >- rps_rsp->invalidXmitWord = be32_to_cpu(mb->un.varRdLnk.invalidXmitWord); >- rps_rsp->crcCnt = be32_to_cpu(mb->un.varRdLnk.crcCnt); >+ rps_rsp->portStatus = cpu_to_be16(status); >+ rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); >+ rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); >+ rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); >+ rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); >+ rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); >+ rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); > /* Xmit ELS RPS ACC response tag <ulpIoTag> */ > 
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, > "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " >@@ -3194,14 +3587,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vpor > struct ls_rjt stat; > > if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && >- (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { >- stat.un.b.lsRjtRsvd0 = 0; >- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; >- stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; >- stat.un.b.vendorUnique = 0; >- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, >- NULL); >- } >+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) >+ /* reject the unsolicited RPS request and done with it */ >+ goto reject_out; > > pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; > lp = (uint32_t *) pcmd->virt; >@@ -3223,15 +3611,20 @@ lpfc_els_rcv_rps(struct lpfc_vport *vpor > mbox->context2 = lpfc_nlp_get(ndlp); > mbox->vport = vport; > mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; >- if (lpfc_sli_issue_mbox (phba, mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) >+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) >+ != MBX_NOT_FINISHED) > /* Mbox completion will send ELS Response */ > return 0; >- >+ /* Decrement reference count used for the failed mbox >+ * command. 
>+ */ > lpfc_nlp_put(ndlp); > mempool_free(mbox, phba->mbox_mem_pool); > } > } >+ >+reject_out: >+ /* issue rejection response */ > stat.un.b.lsRjtRsvd0 = 0; > stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; > stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; >@@ -3306,12 +3699,15 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vpor > > if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && > (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { >+ /* issue rejection response */ > stat.un.b.lsRjtRsvd0 = 0; > stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; > stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; > stat.un.b.vendorUnique = 0; > lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, > NULL); >+ /* rejected the unsolicited RPL request and done with it */ >+ return 0; > } > > pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; >@@ -3420,88 +3816,27 @@ static int > lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, > struct lpfc_nodelist *fan_ndlp) > { >- struct lpfc_dmabuf *pcmd; >+ struct lpfc_hba *phba = vport->phba; > uint32_t *lp; >- IOCB_t *icmd; >- uint32_t cmd, did; > FAN *fp; >- struct lpfc_nodelist *ndlp, *next_ndlp; >- struct lpfc_hba *phba = vport->phba; >- >- /* FAN received */ >- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, >- "0265 FAN received\n"); >- icmd = &cmdiocb->iocb; >- did = icmd->un.elsreq64.remoteID; >- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; >- lp = (uint32_t *)pcmd->virt; >- >- cmd = *lp++; >- fp = (FAN *) lp; > >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); >+ lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; >+ fp = (FAN *) ++lp; > /* FAN received; Fan does not have a reply sequence */ >- >- if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) { >+ if ((vport == phba->pport) && >+ (vport->port_state == LPFC_LOCAL_CFG_LINK)) { > if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, >- sizeof(struct lpfc_name)) != 0) || >+ sizeof(struct lpfc_name))) || > (memcmp(&phba->fc_fabparam.portName, 
&fp->FportName, >- sizeof(struct lpfc_name)) != 0)) { >- /* >- * This node has switched fabrics. FLOGI is required >- * Clean up the old rpi's >- */ >- >- list_for_each_entry_safe(ndlp, next_ndlp, >- &vport->fc_nodes, nlp_listp) { >- if (ndlp->nlp_state != NLP_STE_NPR_NODE) >- continue; >- if (ndlp->nlp_type & NLP_FABRIC) { >- /* >- * Clean up old Fabric, Nameserver and >- * other NLP_FABRIC logins >- */ >- lpfc_drop_node(vport, ndlp); >- } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { >- /* Fail outstanding I/O now since this >- * device is marked for PLOGI >- */ >- lpfc_unreg_rpi(vport, ndlp); >- } >- } >- >- vport->port_state = LPFC_FLOGI; >- lpfc_set_disctmo(vport); >+ sizeof(struct lpfc_name)))) { >+ /* This port has switched fabrics. FLOGI is required */ > lpfc_initial_flogi(vport); >- return 0; >- } >- /* Discovery not needed, >- * move the nodes to their original state. >- */ >- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, >- nlp_listp) { >- if (ndlp->nlp_state != NLP_STE_NPR_NODE) >- continue; >- >- switch (ndlp->nlp_prev_state) { >- case NLP_STE_UNMAPPED_NODE: >- ndlp->nlp_prev_state = NLP_STE_NPR_NODE; >- lpfc_nlp_set_state(vport, ndlp, >- NLP_STE_UNMAPPED_NODE); >- break; >- >- case NLP_STE_MAPPED_NODE: >- ndlp->nlp_prev_state = NLP_STE_NPR_NODE; >- lpfc_nlp_set_state(vport, ndlp, >- NLP_STE_MAPPED_NODE); >- break; >- >- default: >- break; >- } >+ } else { >+ /* FAN verified - skip FLOGI */ >+ vport->fc_myDID = vport->fc_prevDID; >+ lpfc_issue_fabric_reglogin(vport); > } >- >- /* Start discovery - this should just do CLEAR_LA */ >- lpfc_disc_start(vport); > } > return 0; > } >@@ -3511,20 +3846,17 @@ lpfc_els_timeout(unsigned long ptr) > { > struct lpfc_vport *vport = (struct lpfc_vport *) ptr; > struct lpfc_hba *phba = vport->phba; >+ uint32_t tmo_posted; > unsigned long iflag; > > spin_lock_irqsave(&vport->work_port_lock, iflag); >- if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { >+ tmo_posted = vport->work_port_events & 
WORKER_ELS_TMO; >+ if (!tmo_posted) > vport->work_port_events |= WORKER_ELS_TMO; >- spin_unlock_irqrestore(&vport->work_port_lock, iflag); >+ spin_unlock_irqrestore(&vport->work_port_lock, iflag); > >- spin_lock_irqsave(&phba->hbalock, iflag); >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); >- spin_unlock_irqrestore(&phba->hbalock, iflag); >- } >- else >- spin_unlock_irqrestore(&vport->work_port_lock, iflag); >+ if (!tmo_posted) >+ lpfc_worker_wake_up(phba); > return; > } > >@@ -3569,9 +3901,6 @@ lpfc_els_timeout_handler(struct lpfc_vpo > els_command == ELS_CMD_FDISC) > continue; > >- if (vport != piocb->vport) >- continue; >- > if (piocb->drvrTimeout > 0) { > if (piocb->drvrTimeout >= timeout) > piocb->drvrTimeout -= timeout; >@@ -3586,7 +3915,7 @@ lpfc_els_timeout_handler(struct lpfc_vpo > else { > struct lpfc_nodelist *ndlp; > ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); >- if (ndlp) >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) > remote_ID = ndlp->nlp_DID; > } > lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >@@ -3708,59 +4037,433 @@ lpfc_els_flush_all_cmd(struct lpfc_hba > } > > static void >-lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, >- struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) >+lpfc_els_rcv_auth_neg(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_nodelist *ndlp) > { >- struct lpfc_nodelist *ndlp; >- struct ls_rjt stat; >- uint32_t *payload; >- uint32_t cmd, did, newnode, rjt_err = 0; >- IOCB_t *icmd = &elsiocb->iocb; >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ struct lpfc_dmabuf *pcmd = cmdiocb->context2; >+ struct lpfc_auth_message *authcmd; >+ uint8_t reason, explanation; >+ uint32_t message_len; >+ uint32_t trans_id; >+ struct fc_auth_req *fc_req; >+ struct fc_auth_rsp *fc_rsp; >+ >+ authcmd = pcmd->virt; >+ message_len = be32_to_cpu(authcmd->message_len); >+ trans_id = be32_to_cpu(authcmd->trans_id); > >- if (vport == NULL || elsiocb->context2 == NULL) >- goto dropit; >+ 
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); > >- newnode = 0; >- payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; >- cmd = *payload; >- if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) >- lpfc_post_buffer(phba, pring, 1, 1); >+ vport->auth.trans_id = trans_id; > >- did = icmd->un.rcvels.remoteID; >- if (icmd->ulpStatus) { >- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >- "RCV Unsol ELS: status:x%x/x%x did:x%x", >- icmd->ulpStatus, icmd->un.ulpWord[4], did); >- goto dropit; >+ if(lpfc_unpack_auth_negotiate(vport, authcmd->data, >+ &reason, &explanation)) { >+ lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation); >+ return; > } >+ vport->auth.direction = AUTH_DIRECTION_NONE; >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY, >+ "1033 Received auth_negotiate from Nport:x%x\n", >+ ndlp->nlp_DID); > >- /* Check to see if link went down during discovery */ >- if (lpfc_els_chk_latt(vport)) >- goto dropit; >+ fc_req = kzalloc(sizeof(struct fc_auth_req), GFP_KERNEL); > >- /* Ignore traffic recevied during vport shutdown. 
*/ >- if (vport->load_flag & FC_UNLOADING) >- goto dropit; >+ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn); >+ if (ndlp->nlp_type & NLP_FABRIC) >+ fc_req->remote_wwpn = AUTH_FABRIC_WWN; >+ else >+ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn); >+ fc_req->u.dhchap_challenge.transaction_id = vport->auth.trans_id; >+ fc_req->u.dhchap_challenge.dh_group_id = vport->auth.group_id; >+ fc_req->u.dhchap_challenge.hash_id = vport->auth.hash_id; > >- ndlp = lpfc_findnode_did(vport, did); >- if (!ndlp) { >- /* Cannot find existing Fabric ndlp, so allocate a new one */ >- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >- if (!ndlp) >- goto dropit; >+ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL); > >- lpfc_nlp_init(vport, ndlp, did); >- newnode = 1; >- if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { >- ndlp->nlp_type |= NLP_FABRIC; >- } >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); >+ if (lpfc_fc_security_dhchap_make_challenge(shost, >+ fc_req, sizeof(struct fc_auth_req), >+ fc_rsp, MAX_AUTH_RSP_SIZE)) { >+ kfree(fc_rsp); >+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0); > } > >- phba->fc_stat.elsRcvFrame++; >- if (elsiocb->context1) >- lpfc_nlp_put(elsiocb->context1); >- elsiocb->context1 = lpfc_nlp_get(ndlp); >+ kfree(fc_req); >+ >+} >+ >+static void >+lpfc_els_rcv_chap_chal(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_nodelist *ndlp) >+{ >+ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ struct lpfc_dmabuf *pcmd = cmdiocb->context2; >+ struct lpfc_auth_message *authcmd; >+ uint8_t reason, explanation; >+ uint32_t message_len; >+ uint32_t trans_id; >+ struct fc_auth_req *fc_req; >+ struct fc_auth_rsp *fc_rsp; >+ uint32_t fc_req_len; >+ >+ authcmd = pcmd->virt; >+ message_len = be32_to_cpu(authcmd->message_len); >+ trans_id = be32_to_cpu(authcmd->trans_id); >+ >+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >+ >+ if (vport->auth.auth_msg_state != LPFC_AUTH_NEGOTIATE) { 
>+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1034 Not Expecting Challenge - Rejecting " >+ "Challenge.\n"); >+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL); >+ return; >+ } >+ >+ if (trans_id != vport->auth.trans_id) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1035 Transport ID does not match - Rejecting " >+ "Challenge.\n"); >+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD); >+ return; >+ } >+ >+ if (lpfc_unpack_dhchap_challenge(vport, authcmd->data, >+ &reason, &explanation)) { >+ lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation); >+ return; >+ } >+ vport->auth.direction = AUTH_DIRECTION_NONE; >+ >+ fc_req_len = (sizeof(struct fc_auth_req) + >+ vport->auth.challenge_len + >+ vport->auth.dh_pub_key_len); >+ fc_req = kzalloc(fc_req_len, GFP_KERNEL); >+ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn); >+ if (ndlp->nlp_type & NLP_FABRIC) >+ fc_req->remote_wwpn = AUTH_FABRIC_WWN; >+ else >+ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn); >+ fc_req->u.dhchap_reply.transaction_id = vport->auth.trans_id; >+ fc_req->u.dhchap_reply.dh_group_id = vport->auth.group_id; >+ fc_req->u.dhchap_reply.hash_id = vport->auth.hash_id; >+ fc_req->u.dhchap_reply.bidirectional = vport->auth.bidirectional; >+ fc_req->u.dhchap_reply.received_challenge_len = >+ vport->auth.challenge_len; >+ fc_req->u.dhchap_reply.received_public_key_len = >+ vport->auth.dh_pub_key_len; >+ memcpy (fc_req->u.dhchap_reply.data, vport->auth.challenge, >+ vport->auth.challenge_len); >+ if (vport->auth.group_id != DH_GROUP_NULL) { >+ memcpy (fc_req->u.dhchap_reply.data + vport->auth.challenge_len, >+ vport->auth.dh_pub_key, vport->auth.dh_pub_key_len); >+ } >+ >+ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL); >+ >+ if (lpfc_fc_security_dhchap_make_response(shost, >+ fc_req, fc_req_len, >+ fc_rsp, MAX_AUTH_RSP_SIZE)) { >+ kfree(fc_rsp); >+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0); >+ } >+ >+ kfree(fc_req); >+ 
>+} >+ >+static void >+lpfc_els_rcv_auth_rjt(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_nodelist *ndlp) >+{ >+ >+ struct lpfc_dmabuf *pcmd = cmdiocb->context2; >+ struct lpfc_auth_message *authcmd; >+ uint32_t message_len; >+ uint32_t trans_id; >+ struct lpfc_auth_reject *rjt; >+ struct lpfc_hba *phba = vport->phba; >+ >+ authcmd = pcmd->virt; >+ rjt = (struct lpfc_auth_reject *)authcmd->data; >+ >+ message_len = be32_to_cpu(authcmd->message_len); >+ trans_id = be32_to_cpu(authcmd->trans_id); >+ >+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >+ >+ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1036 Authentication transaction reject - " >+ "re-auth request reason 0x%x exp 0x%x\n", >+ rjt->reason, rjt->explanation); >+ lpfc_port_auth_failed(ndlp); >+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS) { >+ /* start authentication */ >+ lpfc_start_authentication(vport, ndlp); >+ } >+ } else { >+ if (rjt->reason == LOGIC_ERR && rjt->explanation == RESTART) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1037 Authentication transaction " >+ "reject - restarting authentication. " >+ "reason 0x%x exp 0x%x\n", >+ rjt->reason, rjt->explanation); >+ /* restart auth */ >+ lpfc_start_authentication(vport, ndlp); >+ } else { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1057 Authentication transaction " >+ "reject. reason 0x%x exp 0x%x\n", >+ rjt->reason, rjt->explanation); >+ vport->auth.auth_msg_state = LPFC_AUTH_REJECT; >+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && >+ (phba->link_state != LPFC_CLEAR_LA)) { >+ /* If Auth failed enable link interrupt. 
*/ >+ lpfc_issue_clear_la(phba, vport); >+ } >+ } >+ } >+} >+ >+static void >+lpfc_els_rcv_chap_reply(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_nodelist *ndlp) >+{ >+ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ struct lpfc_dmabuf *pcmd = cmdiocb->context2; >+ struct lpfc_auth_message *authcmd; >+ uint32_t message_len; >+ uint32_t trans_id; >+ struct fc_auth_req *fc_req; >+ struct fc_auth_rsp *fc_rsp; >+ uint32_t data_len; >+ >+ authcmd = pcmd->virt; >+ message_len = be32_to_cpu(authcmd->message_len); >+ trans_id = be32_to_cpu(authcmd->trans_id); >+ >+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >+ >+ fc_req = kzalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL); >+ >+ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn); >+ if (ndlp->nlp_type & NLP_FABRIC) >+ fc_req->remote_wwpn = AUTH_FABRIC_WWN; >+ else >+ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn); >+ >+ if (vport->auth.auth_msg_state != LPFC_DHCHAP_CHALLENGE) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1039 Not Expecting Reply - rejecting. State " >+ "0x%x\n", vport->auth.auth_state); >+ >+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL); >+ return; >+ } >+ >+ if (trans_id != vport->auth.trans_id) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1040 Bad Reply trans_id- rejecting. 
" >+ "Trans_id: 0x%x Expecting: 0x%x \n", >+ trans_id, vport->auth.trans_id); >+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD); >+ return; >+ } >+ >+ /* Zero is a valid length to be returned */ >+ data_len = lpfc_unpack_dhchap_reply(vport, authcmd->data, fc_req); >+ fc_req->u.dhchap_success.hash_id = vport->auth.hash_id; >+ fc_req->u.dhchap_success.dh_group_id = vport->auth.group_id; >+ fc_req->u.dhchap_success.transaction_id = vport->auth.trans_id; >+ fc_req->u.dhchap_success.our_challenge_len = vport->auth.challenge_len; >+ memcpy (fc_req->u.dhchap_success.data, vport->auth.challenge, >+ vport->auth.challenge_len); >+ >+ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL); >+ >+ if (lpfc_fc_security_dhchap_authenticate(shost, fc_req, >+ (sizeof(struct fc_auth_req) + >+ data_len + vport->auth.challenge_len), >+ fc_rsp, MAX_AUTH_RSP_SIZE)) { >+ kfree(fc_rsp); >+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0); >+ } >+ >+ kfree(fc_req); >+ >+} >+ >+static void >+lpfc_els_rcv_chap_suc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_nodelist *ndlp) >+{ >+ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ struct lpfc_dmabuf *pcmd = cmdiocb->context2; >+ struct lpfc_auth_message *authcmd; >+ uint32_t message_len; >+ uint32_t trans_id; >+ struct fc_auth_req *fc_req; >+ struct fc_auth_rsp *fc_rsp; >+ uint32_t data_len; >+ >+ authcmd = pcmd->virt; >+ message_len = be32_to_cpu(authcmd->message_len); >+ trans_id = be32_to_cpu(authcmd->trans_id); >+ >+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >+ >+ if (vport->auth.auth_msg_state != LPFC_DHCHAP_REPLY && >+ vport->auth.auth_msg_state != LPFC_DHCHAP_SUCCESS_REPLY) { >+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL); >+ return; >+ } >+ >+ if (trans_id != vport->auth.trans_id) { >+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD); >+ return; >+ } >+ >+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY && >+ 
vport->auth.bidirectional) { >+ >+ fc_req = kzalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL); >+ if (!fc_req) { >+ return; >+ } >+ >+ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn); >+ if (ndlp->nlp_type & NLP_FABRIC) >+ fc_req->remote_wwpn = AUTH_FABRIC_WWN; >+ else >+ fc_req->remote_wwpn = >+ wwn_to_u64(ndlp->nlp_portname.u.wwn); >+ fc_req->u.dhchap_success.hash_id = vport->auth.hash_id; >+ fc_req->u.dhchap_success.dh_group_id = vport->auth.group_id; >+ fc_req->u.dhchap_success.transaction_id = vport->auth.trans_id; >+ fc_req->u.dhchap_success.our_challenge_len = >+ vport->auth.challenge_len; >+ >+ memcpy(fc_req->u.dhchap_success.data, vport->auth.challenge, >+ vport->auth.challenge_len); >+ >+ /* Zero is a valid return length */ >+ data_len = lpfc_unpack_dhchap_success(vport, >+ authcmd->data, >+ fc_req); >+ >+ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL); >+ if (!fc_rsp) { >+ return; >+ } >+ >+ if (lpfc_fc_security_dhchap_authenticate(shost, >+ fc_req, sizeof(struct fc_auth_req) + data_len, >+ fc_rsp, MAX_AUTH_RSP_SIZE)) { >+ kfree(fc_rsp); >+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0); >+ } >+ >+ kfree(fc_req); >+ >+ } else { >+ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS; >+ >+ if (vport->auth.challenge) >+ kfree(vport->auth.challenge); >+ vport->auth.challenge = NULL; >+ vport->auth.challenge_len = 0; >+ >+ if (vport->auth.auth_state != LPFC_AUTH_SUCCESS) { >+ vport->auth.auth_state = LPFC_AUTH_SUCCESS; >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY, >+ "1041 Authentication Successful\n"); >+ >+ lpfc_start_discovery(vport); >+ >+ } else { >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY, >+ "1042 Re-Authentication Successful\n"); >+ } >+ /* If config requires re-authentication start the timer */ >+ vport->auth.last_auth = jiffies; >+ if (vport->auth.reauth_interval) >+ mod_timer(&ndlp->nlp_reauth_tmr, jiffies + >+ vport->auth.reauth_interval * 60 * HZ); >+ } >+ vport->auth.direction |= AUTH_DIRECTION_REMOTE; >+} >+static void 
>+lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, >+ struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) >+{ >+ struct Scsi_Host *shost; >+ struct lpfc_nodelist *ndlp; >+ struct ls_rjt stat; >+ uint32_t *payload; >+ uint32_t cmd, did, newnode, rjt_err = 0; >+ IOCB_t *icmd = &elsiocb->iocb; >+ >+ if (!vport || !(elsiocb->context2)) >+ goto dropit; >+ >+ newnode = 0; >+ payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; >+ cmd = *payload; >+ if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) >+ lpfc_post_buffer(phba, pring, 1, 1); >+ >+ did = icmd->un.rcvels.remoteID; >+ if (icmd->ulpStatus) { >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >+ "RCV Unsol ELS: status:x%x/x%x did:x%x", >+ icmd->ulpStatus, icmd->un.ulpWord[4], did); >+ goto dropit; >+ } >+ >+ /* Check to see if link went down during discovery */ >+ if (lpfc_els_chk_latt(vport)) >+ goto dropit; >+ >+ /* Ignore traffic recevied during vport shutdown. */ >+ if (vport->load_flag & FC_UNLOADING) >+ goto dropit; >+ >+ ndlp = lpfc_findnode_did(vport, did); >+ if (!ndlp) { >+ /* Cannot find existing Fabric ndlp, so allocate a new one */ >+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); >+ if (!ndlp) >+ goto dropit; >+ >+ lpfc_nlp_init(vport, ndlp, did); >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); >+ newnode = 1; >+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) >+ ndlp->nlp_type |= NLP_FABRIC; >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, >+ NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ goto dropit; >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); >+ newnode = 1; >+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) >+ ndlp->nlp_type |= NLP_FABRIC; >+ } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { >+ /* This is similar to the new node path */ >+ ndlp = lpfc_nlp_get(ndlp); >+ if (!ndlp) >+ goto dropit; >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); >+ newnode = 1; >+ } >+ >+ phba->fc_stat.elsRcvFrame++; 
>+ >+ elsiocb->context1 = lpfc_nlp_get(ndlp); > elsiocb->vport = vport; > > if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { >@@ -3780,9 +4483,22 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); > > if (vport->port_state < LPFC_DISC_AUTH) { >- rjt_err = LSRJT_UNABLE_TPC; >- break; >+ if (!(phba->pport->fc_flag & FC_PT2PT) || >+ (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { >+ rjt_err = LSRJT_UNABLE_TPC; >+ break; >+ } >+ /* We get here, and drop thru, if we are PT2PT with >+ * another NPort and the other side has initiated >+ * the PLOGI before responding to our FLOGI. >+ */ > } >+ >+ shost = lpfc_shost_from_vport(vport); >+ spin_lock_irq(shost->host_lock); >+ ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; >+ spin_unlock_irq(shost->host_lock); >+ > lpfc_disc_state_machine(vport, ndlp, elsiocb, > NLP_EVT_RCV_PLOGI); > >@@ -3795,7 +4511,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > phba->fc_stat.elsRcvFLOGI++; > lpfc_els_rcv_flogi(vport, elsiocb, ndlp); > if (newnode) >- lpfc_drop_node(vport, ndlp); >+ lpfc_nlp_put(ndlp); > break; > case ELS_CMD_LOGO: > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >@@ -3825,7 +4541,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > phba->fc_stat.elsRcvRSCN++; > lpfc_els_rcv_rscn(vport, elsiocb, ndlp); > if (newnode) >- lpfc_drop_node(vport, ndlp); >+ lpfc_nlp_put(ndlp); > break; > case ELS_CMD_ADISC: > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >@@ -3897,7 +4613,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > phba->fc_stat.elsRcvLIRR++; > lpfc_els_rcv_lirr(vport, elsiocb, ndlp); > if (newnode) >- lpfc_drop_node(vport, ndlp); >+ lpfc_nlp_put(ndlp); > break; > case ELS_CMD_RPS: > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >@@ -3907,7 +4623,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > phba->fc_stat.elsRcvRPS++; > lpfc_els_rcv_rps(vport, elsiocb, ndlp); > if (newnode) >- lpfc_drop_node(vport, ndlp); >+ lpfc_nlp_put(ndlp); > break; > case ELS_CMD_RPL: > 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >@@ -3917,7 +4633,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > phba->fc_stat.elsRcvRPL++; > lpfc_els_rcv_rpl(vport, elsiocb, ndlp); > if (newnode) >- lpfc_drop_node(vport, ndlp); >+ lpfc_nlp_put(ndlp); > break; > case ELS_CMD_RNID: > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >@@ -3927,8 +4643,50 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > phba->fc_stat.elsRcvRNID++; > lpfc_els_rcv_rnid(vport, elsiocb, ndlp); > if (newnode) >- lpfc_drop_node(vport, ndlp); >+ lpfc_nlp_put(ndlp); >+ break; >+ case ELS_CMD_AUTH_RJT: >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >+ "RCV AUTH_RJT: did:x%x/ste:x%x flg:x%x", >+ did, vport->port_state, ndlp->nlp_flag); >+ >+ lpfc_els_rcv_auth_rjt(vport, elsiocb, ndlp); >+ break; >+ case ELS_CMD_AUTH_NEG: >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >+ "RCV AUTH_NEG: did:x%x/ste:x%x flg:x%x", >+ did, vport->port_state, ndlp->nlp_flag); >+ >+ lpfc_els_rcv_auth_neg(vport, elsiocb, ndlp); >+ break; >+ case ELS_CMD_DH_CHA: >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >+ "RCV DH_CHA: did:x%x/ste:x%x flg:x%x", >+ did, vport->port_state, ndlp->nlp_flag); >+ >+ lpfc_els_rcv_chap_chal(vport, elsiocb, ndlp); >+ break; >+ case ELS_CMD_DH_REP: >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >+ "RCV DH_REP: did:x%x/ste:x%x flg:x%x", >+ did, vport->port_state, ndlp->nlp_flag); >+ >+ lpfc_els_rcv_chap_reply(vport, elsiocb, ndlp); >+ break; >+ case ELS_CMD_DH_SUC: >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >+ "RCV DH_SUC: did:x%x/ste:x%x flg:x%x", >+ did, vport->port_state, ndlp->nlp_flag); >+ >+ lpfc_els_rcv_chap_suc(vport, elsiocb, ndlp); > break; >+ >+ case ELS_CMD_AUTH_DONE: >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, >+ "RCV AUTH_DONE: did:x%x/ste:x%x flg:x%x", >+ did, vport->port_state, ndlp->nlp_flag); >+ >+ > default: > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, > "RCV ELS cmd: cmd:x%x 
did:x%x/ste:x%x", >@@ -3942,7 +4700,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > "0115 Unknown ELS command x%x " > "received from NPORT x%x\n", cmd, did); > if (newnode) >- lpfc_drop_node(vport, ndlp); >+ lpfc_nlp_put(ndlp); > break; > } > >@@ -3955,13 +4713,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p > NULL); > } > >+ lpfc_nlp_put(elsiocb->context1); >+ elsiocb->context1 = NULL; > return; > > dropit: >- lpfc_printf_log(phba, KERN_ERR, LOG_ELS, >+ if (vport && !(vport->load_flag & FC_UNLOADING)) >+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS, > "(%d):0111 Dropping received ELS cmd " > "Data: x%x x%x x%x\n", >- vport ? vport->vpi : 0xffff, icmd->ulpStatus, >+ vport->vpi, icmd->ulpStatus, > icmd->un.ulpWord[4], icmd->ulpTimeout); > phba->fc_stat.elsRcvDrop++; > } >@@ -3993,6 +4754,7 @@ lpfc_els_unsol_event(struct lpfc_hba *ph > struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; > struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; > >+ elsiocb->context1 = NULL; > elsiocb->context2 = NULL; > elsiocb->context3 = NULL; > >@@ -4017,15 +4779,15 @@ lpfc_els_unsol_event(struct lpfc_hba *ph > vport = lpfc_find_vport_by_vpid(phba, vpi); > } > } >- /* If there are no BDEs associated >- * with this IOCB, there is nothing to do. >- */ >+ /* If there are no BDEs associated >+ * with this IOCB, there is nothing to do. >+ */ > if (icmd->ulpBdeCount == 0) > return; > >- /* type of ELS cmd is first 32bit word >- * in packet >- */ >+ /* type of ELS cmd is first 32bit word >+ * in packet >+ */ > if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { > elsiocb->context2 = bdeBuf1; > } else { >@@ -4040,8 +4802,6 @@ lpfc_els_unsol_event(struct lpfc_hba *ph > * The different unsolicited event handlers would tell us > * if they are done with "mp" by setting context2 to NULL. 
> */ >- lpfc_nlp_put(elsiocb->context1); >- elsiocb->context1 = NULL; > if (elsiocb->context2) { > lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); > elsiocb->context2 = NULL; >@@ -4079,8 +4839,20 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *ph > return; > } > lpfc_nlp_init(vport, ndlp, NameServer_DID); >- ndlp->nlp_type |= NLP_FABRIC; >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); >+ if (!ndlp) { >+ if (phba->fc_topology == TOPOLOGY_LOOP) { >+ lpfc_disc_start(vport); >+ return; >+ } >+ lpfc_vport_set_state(vport, FC_VPORT_FAILED); >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "0348 NameServer login: node freed\n"); >+ return; >+ } > } >+ ndlp->nlp_type |= NLP_FABRIC; > > lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); > >@@ -4097,8 +4869,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *ph > if (ndlp_fdmi) { > lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); > ndlp_fdmi->nlp_type |= NLP_FABRIC; >- ndlp_fdmi->nlp_state = >- NLP_STE_PLOGI_ISSUE; >+ lpfc_nlp_set_state(vport, ndlp_fdmi, >+ NLP_STE_PLOGI_ISSUE); > lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, > 0); > } >@@ -4114,8 +4886,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba > struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; > MAILBOX_t *mb = &pmb->mb; > >+ spin_lock_irq(shost->host_lock); > vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; >- lpfc_nlp_put(ndlp); >+ spin_unlock_irq(shost->host_lock); > > if (mb->mbxStatus) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, >@@ -4125,6 +4898,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba > switch (mb->mbxStatus) { > case 0x11: /* unsupported feature */ > case 0x9603: /* max_vpi exceeded */ >+ case 0x9602: /* Link event since CLEAR_LA */ > /* giving up on vport registration */ > lpfc_vport_set_state(vport, FC_VPORT_FAILED); > spin_lock_irq(shost->host_lock); >@@ -4135,17 +4909,29 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba > default: > /* Try to recover from this error */ > 
lpfc_mbx_unreg_vpi(vport); >+ spin_lock_irq(shost->host_lock); > vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; >- lpfc_initial_fdisc(vport); >+ spin_unlock_irq(shost->host_lock); >+ if (vport->port_type == LPFC_PHYSICAL_PORT) >+ lpfc_initial_flogi(vport); >+ else >+ lpfc_initial_fdisc(vport); > break; > } > > } else { > if (vport == phba->pport) > lpfc_issue_fabric_reglogin(vport); >- else >+ else if (!vport->cfg_enable_auth) > lpfc_do_scr_ns_plogi(phba, vport); >+ > } >+ >+ /* Now, we decrement the ndlp reference count held for this >+ * callback function >+ */ >+ lpfc_nlp_put(ndlp); >+ > mempool_free(pmb, phba->mbox_mem_pool); > return; > } >@@ -4154,6 +4940,7 @@ void > lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, > struct lpfc_nodelist *ndlp) > { >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > LPFC_MBOXQ_t *mbox; > > mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >@@ -4162,25 +4949,31 @@ lpfc_register_new_vport(struct lpfc_hba > mbox->vport = vport; > mbox->context2 = lpfc_nlp_get(ndlp); > mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; >- if (lpfc_sli_issue_mbox(phba, mbox, >- MBX_NOWAIT | MBX_STOP_IOCB) >+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) > == MBX_NOT_FINISHED) { >+ /* mailbox command not success, decrement ndlp >+ * reference count for this command >+ */ >+ lpfc_nlp_put(ndlp); > mempool_free(mbox, phba->mbox_mem_pool); >- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; > >- lpfc_vport_set_state(vport, FC_VPORT_FAILED); > lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, > "0253 Register VPI: Can't send mbox\n"); >+ goto mbox_err_exit; > } > } else { >- lpfc_vport_set_state(vport, FC_VPORT_FAILED); >- > lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, > "0254 Register VPI: no memory\n"); >- >- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; >- lpfc_nlp_put(ndlp); >+ goto mbox_err_exit; > } >+ return; >+ >+mbox_err_exit: >+ lpfc_vport_set_state(vport, FC_VPORT_FAILED); >+ spin_lock_irq(shost->host_lock); >+ vport->fc_flag &= 
~FC_VPORT_NEEDS_REG_VPI; >+ spin_unlock_irq(shost->host_lock); >+ return; > } > > static void >@@ -4194,6 +4987,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb > struct lpfc_nodelist *next_np; > IOCB_t *irsp = &rspiocb->iocb; > struct lpfc_iocbq *piocb; >+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; >+ struct serv_parm *sp; >+ struct lpfc_name wwpn; > > lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, > "0123 FDISC completes. x%x/x%x prevDID: x%x\n", >@@ -4217,53 +5013,74 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb > goto out; > /* FDISC failed */ > lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >- "0124 FDISC failed. (%d/%d)\n", >+ "0126 FDISC failed. (%d/%d)\n", > irsp->ulpStatus, irsp->un.ulpWord[4]); >- if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) >- lpfc_vport_set_state(vport, FC_VPORT_FAILED); >- >- lpfc_nlp_put(ndlp); >- /* giving up on FDISC. Cancel discovery timer */ >- lpfc_can_disctmo(vport); >+ goto fdisc_failed; >+ } >+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); >+ sp = prsp->virt + sizeof(uint32_t); >+ if (sp->cmn.security) >+ ndlp->nlp_flag |= NLP_SC_REQ; >+ else >+ ndlp->nlp_flag &= ~NLP_SC_REQ; >+ if (vport->cfg_enable_auth) { >+ u64_to_wwn(AUTH_FABRIC_WWN, wwpn.u.wwn); >+ if (lpfc_get_auth_config(ndlp, &wwpn)) >+ goto fdisc_failed; > } else { >- spin_lock_irq(shost->host_lock); >- vport->fc_flag |= FC_FABRIC; >- if (vport->phba->fc_topology == TOPOLOGY_LOOP) >- vport->fc_flag |= FC_PUBLIC_LOOP; >- spin_unlock_irq(shost->host_lock); >- >- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; >- lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); >- if ((vport->fc_prevDID != vport->fc_myDID) && >- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { >- /* If our NportID changed, we need to ensure all >- * remaining NPORTs get unreg_login'ed so we can >- * issue unreg_vpi. 
>- */ >- list_for_each_entry_safe(np, next_np, >- &vport->fc_nodes, nlp_listp) { >- if (np->nlp_state != NLP_STE_NPR_NODE >- || !(np->nlp_flag & NLP_NPR_ADISC)) >- continue; >- spin_lock_irq(shost->host_lock); >- np->nlp_flag &= ~NLP_NPR_ADISC; >- spin_unlock_irq(shost->host_lock); >- lpfc_unreg_rpi(vport, np); >- } >- lpfc_mbx_unreg_vpi(vport); >- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; >+ vport->auth.security_active = 0; >+ if (sp->cmn.security) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1056 Authentication mode is " >+ "disabled, but is required " >+ "by the fabric.\n"); >+ goto fdisc_failed; > } >+ } >+ spin_lock_irq(shost->host_lock); >+ vport->fc_flag |= FC_FABRIC; >+ if (vport->phba->fc_topology == TOPOLOGY_LOOP) >+ vport->fc_flag |= FC_PUBLIC_LOOP; >+ spin_unlock_irq(shost->host_lock); > >- if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) >- lpfc_register_new_vport(phba, vport, ndlp); >- else >- lpfc_do_scr_ns_plogi(phba, vport); >- >- lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */ >+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; >+ lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); >+ if ((vport->fc_prevDID != vport->fc_myDID) && >+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { >+ /* If our NportID changed, we need to ensure all >+ * remaining NPORTs get unreg_login'ed so we can >+ * issue unreg_vpi. 
>+ */ >+ list_for_each_entry_safe(np, next_np, >+ &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp) || >+ (np->nlp_state != NLP_STE_NPR_NODE) || >+ !(np->nlp_flag & NLP_NPR_ADISC)) >+ continue; >+ spin_lock_irq(shost->host_lock); >+ np->nlp_flag &= ~NLP_NPR_ADISC; >+ spin_unlock_irq(shost->host_lock); >+ lpfc_unreg_rpi(vport, np); >+ } >+ lpfc_mbx_unreg_vpi(vport); >+ spin_lock_irq(shost->host_lock); >+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; >+ spin_unlock_irq(shost->host_lock); > } > >+ if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) >+ lpfc_register_new_vport(phba, vport, ndlp); >+ else if (!vport->cfg_enable_auth) >+ lpfc_do_scr_ns_plogi(phba, vport); >+ goto out; >+fdisc_failed: >+ lpfc_vport_set_state(vport, FC_VPORT_FAILED); >+ /* Cancel discovery timer */ >+ lpfc_can_disctmo(vport); >+ lpfc_nlp_put(ndlp); > out: > lpfc_els_free_iocb(phba, cmdiocb); >+ return; > } > > int >@@ -4309,6 +5126,10 @@ lpfc_issue_els_fdisc(struct lpfc_vport * > sp->cls2.seqDelivery = 1; > sp->cls3.seqDelivery = 1; > >+ /* Set the security service parameter */ >+ if (vport->cfg_enable_auth) >+ sp->cmn.security = 1; >+ > pcmd += sizeof(uint32_t); /* CSP Word 2 */ > pcmd += sizeof(uint32_t); /* CSP Word 3 */ > pcmd += sizeof(uint32_t); /* CSP Word 4 */ >@@ -4346,6 +5167,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba > { > struct lpfc_vport *vport = cmdiocb->vport; > IOCB_t *irsp; >+ struct lpfc_nodelist *ndlp; >+ ndlp = (struct lpfc_nodelist *)cmdiocb->context1; > > irsp = &rspiocb->iocb; > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, >@@ -4354,6 +5177,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba > > lpfc_els_free_iocb(phba, cmdiocb); > vport->unreg_vpi_cmpl = VPORT_ERROR; >+ >+ /* Trigger the release of the ndlp after logo */ >+ lpfc_nlp_put(ndlp); > } > > int >@@ -4407,18 +5233,16 @@ lpfc_fabric_block_timeout(unsigned long > struct lpfc_hba *phba = (struct lpfc_hba *) ptr; > unsigned long iflags; > uint32_t tmo_posted; >+ > 
spin_lock_irqsave(&phba->pport->work_port_lock, iflags); > tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; > if (!tmo_posted) > phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; > spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); > >- if (!tmo_posted) { >- spin_lock_irqsave(&phba->hbalock, iflags); >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); >- spin_unlock_irqrestore(&phba->hbalock, iflags); >- } >+ if (!tmo_posted) >+ lpfc_worker_wake_up(phba); >+ return; > } > > static void >@@ -4433,11 +5257,12 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba > repeat: > iocb = NULL; > spin_lock_irqsave(&phba->hbalock, iflags); >- /* Post any pending iocb to the SLI layer */ >+ /* Post any pending iocb to the SLI layer */ > if (atomic_read(&phba->fabric_iocb_count) == 0) { > list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), > list); > if (iocb) >+ /* Increment fabric iocb count to hold the position */ > atomic_inc(&phba->fabric_iocb_count); > } > spin_unlock_irqrestore(&phba->hbalock, iflags); >@@ -4484,9 +5309,7 @@ lpfc_block_fabric_iocbs(struct lpfc_hba > int blocked; > > blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); >- /* Start a timer to unblock fabric >- * iocbs after 100ms >- */ >+ /* Start a timer to unblock fabric iocbs after 100ms */ > if (!blocked) > mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); > >@@ -4534,8 +5357,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *p > > atomic_dec(&phba->fabric_iocb_count); > if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { >- /* Post any pending iocbs to HBA */ >- lpfc_resume_fabric_iocbs(phba); >+ /* Post any pending iocbs to HBA */ >+ lpfc_resume_fabric_iocbs(phba); > } > } > >@@ -4554,6 +5377,9 @@ lpfc_issue_fabric_iocb(struct lpfc_hba * > ready = atomic_read(&phba->fabric_iocb_count) == 0 && > !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); > >+ if (ready) >+ /* Increment fabric iocb count to hold the position */ >+ 
atomic_inc(&phba->fabric_iocb_count); > spin_unlock_irqrestore(&phba->hbalock, iflags); > if (ready) { > iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; >@@ -4564,7 +5390,6 @@ lpfc_issue_fabric_iocb(struct lpfc_hba * > "Fabric sched2: ste:x%x", > iocb->vport->port_state, 0, 0); > >- atomic_inc(&phba->fabric_iocb_count); > ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); > > if (ret == IOCB_ERROR) { >@@ -4695,3 +5520,183 @@ void lpfc_fabric_abort_flogi(struct lpfc > } > > >+static void >+lpfc_cmpl_els_auth(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_iocbq *rspiocb) >+{ >+ IOCB_t *irsp = &rspiocb->iocb; >+ struct lpfc_vport *vport = cmdiocb->vport; >+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; >+ >+ /* Check to see if link went down during discovery */ >+ if (lpfc_els_chk_latt(vport)) { >+ vport->auth.auth_msg_state = LPFC_AUTH_NONE; >+ lpfc_els_free_iocb(phba, cmdiocb); >+ return; >+ } >+ >+ if (irsp->ulpStatus) { >+ if (irsp->ulpStatus == IOSTAT_LS_RJT) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "1043 Authentication LS_RJT\n"); >+ } >+ /* Check for retry */ >+ if (!lpfc_els_retry(phba, cmdiocb, rspiocb)) { >+ if (irsp->ulpStatus != IOSTAT_LS_RJT) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "1045 Issue AUTH_NEG failed." 
>+ "Status:%x\n", >+ irsp->ulpStatus); >+ } >+ if (vport->auth.auth_mode == FC_AUTHMODE_ACTIVE) { >+ lpfc_can_disctmo(vport); >+ lpfc_port_auth_failed(ndlp); >+ } >+ } >+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && >+ (phba->link_state != LPFC_CLEAR_LA)) >+ lpfc_issue_clear_la(phba, vport); >+ lpfc_els_free_iocb(phba, cmdiocb); >+ return; >+ } >+ >+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS || >+ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY) { >+ >+ if (vport->auth.challenge) >+ kfree(vport->auth.challenge); >+ vport->auth.challenge = NULL; >+ vport->auth.challenge_len = 0; >+ if (vport->auth.dh_pub_key) >+ kfree(vport->auth.dh_pub_key); >+ vport->auth.dh_pub_key = NULL; >+ vport->auth.dh_pub_key_len = 0; >+ >+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS) { >+ if (vport->auth.auth_state != LPFC_AUTH_SUCCESS) { >+ lpfc_printf_vlog(vport, KERN_WARNING, >+ LOG_SECURITY, "1046 " >+ "Authentication Successful\n"); >+ vport->auth.auth_state = LPFC_AUTH_SUCCESS; >+ lpfc_start_discovery(vport); >+ } else { >+ lpfc_printf_vlog(vport, KERN_WARNING, >+ LOG_SECURITY, >+ "1047 Re-Authentication" >+ " Successful\n"); >+ } >+ } >+ /* restart authentication timer */ >+ vport->auth.last_auth = jiffies; >+ if (vport->auth.reauth_interval) >+ mod_timer(&ndlp->nlp_reauth_tmr, >+ jiffies + >+ vport->auth.reauth_interval * 60 * HZ); >+ } >+ lpfc_els_free_iocb(phba, cmdiocb); >+} >+ >+int >+lpfc_issue_els_auth(struct lpfc_vport *vport, >+ struct lpfc_nodelist *ndlp, >+ uint8_t message_code, >+ uint8_t *payload, >+ uint32_t payload_len) >+{ >+ struct lpfc_hba *phba = vport->phba; >+ struct lpfc_iocbq *elsiocb; >+ struct lpfc_auth_message *authreq; >+ >+ elsiocb = lpfc_prep_els_iocb(vport, 1, >+ sizeof(struct lpfc_auth_message) + payload_len, >+ 0, ndlp, ndlp->nlp_DID, ELS_CMD_AUTH); >+ >+ if (!elsiocb) >+ return 1; >+ authreq = (struct lpfc_auth_message *) >+ (((struct lpfc_dmabuf *) elsiocb->context2)->virt); >+ authreq->command_code = 
ELS_CMD_AUTH_BYTE; >+ authreq->flags = 0; >+ authreq->message_code = message_code; >+ authreq->protocol_ver = AUTH_VERSION; >+ authreq->message_len = cpu_to_be32(payload_len); >+ authreq->trans_id = cpu_to_be32(vport->auth.trans_id); >+ memcpy(authreq->data, payload, payload_len); >+ >+ elsiocb->iocb_cmpl = lpfc_cmpl_els_auth; >+ >+ if (lpfc_sli_issue_iocb(phba, &phba->sli.ring[LPFC_ELS_RING], >+ elsiocb, 0) == IOCB_ERROR) { >+ lpfc_els_free_iocb(phba, elsiocb); >+ return 1; >+ } >+ >+ return 0; >+} >+ >+static void >+lpfc_cmpl_els_auth_reject(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, >+ struct lpfc_iocbq *rspiocb) >+{ >+ struct lpfc_vport *vport = cmdiocb->vport; >+ IOCB_t *irsp = &rspiocb->iocb; >+ >+ if (irsp->ulpStatus) { >+ /* Check for retry */ >+ if (!lpfc_els_retry(phba, cmdiocb, rspiocb)) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS, >+ "1048 Issue AUTH_REJECT failed.\n"); >+ } >+ } >+ else >+ vport->port_state = LPFC_VPORT_UNKNOWN; >+ >+ lpfc_els_free_iocb(phba, cmdiocb); >+} >+ >+int >+lpfc_issue_els_auth_reject(struct lpfc_vport *vport, >+ struct lpfc_nodelist *ndlp, >+ uint8_t reason, uint8_t explanation) >+{ >+ struct lpfc_hba *phba = vport->phba; >+ struct lpfc_iocbq *elsiocb; >+ struct lpfc_sli_ring *pring; >+ struct lpfc_sli *psli; >+ struct lpfc_auth_message *authreq; >+ struct lpfc_auth_reject *reject; >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ >+ vport->auth.auth_msg_state = LPFC_AUTH_REJECT; >+ >+ elsiocb = lpfc_prep_els_iocb(vport, 1, sizeof(struct lpfc_auth_message) >+ + sizeof(struct lpfc_auth_reject), 0, ndlp, >+ ndlp->nlp_DID, ELS_CMD_AUTH); >+ >+ if (!elsiocb) >+ return 1; >+ >+ authreq = (struct lpfc_auth_message *) >+ (((struct lpfc_dmabuf *) elsiocb->context2)->virt); >+ authreq->command_code = ELS_CMD_AUTH_BYTE; >+ authreq->flags = 0; >+ authreq->message_code = AUTH_REJECT; >+ authreq->protocol_ver = AUTH_VERSION; >+ reject = (struct lpfc_auth_reject *)authreq->data; >+ memset(reject, 0, sizeof(struct 
lpfc_auth_reject)); >+ reject->reason = reason; >+ reject->explanation = explanation; >+ >+ authreq->message_len = cpu_to_be32(sizeof(struct lpfc_auth_reject)); >+ authreq->trans_id = cpu_to_be32(vport->auth.trans_id); >+ elsiocb->iocb_cmpl = lpfc_cmpl_els_auth_reject; >+ >+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { >+ lpfc_els_free_iocb(phba, elsiocb); >+ return 1; >+ } >+ >+ return 0; >+} >diff -urpN a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h >--- a/drivers/scsi/lpfc/lpfc.h 2008-09-05 17:47:41.617245000 -0400 >+++ b/drivers/scsi/lpfc/lpfc.h 2008-09-05 17:47:49.796876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -23,15 +23,17 @@ > > struct lpfc_sli2_slim; > >-#define LPFC_MAX_TARGET 256 /* max number of targets supported */ >+#define LPFC_MAX_TARGET 4096 /* max number of targets supported */ > #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els > requests */ > #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact > the NameServer before giving up. */ > #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ >-#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ >+#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ >+#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ > #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. 
*/ > #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ >+#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ > > /* > * Following time intervals are used of adjusting SCSI device >@@ -58,6 +60,9 @@ struct lpfc_sli2_slim; > > #define MAX_HBAEVT 32 > >+/* lpfc wait event data ready flag */ >+#define LPFC_DATA_READY (1<<0) >+ > enum lpfc_polling_flags { > ENABLE_FCP_RING_POLLING = 0x1, > DISABLE_FCP_RING_INT = 0x2 >@@ -68,6 +73,7 @@ struct lpfc_dmabuf { > struct list_head list; > void *virt; /* virtual address ptr */ > dma_addr_t phys; /* mapped address */ >+ uint32_t buffer_tag; /* used for tagged queue ring */ > }; > > struct lpfc_dma_pool { >@@ -198,18 +204,91 @@ struct lpfc_stats { > uint32_t fcpLocalErr; > }; > >+struct lpfc_timedout_iocb_ctxt { >+ struct lpfc_iocbq *rspiocbq; >+ struct lpfc_dmabuf *mp; >+ struct lpfc_dmabuf *bmp; >+ struct lpfc_scsi_buf *lpfc_cmd; >+ struct lpfc_dmabufext *outdmp; >+ struct lpfc_dmabufext *indmp; >+}; >+ >+struct lpfc_dmabufext { >+ struct lpfc_dmabuf dma; >+ uint32_t size; >+ uint32_t flag; >+}; >+ >+ > enum sysfs_mbox_state { > SMBOX_IDLE, > SMBOX_WRITING, >- SMBOX_READING >+ SMBOX_WRITING_MBEXT, >+ SMBOX_READING_MBEXT, >+ SMBOX_READING, >+ SMBOX_WRITING_BUFF, >+ SMBOX_READING_BUFF >+}; >+ >+struct lpfc_sysfs_mbox_data { >+ MAILBOX_t mbox; >+ uint32_t mboffset; >+ uint32_t in_ext_wlen; >+ uint32_t out_ext_wlen; > }; > > struct lpfc_sysfs_mbox { >+ struct lpfc_sysfs_mbox_data mbox_data; > enum sysfs_mbox_state state; > size_t offset; > struct lpfcMboxq * mbox; >+ /* process id of the mgmt application */ >+ pid_t pid; >+ struct list_head list; >+ uint8_t * mbext; >+ uint32_t extoff; >+ struct lpfc_dmabuf * txmit_buff; >+ struct lpfc_dmabuf * rcv_buff; >+}; >+#define MENLO_DID 0x0000FC0E >+ >+enum sysfs_menlo_state { >+ SMENLO_IDLE, >+ SMENLO_WRITING, >+ SMENLO_WRITING_MBEXT, >+ SMENLO_READING >+}; >+ >+struct lpfc_sysfs_menlo_hdr { >+ uint32_t cmd; >+ uint32_t cmdsize; >+ uint32_t rspsize; >+}; >+ 
>+struct lpfc_menlo_genreq64 { >+ size_t offset; >+ struct lpfc_iocbq *cmdiocbq; >+ struct lpfc_iocbq *rspiocbq; >+ struct lpfc_dmabuf *bmp; >+ struct lpfc_dmabufext *indmp; >+ struct ulp_bde64 *cmdbpl; >+ struct lpfc_dmabufext *outdmp; >+ uint32_t timeout; >+ struct list_head inhead; >+ struct list_head outhead; >+}; >+ >+struct lpfc_sysfs_menlo { >+ enum sysfs_menlo_state state; >+ /* process id of the mgmt application */ >+ struct lpfc_sysfs_menlo_hdr cmdhdr; >+ struct lpfc_menlo_genreq64 cr; >+ struct lpfc_menlo_genreq64 cx; >+ pid_t pid; >+ struct list_head list; > }; > >+ > struct lpfc_hba; > > >@@ -242,6 +321,52 @@ enum hba_state { > LPFC_HBA_ERROR = -1 > }; > >+enum auth_state { >+ LPFC_AUTH_UNKNOWN = 0, >+ LPFC_AUTH_SUCCESS = 1, >+ LPFC_AUTH_FAIL = 2, >+}; >+enum auth_msg_state { >+ LPFC_AUTH_NONE = 0, >+ LPFC_AUTH_REJECT = 1, /* Sent a Reject */ >+ LPFC_AUTH_NEGOTIATE = 2, /* Auth Negotiate */ >+ LPFC_DHCHAP_CHALLENGE = 3, /* Challenge */ >+ LPFC_DHCHAP_REPLY = 4, /* Reply */ >+ LPFC_DHCHAP_SUCCESS_REPLY = 5, /* Success with Reply */ >+ LPFC_DHCHAP_SUCCESS = 6, /* Success */ >+ LPFC_AUTH_DONE = 7, >+}; >+ >+struct lpfc_auth { >+ uint8_t auth_mode; >+ uint8_t bidirectional; >+ uint8_t hash_priority[4]; >+ uint32_t hash_len; >+ uint8_t dh_group_priority[8]; >+ uint32_t dh_group_len; >+ uint32_t reauth_interval; >+ >+ uint8_t security_active; >+ enum auth_state auth_state; >+ enum auth_msg_state auth_msg_state; >+ uint32_t trans_id; /* current transaction id. 
Can be set >+ by incomming transactions as well */ >+ uint32_t group_id; >+ uint32_t hash_id; >+ uint32_t direction; >+#define AUTH_DIRECTION_NONE 0 >+#define AUTH_DIRECTION_REMOTE 0x1 >+#define AUTH_DIRECTION_LOCAL 0x2 >+#define AUTH_DIRECTION_BIDI (AUTH_DIRECTION_LOCAL|AUTH_DIRECTION_REMOTE) >+ >+ uint8_t *challenge; >+ uint32_t challenge_len; >+ uint8_t *dh_pub_key; >+ uint32_t dh_pub_key_len; >+ >+ unsigned long last_auth; >+}; >+ > struct lpfc_vport { > struct list_head listentry; > struct lpfc_hba *phba; >@@ -266,16 +391,21 @@ struct lpfc_vport { > #define FC_NLP_MORE 0x40 /* More node to process in node tbl */ > #define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ > #define FC_FABRIC 0x100 /* We are fabric attached */ >-#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */ > #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ > #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ > #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ > #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ > #define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ >-#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */ > #define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ > #define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ > >+ uint32_t ct_flags; >+#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ >+#define FC_CT_RNN_ID 0x2 /* RNN_ID accepted by switch */ >+#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */ >+#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */ >+#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */ >+ > struct list_head fc_nodes; > > /* Keep counters for the number of entries in each list. 
*/ >@@ -299,6 +429,7 @@ struct lpfc_vport { > > uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ > uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ >+ uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */ > struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; > struct lpfc_name fc_nodename; /* fc nodename */ > struct lpfc_name fc_portname; /* fc portname */ >@@ -331,6 +462,14 @@ struct lpfc_vport { > #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ > char *vname; /* Application assigned name */ > >+ /* Fields used for accessing auth service */ >+ struct lpfc_auth auth; >+ uint32_t sc_tran_id; >+ struct list_head sc_response_wait_queue; >+ struct list_head sc_users; >+ struct work_struct sc_online_work; >+ struct work_struct sc_offline_work; >+ > /* Vport Config Parameters */ > uint32_t cfg_scan_down; > uint32_t cfg_lun_queue_depth; >@@ -344,11 +483,12 @@ struct lpfc_vport { > uint32_t cfg_discovery_threads; > uint32_t cfg_log_verbose; > uint32_t cfg_max_luns; >+ uint32_t cfg_enable_da_id; >+ uint32_t cfg_enable_auth; > > uint32_t dev_loss_tmo_changed; > > struct fc_vport *fc_vport; >- > #ifdef CONFIG_LPFC_DEBUG_FS > struct dentry *debug_disc_trc; > struct dentry *debug_nodelist; >@@ -360,6 +500,7 @@ struct lpfc_vport { > > struct hbq_s { > uint16_t entry_count; /* Current number of HBQ slots */ >+ uint16_t buffer_count; /* Current number of buffers posted */ > uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */ > uint32_t hbqPutIdx; /* HBQ slot to use */ > uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */ >@@ -377,6 +518,18 @@ struct hbq_s { > #define LPFC_ELS_HBQ 0 > #define LPFC_EXTRA_HBQ 1 > >+enum hba_temp_state { >+ HBA_NORMAL_TEMP, >+ HBA_OVER_TEMP >+}; >+ >+enum intr_type_t { >+ NONE = 0, >+ INTx, >+ MSI, >+ MSIX, >+}; >+ > struct lpfc_hba { > struct lpfc_sli sli; > uint32_t sli_rev; /* SLI2 or SLI3 */ >@@ -394,7 +547,7 @@ struct lpfc_hba { > /* This flag is set while issuing */ > /* 
INIT_LINK mailbox command */ > #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ >-#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */ >+#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ > > struct lpfc_sli2_slim *slim2p; > struct lpfc_dmabuf hbqslimp; >@@ -403,15 +556,10 @@ struct lpfc_hba { > > uint16_t pci_cfg_value; > >- uint8_t work_found; >-#define LPFC_MAX_WORKER_ITERATION 4 >- > uint8_t fc_linkspeed; /* Link speed after last READ_LA */ > > uint32_t fc_eventTag; /* event tag for link attention */ > >- >- struct timer_list fc_estabtmo; /* link establishment timer */ > /* These fields used to be binfo */ > uint32_t fc_pref_DID; /* preferred D_ID */ > uint8_t fc_pref_ALPA; /* preferred AL_PA */ >@@ -457,7 +605,9 @@ struct lpfc_hba { > uint64_t cfg_soft_wwnn; > uint64_t cfg_soft_wwpn; > uint32_t cfg_hba_queue_depth; >- >+ uint32_t cfg_enable_hba_reset; >+ uint32_t cfg_enable_hba_heartbeat; >+ uint32_t cfg_pci_max_read; > > lpfc_vpd_t vpd; /* vital product data */ > >@@ -468,9 +618,12 @@ struct lpfc_hba { > uint32_t work_hs; /* HS stored in case of ERRAT */ > uint32_t work_status[2]; /* Extra status from SLIM */ > >- wait_queue_head_t *work_wait; >+ wait_queue_head_t work_waitq; > struct task_struct *worker_thread; >+ long data_flags; > >+ uint32_t hbq_in_use; /* HBQs in use flag */ >+ struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ > uint32_t hbq_count; /* Count of configured HBQs */ > struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ > >@@ -519,7 +672,9 @@ struct lpfc_hba { > uint64_t fc4OutputRequests; > uint64_t fc4ControlRequests; > >- struct lpfc_sysfs_mbox sysfs_mbox; >+ /* List of mailbox commands issued through sysfs */ >+ struct list_head sysfs_mbox_list; >+ struct list_head sysfs_menlo_list; > > /* fastpath list. 
*/ > spinlock_t scsi_buf_list_lock; >@@ -539,13 +694,15 @@ struct lpfc_hba { > mempool_t *nlp_mem_pool; > > struct fc_host_statistics link_stats; >- uint8_t using_msi; >+ enum intr_type_t intr_type; >+ struct msix_entry msix_entries[1]; >+ struct lpfcdfc_host *dfc_host; > > struct list_head port_list; > struct lpfc_vport *pport; /* physical lpfc_vport pointer */ > uint16_t max_vpi; /* Maximum virtual nports */ >-#define LPFC_MAX_VPI 100 /* Max number of VPI supported */ >-#define LPFC_MAX_VPORTS (LPFC_MAX_VPI+1)/* Max number of VPorts supported */ >+#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ >+#define LPFC_INTR_VPI 100 /* Intermediate VPI supported */ > unsigned long *vpi_bmask; /* vpi allocation table */ > > /* Data structure used by fabric iocb scheduler */ >@@ -563,16 +720,34 @@ struct lpfc_hba { > struct dentry *hba_debugfs_root; > atomic_t debugfs_vport_count; > struct dentry *debug_hbqinfo; >- struct dentry *debug_dumpslim; >+ struct dentry *debug_dumpHostSlim; >+ struct dentry *debug_dumpHBASlim; > struct dentry *debug_slow_ring_trc; > struct lpfc_debugfs_trc *slow_ring_trc; > atomic_t slow_ring_trc_cnt; > #endif > >+ /* Used for deferred freeing of ELS data buffers */ >+ struct list_head elsbuf; >+ int elsbuf_cnt; >+ int elsbuf_prev_cnt; >+ >+ uint8_t temp_sensor_support; > /* Fields used for heart beat. */ > unsigned long last_completion_time; > struct timer_list hb_tmofunc; > uint8_t hb_outstanding; >+ enum hba_temp_state over_temp_state; >+ /* ndlp reference management */ >+ spinlock_t ndlp_lock; >+ /* >+ * Following bit will be set for all buffer tags which are not >+ * associated with any HBQ. 
>+ */ >+#define QUE_BUFTAG_BIT (1<<31) >+ uint32_t buffer_tag_count; >+ int wait_4_mlo_maint_flg; >+ wait_queue_head_t wait_4_mlo_m_q; > }; > > static inline struct Scsi_Host * >@@ -598,5 +773,26 @@ lpfc_is_link_up(struct lpfc_hba *phba) > phba->link_state == LPFC_HBA_READY; > } > >-#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ >+static inline void >+lpfc_worker_wake_up(struct lpfc_hba *phba) >+{ >+ /* Set the lpfc data pending flag */ >+ set_bit(LPFC_DATA_READY, &phba->data_flags); >+ >+ /* Wake up worker thread */ >+ wake_up(&phba->work_waitq); >+ return; >+} > >+#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ >+#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature >+ event */ >+ >+struct temp_event { >+ uint32_t event_type; >+ uint32_t event_code; >+ uint32_t data; >+}; >+#define LPFC_CRIT_TEMP 0x1 >+#define LPFC_THRESHOLD_TEMP 0x2 >+#define LPFC_NORMAL_TEMP 0x3 >diff -urpN a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c >--- a/drivers/scsi/lpfc/lpfc_hbadisc.c 2008-09-05 17:47:41.674247000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c 2008-09-05 17:47:49.693875000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. 
* > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -56,8 +56,53 @@ static uint8_t lpfcAlpaArray[] = { > 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 > }; > >+extern void lpfc_check_menlo_cfg(struct lpfc_hba *phba); >+ > static void lpfc_disc_timeout_handler(struct lpfc_vport *); > >+ >+void >+lpfc_start_discovery(struct lpfc_vport *vport) >+{ >+ struct lpfc_hba *phba = vport->phba; >+ struct lpfc_vport **vports; >+ int i; >+ >+ if (vport->auth.security_active && >+ vport->auth.auth_state != LPFC_AUTH_SUCCESS) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, >+ "0285 Authentication not complete.\n"); >+ return; >+ } >+ if (vport->port_type == LPFC_NPIV_PORT) { >+ lpfc_do_scr_ns_plogi(phba, vport); >+ return; >+ } >+ >+ vports = lpfc_create_vport_work_array(phba); >+ if (vports != NULL) >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { >+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT) >+ continue; >+ if (phba->fc_topology == TOPOLOGY_LOOP) { >+ lpfc_vport_set_state(vports[i], >+ FC_VPORT_LINKDOWN); >+ continue; >+ } >+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) >+ lpfc_initial_fdisc(vports[i]); >+ else { >+ lpfc_vport_set_state(vports[i], >+ FC_VPORT_NO_FABRIC_SUPP); >+ lpfc_printf_vlog(vports[i], KERN_ERR, LOG_ELS, >+ "0259 No NPIV Fabric " >+ "support\n"); >+ } >+ } >+ lpfc_destroy_vport_work_array(phba, vports); >+ lpfc_do_scr_ns_plogi(phba, vport); >+} >+ > void > lpfc_terminate_rport_io(struct fc_rport *rport) > { >@@ -68,7 +113,7 @@ lpfc_terminate_rport_io(struct fc_rport > rdata = rport->dd_data; > ndlp = rdata->pnode; > >- if (!ndlp) { >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { > if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) > printk(KERN_ERR "Cannot find remote node" > " to terminate I/O Data x%x\n", >@@ -107,20 +152,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport > struct lpfc_nodelist * ndlp; > struct lpfc_vport *vport; > struct lpfc_hba *phba; >- struct completion devloss_compl; > struct lpfc_work_evt *evtp; >+ int 
put_node; >+ int put_rport; > > rdata = rport->dd_data; > ndlp = rdata->pnode; >- >- if (!ndlp) { >- if (rport->scsi_target_id != -1) { >- printk(KERN_ERR "Cannot find remote node" >- " for rport in dev_loss_tmo_callbk x%x\n", >- rport->port_id); >- } >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) > return; >- } > > vport = ndlp->vport; > phba = vport->phba; >@@ -129,24 +168,42 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport > "rport devlosscb: sid:x%x did:x%x flg:x%x", > ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); > >- init_completion(&devloss_compl); >+ /* Don't defer this if we are in the process of deleting the vport >+ * or unloading the driver. The unload will cleanup the node >+ * appropriately we just need to cleanup the ndlp rport info here. >+ */ >+ if (vport->load_flag & FC_UNLOADING) { >+ put_node = rdata->pnode != NULL; >+ put_rport = ndlp->rport != NULL; >+ rdata->pnode = NULL; >+ ndlp->rport = NULL; >+ if (put_node) >+ lpfc_nlp_put(ndlp); >+ if (put_rport) >+ put_device(&rport->dev); >+ return; >+ } >+ >+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) >+ return; >+ > evtp = &ndlp->dev_loss_evt; > > if (!list_empty(&evtp->evt_listp)) > return; > > spin_lock_irq(&phba->hbalock); >- evtp->evt_arg1 = ndlp; >- evtp->evt_arg2 = &devloss_compl; >- evtp->evt = LPFC_EVT_DEV_LOSS; >- list_add_tail(&evtp->evt_listp, &phba->work_list); >- if (phba->work_wait) >- wake_up(phba->work_wait); >- >+ /* We need to hold the node by incrementing the reference >+ * count until this queued work is done >+ */ >+ evtp->evt_arg1 = lpfc_nlp_get(ndlp); >+ if (evtp->evt_arg1) { >+ evtp->evt = LPFC_EVT_DEV_LOSS; >+ list_add_tail(&evtp->evt_listp, &phba->work_list); >+ lpfc_worker_wake_up(phba); >+ } > spin_unlock_irq(&phba->hbalock); > >- wait_for_completion(&devloss_compl); >- > return; > } > >@@ -162,6 +219,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_no > struct lpfc_vport *vport; > struct lpfc_hba *phba; > uint8_t *name; >+ int put_node; >+ int put_rport; > int warn_on = 0; > > rport = 
ndlp->rport; >@@ -178,14 +237,60 @@ lpfc_dev_loss_tmo_handler(struct lpfc_no > "rport devlosstmo:did:x%x type:x%x id:x%x", > ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); > >- if (!(vport->load_flag & FC_UNLOADING) && >- ndlp->nlp_state == NLP_STE_MAPPED_NODE) >+ /* Don't defer this if we are in the process of deleting the vport >+ * or unloading the driver. The unload will cleanup the node >+ * appropriately we just need to cleanup the ndlp rport info here. >+ */ >+ if (vport->load_flag & FC_UNLOADING) { >+ if (ndlp->nlp_sid != NLP_NO_SID) { >+ /* flush the target */ >+ lpfc_sli_abort_iocb(vport, >+ &phba->sli.ring[phba->sli.fcp_ring], >+ ndlp->nlp_sid, 0, LPFC_CTX_TGT); >+ } >+ put_node = rdata->pnode != NULL; >+ put_rport = ndlp->rport != NULL; >+ rdata->pnode = NULL; >+ ndlp->rport = NULL; >+ if (put_node) >+ lpfc_nlp_put(ndlp); >+ if (put_rport) >+ put_device(&rport->dev); > return; >+ } > >- if (ndlp->nlp_type & NLP_FABRIC) { >- int put_node; >- int put_rport; >+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >+ "0284 Devloss timeout Ignored on " >+ "WWPN %x:%x:%x:%x:%x:%x:%x:%x " >+ "NPort x%x\n", >+ *name, *(name+1), *(name+2), *(name+3), >+ *(name+4), *(name+5), *(name+6), *(name+7), >+ ndlp->nlp_DID); >+ return; >+ } >+ /* >+ * This is a workaround for an issue seen in the transport. If the >+ * driver removes, then adds, and then removes the rport again whithin >+ * devloss_tmo the transport may call devloss_tmo for the original >+ * remove even though it should have reset the timer. 
>+ */ >+ if (ndlp->nlp_sid != NLP_NO_SID && >+ time_after(ndlp->unreg_time + >+ msecs_to_jiffies(vport->cfg_devloss_tmo * 1000), >+ jiffies)) { >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, >+ "0283 Devloss timeout Skipped on " >+ "WWPN %x:%x:%x:%x:%x:%x:%x:%x " >+ "NPort x%x Time %d\n", >+ *name, *(name+1), *(name+2), *(name+3), >+ *(name+4), *(name+5), *(name+6), *(name+7), >+ ndlp->nlp_DID, >+ jiffies_to_msecs(jiffies-ndlp->unreg_time)); >+ return; >+ } > >+ if (ndlp->nlp_type & NLP_FABRIC) { > /* We will clean up these Nodes in linkup */ > put_node = rdata->pnode != NULL; > put_rport = ndlp->rport != NULL; >@@ -198,6 +303,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_no > return; > } > >+ > if (ndlp->nlp_sid != NLP_NO_SID) { > warn_on = 1; > /* flush the target */ >@@ -210,8 +316,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_no > if (warn_on) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, > "0203 Devloss timeout on " >- "WWPN %x:%x:%x:%x:%x:%x:%x:%x " >- "NPort x%x Data: x%x x%x x%x\n", >+ "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " >+ "NPort x%06x Data: x%x x%x x%x\n", > *name, *(name+1), *(name+2), *(name+3), > *(name+4), *(name+5), *(name+6), *(name+7), > ndlp->nlp_DID, ndlp->nlp_flag, >@@ -219,40 +325,28 @@ lpfc_dev_loss_tmo_handler(struct lpfc_no > } else { > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, > "0204 Devloss timeout on " >- "WWPN %x:%x:%x:%x:%x:%x:%x:%x " >- "NPort x%x Data: x%x x%x x%x\n", >+ "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " >+ "NPort x%06x Data: x%x x%x x%x\n", > *name, *(name+1), *(name+2), *(name+3), > *(name+4), *(name+5), *(name+6), *(name+7), > ndlp->nlp_DID, ndlp->nlp_flag, > ndlp->nlp_state, ndlp->nlp_rpi); > } > >+ put_node = rdata->pnode != NULL; >+ put_rport = ndlp->rport != NULL; >+ rdata->pnode = NULL; >+ ndlp->rport = NULL; >+ if (put_node) >+ lpfc_nlp_put(ndlp); >+ if (put_rport) >+ put_device(&rport->dev); >+ > if (!(vport->load_flag & FC_UNLOADING) && > !(ndlp->nlp_flag & NLP_DELAY_TMO) && > 
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) && > (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) > lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); >- else { >- int put_node; >- int put_rport; >- >- put_node = rdata->pnode != NULL; >- put_rport = ndlp->rport != NULL; >- rdata->pnode = NULL; >- ndlp->rport = NULL; >- if (put_node) >- lpfc_nlp_put(ndlp); >- if (put_rport) >- put_device(&rport->dev); >- } >-} >- >- >-void >-lpfc_worker_wake_up(struct lpfc_hba *phba) >-{ >- wake_up(phba->work_wait); >- return; > } > > static void >@@ -274,31 +368,54 @@ lpfc_work_list_done(struct lpfc_hba *phb > free_evt = 0; /* evt is part of ndlp */ > ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); > vport = ndlp->vport; >- if (!vport) >+ if (!vport) { >+ /* decrement the node reference count held >+ * for this queued work >+ */ >+ lpfc_nlp_put(ndlp); > break; >- >+ } > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, > "rport devlossdly:did:x%x flg:x%x", > ndlp->nlp_DID, ndlp->nlp_flag, 0); > > if (!(vport->load_flag & FC_UNLOADING) && > !(ndlp->nlp_flag & NLP_DELAY_TMO) && >- !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { >+ !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && >+ (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) { > lpfc_disc_state_machine(vport, ndlp, NULL, > NLP_EVT_DEVICE_RM); > } >+ /* decrement the node reference count held >+ * for this queued work >+ */ >+ lpfc_nlp_put(ndlp); > break; > case LPFC_EVT_ELS_RETRY: > ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); > lpfc_els_retry_delay_handler(ndlp); > free_evt = 0; /* evt is part of ndlp */ >+ /* decrement the node reference count held >+ * for this queued work >+ */ >+ lpfc_nlp_put(ndlp); >+ break; >+ case LPFC_EVT_REAUTH: >+ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); >+ lpfc_reauthentication_handler(ndlp); >+ free_evt = 0; /* evt is part of ndlp */ >+ /* decrement the node reference count held >+ * for this queued work >+ */ >+ lpfc_nlp_put(ndlp); > break; > case LPFC_EVT_DEV_LOSS: > ndlp = (struct lpfc_nodelist 
*)(evtp->evt_arg1); >- lpfc_nlp_get(ndlp); > lpfc_dev_loss_tmo_handler(ndlp); > free_evt = 0; >- complete((struct completion *)(evtp->evt_arg2)); >+ /* decrement the node reference count held for >+ * this queued work >+ */ > lpfc_nlp_put(ndlp); > break; > case LPFC_EVT_ONLINE: >@@ -373,7 +490,7 @@ lpfc_work_done(struct lpfc_hba *phba) > lpfc_handle_latt(phba); > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS; i++) { >+ for(i = 0; i <= phba->max_vpi; i++) { > /* > * We could have no vports in array if unloading, so if > * this happens then just use the pport >@@ -384,7 +501,10 @@ lpfc_work_done(struct lpfc_hba *phba) > vport = vports[i]; > if (vport == NULL) > break; >+ spin_lock_irq(&vport->work_port_lock); > work_port_events = vport->work_port_events; >+ vport->work_port_events &= ~work_port_events; >+ spin_unlock_irq(&vport->work_port_lock); > if (work_port_events & WORKER_DISC_TMO) > lpfc_disc_timeout_handler(vport); > if (work_port_events & WORKER_ELS_TMO) >@@ -401,24 +521,23 @@ lpfc_work_done(struct lpfc_hba *phba) > lpfc_ramp_down_queue_handler(phba); > if (work_port_events & WORKER_RAMP_UP_QUEUE) > lpfc_ramp_up_queue_handler(phba); >- spin_lock_irq(&vport->work_port_lock); >- vport->work_port_events &= ~work_port_events; >- spin_unlock_irq(&vport->work_port_lock); > } >- lpfc_destroy_vport_work_array(vports); >+ lpfc_destroy_vport_work_array(phba, vports); > > pring = &phba->sli.ring[LPFC_ELS_RING]; > status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); > status >>= (4*LPFC_ELS_RING); > if ((status & HA_RXMASK) > || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { >- if (pring->flag & LPFC_STOP_IOCB_MASK) { >+ if (pring->flag & LPFC_STOP_IOCB_EVENT) { > pring->flag |= LPFC_DEFERRED_RING_EVENT; >+ /* Set the lpfc data pending flag */ >+ set_bit(LPFC_DATA_READY, &phba->data_flags); > } else { >+ pring->flag &= ~LPFC_DEFERRED_RING_EVENT; > lpfc_sli_handle_slow_ring_event(phba, pring, > (status & > HA_RXMASK)); 
>- pring->flag &= ~LPFC_DEFERRED_RING_EVENT; > } > /* > * Turn on Ring interrupts >@@ -444,67 +563,29 @@ lpfc_work_done(struct lpfc_hba *phba) > lpfc_work_list_done(phba); > } > >-static int >-check_work_wait_done(struct lpfc_hba *phba) >-{ >- struct lpfc_vport *vport; >- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; >- int rc = 0; >- >- spin_lock_irq(&phba->hbalock); >- list_for_each_entry(vport, &phba->port_list, listentry) { >- if (vport->work_port_events) { >- rc = 1; >- break; >- } >- } >- if (rc || phba->work_ha || (!list_empty(&phba->work_list)) || >- kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) { >- rc = 1; >- phba->work_found++; >- } else >- phba->work_found = 0; >- spin_unlock_irq(&phba->hbalock); >- return rc; >-} >- >- > int > lpfc_do_work(void *p) > { > struct lpfc_hba *phba = p; > int rc; >- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq); > > set_user_nice(current, -20); >- phba->work_wait = &work_waitq; >- phba->work_found = 0; >+ phba->data_flags = 0; > > while (1) { >- >- rc = wait_event_interruptible(work_waitq, >- check_work_wait_done(phba)); >- >+ /* wait and check worker queue activities */ >+ rc = wait_event_interruptible(phba->work_waitq, >+ (test_and_clear_bit(LPFC_DATA_READY, >+ &phba->data_flags) >+ || kthread_should_stop())); > BUG_ON(rc); > > if (kthread_should_stop()) > break; > >+ /* Attend pending lpfc data processing */ > lpfc_work_done(phba); >- >- /* If there is alot of slow ring work, like during link up >- * check_work_wait_done() may cause this thread to not give >- * up the CPU for very long periods of time. This may cause >- * soft lockups or other problems. To avoid these situations >- * give up the CPU here after LPFC_MAX_WORKER_ITERATION >- * consecutive iterations. 
>- */ >- if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) { >- phba->work_found = 0; >- schedule(); >- } > } >- phba->work_wait = NULL; > return 0; > } > >@@ -534,25 +615,32 @@ lpfc_workq_post_event(struct lpfc_hba *p > > spin_lock_irqsave(&phba->hbalock, flags); > list_add_tail(&evtp->evt_listp, &phba->work_list); >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); > spin_unlock_irqrestore(&phba->hbalock, flags); > >+ lpfc_worker_wake_up(phba); >+ > return 1; > } > > void > lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) > { >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > struct lpfc_hba *phba = vport->phba; > struct lpfc_nodelist *ndlp, *next_ndlp; > int rc; > > list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) > continue; >+ /* Stop re-authentication timer of all nodes. */ >+ del_timer_sync(&ndlp->nlp_reauth_tmr); > >- if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) >+ if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || >+ ((vport->port_type == LPFC_NPIV_PORT) && >+ (ndlp->nlp_DID == NameServer_DID))) > lpfc_unreg_rpi(vport, ndlp); > > /* Leave Fabric nodes alone on link down */ >@@ -564,15 +652,35 @@ lpfc_cleanup_rpis(struct lpfc_vport *vpo > : NLP_EVT_DEVICE_RECOVERY); > } > if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { >- lpfc_mbx_unreg_vpi(vport); >+ rc = lpfc_mbx_unreg_vpi(vport); >+ spin_lock_irq(shost->host_lock); > vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; >+ spin_unlock_irq(shost->host_lock); > } > } > >+/* >+ * This function can be called due to physical link failure >+ * or link authentication failure. 
>+ */ >+void >+lpfc_port_link_failure(struct lpfc_vport *vport) >+{ >+ /* Cleanup any outstanding RSCN activity */ >+ lpfc_els_flush_rscn(vport); >+ >+ /* Cleanup any outstanding ELS commands */ >+ lpfc_els_flush_cmd(vport); >+ >+ lpfc_cleanup_rpis(vport, 0); >+ >+ /* Turn off discovery timer if its running */ >+ lpfc_can_disctmo(vport); >+} >+ > static void > lpfc_linkdown_port(struct lpfc_vport *vport) > { >- struct lpfc_nodelist *ndlp, *next_ndlp; > struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > > fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); >@@ -581,21 +689,25 @@ lpfc_linkdown_port(struct lpfc_vport *vp > "Link Down: state:x%x rtry:x%x flg:x%x", > vport->port_state, vport->fc_ns_retry, vport->fc_flag); > >- /* Cleanup any outstanding RSCN activity */ >- lpfc_els_flush_rscn(vport); >- >- /* Cleanup any outstanding ELS commands */ >- lpfc_els_flush_cmd(vport); >+ lpfc_port_link_failure(vport); > >- lpfc_cleanup_rpis(vport, 0); >+ vport->auth.auth_state = LPFC_AUTH_UNKNOWN; >+ vport->auth.auth_msg_state = LPFC_AUTH_NONE; >+} > >- /* free any ndlp's on unused list */ >- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) >- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) >- lpfc_drop_node(vport, ndlp); >+void >+lpfc_port_auth_failed(struct lpfc_nodelist *ndlp) >+{ >+ struct lpfc_vport *vport = ndlp->vport; > >- /* Turn off discovery timer if its running */ >- lpfc_can_disctmo(vport); >+ vport->auth.auth_state = LPFC_AUTH_FAIL; >+ vport->auth.auth_msg_state = LPFC_AUTH_NONE; >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); >+ if (ndlp->nlp_type & NLP_FABRIC) { >+ lpfc_port_link_failure (vport); >+ lpfc_vport_set_state(vport, FC_VPORT_FAILED); >+ lpfc_issue_els_logo(vport, ndlp, 0); >+ } > } > > int >@@ -607,9 +719,8 @@ lpfc_linkdown(struct lpfc_hba *phba) > LPFC_MBOXQ_t *mb; > int i; > >- if (phba->link_state == LPFC_LINK_DOWN) { >+ if (phba->link_state == LPFC_LINK_DOWN) > return 0; >- } > 
spin_lock_irq(&phba->hbalock); > if (phba->link_state > LPFC_LINK_DOWN) { > phba->link_state = LPFC_LINK_DOWN; >@@ -618,18 +729,18 @@ lpfc_linkdown(struct lpfc_hba *phba) > spin_unlock_irq(&phba->hbalock); > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { > /* Issue a LINK DOWN event to all nodes */ > lpfc_linkdown_port(vports[i]); > } >- lpfc_destroy_vport_work_array(vports); >+ lpfc_destroy_vport_work_array(phba, vports); > /* Clean up any firmware default rpi's */ > mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); > if (mb) { > lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); > mb->vport = vport; > mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; >- if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) >+ if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) > == MBX_NOT_FINISHED) { > mempool_free(mb, phba->mbox_mem_pool); > } >@@ -643,8 +754,7 @@ lpfc_linkdown(struct lpfc_hba *phba) > lpfc_config_link(phba, mb); > mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; > mb->vport = vport; >- if (lpfc_sli_issue_mbox(phba, mb, >- (MBX_NOWAIT | MBX_STOP_IOCB)) >+ if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) > == MBX_NOT_FINISHED) { > mempool_free(mb, phba->mbox_mem_pool); > } >@@ -663,20 +773,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vp > struct lpfc_nodelist *ndlp; > > list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) > continue; >- > if (ndlp->nlp_type & NLP_FABRIC) { >- /* On Linkup its safe to clean up the ndlp >- * from Fabric connections. >- */ >+ /* On Linkup its safe to clean up the ndlp >+ * from Fabric connections. 
>+ */ > if (ndlp->nlp_DID != Fabric_DID) > lpfc_unreg_rpi(vport, ndlp); > lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { >- /* Fail outstanding IO now since device is >- * marked for PLOGI. >- */ >+ /* Fail outstanding IO now since device is >+ * marked for PLOGI. >+ */ > lpfc_unreg_rpi(vport, ndlp); > } > } >@@ -686,7 +797,6 @@ static void > lpfc_linkup_port(struct lpfc_vport *vport) > { > struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >- struct lpfc_nodelist *ndlp, *next_ndlp; > struct lpfc_hba *phba = vport->phba; > > if ((vport->load_flag & FC_UNLOADING) != 0) >@@ -713,11 +823,6 @@ lpfc_linkup_port(struct lpfc_vport *vpor > if (vport->fc_flag & FC_LBIT) > lpfc_linkup_cleanup_nodes(vport); > >- /* free any ndlp's in unused state */ >- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, >- nlp_listp) >- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) >- lpfc_drop_node(vport, ndlp); > } > > static int >@@ -734,9 +839,9 @@ lpfc_linkup(struct lpfc_hba *phba) > > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) > lpfc_linkup_port(vports[i]); >- lpfc_destroy_vport_work_array(vports); >+ lpfc_destroy_vport_work_array(phba, vports); > if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) > lpfc_issue_clear_la(phba, phba->pport); > >@@ -784,21 +889,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * > writel(control, phba->HCregaddr); > readl(phba->HCregaddr); /* flush */ > spin_unlock_irq(&phba->hbalock); >+ mempool_free(pmb, phba->mbox_mem_pool); > return; > >- vport->num_disc_nodes = 0; >- /* go thru NPR nodes and issue ELS PLOGIs */ >- if (vport->fc_npr_cnt) >- lpfc_els_disc_plogi(vport); >- >- if (!vport->num_disc_nodes) { >- spin_lock_irq(shost->host_lock); >- vport->fc_flag &= ~FC_NDISC_ACTIVE; >- spin_unlock_irq(shost->host_lock); >- } >- >- vport->port_state = 
LPFC_VPORT_READY; >- > out: > /* Device Discovery completes */ > lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >@@ -806,11 +899,9 @@ out: > mempool_free(pmb, phba->mbox_mem_pool); > > spin_lock_irq(shost->host_lock); >- vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK); >+ vport->fc_flag &= ~FC_ABORT_DISCOVERY; > spin_unlock_irq(shost->host_lock); > >- del_timer_sync(&phba->fc_estabtmo); >- > lpfc_can_disctmo(vport); > > /* turn on Link Attention interrupts */ >@@ -852,8 +943,6 @@ lpfc_mbx_cmpl_local_config_link(struct l > * LPFC_FLOGI while waiting for FLOGI cmpl > */ > if (vport->port_state != LPFC_FLOGI) { >- vport->port_state = LPFC_FLOGI; >- lpfc_set_disctmo(vport); > lpfc_initial_flogi(vport); > } > return; >@@ -962,7 +1051,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba > if (phba->fc_topology == TOPOLOGY_LOOP) { > phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; > >- /* Get Loop Map information */ >+ if (phba->cfg_enable_npiv) >+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, >+ "1310 Link Up Event npiv not supported in loop " >+ "topology\n"); >+ /* Get Loop Map information */ > if (la->il) > vport->fc_flag |= FC_LBIT; > >@@ -1022,8 +1115,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba > lpfc_read_sparam(phba, sparam_mbox, 0); > sparam_mbox->vport = vport; > sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; >- rc = lpfc_sli_issue_mbox(phba, sparam_mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); > if (rc == MBX_NOT_FINISHED) { > mp = (struct lpfc_dmabuf *) sparam_mbox->context1; > lpfc_mbuf_free(phba, mp->virt, mp->phys); >@@ -1040,8 +1132,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba > lpfc_config_link(phba, cfglink_mbox); > cfglink_mbox->vport = vport; > cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; >- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); > if (rc != MBX_NOT_FINISHED) > return; 
> mempool_free(cfglink_mbox, phba->mbox_mem_pool); >@@ -1056,14 +1147,10 @@ out: > } > > static void >-lpfc_mbx_issue_link_down(struct lpfc_hba *phba) >+lpfc_enable_la(struct lpfc_hba *phba) > { > uint32_t control; > struct lpfc_sli *psli = &phba->sli; >- >- lpfc_linkdown(phba); >- >- /* turn on Link Attention interrupts - no CLEAR_LA needed */ > spin_lock_irq(&phba->hbalock); > psli->sli_flag |= LPFC_PROCESS_LA; > control = readl(phba->HCregaddr); >@@ -1073,6 +1160,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba > spin_unlock_irq(&phba->hbalock); > } > >+static void >+lpfc_mbx_issue_link_down(struct lpfc_hba *phba) >+{ >+ lpfc_linkdown(phba); >+ lpfc_enable_la(phba); >+ /* turn on Link Attention interrupts - no CLEAR_LA needed */ >+} >+ >+ > /* > * This routine handles processing a READ_LA mailbox > * command upon completion. It is setup in the LPFC_MBOXQ >@@ -1088,6 +1184,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *p > MAILBOX_t *mb = &pmb->mb; > struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); > >+ /* Unblock ELS traffic */ >+ phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; > /* Check for error */ > if (mb->mbxStatus) { > lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, >@@ -1118,11 +1216,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *p > } > > phba->fc_eventTag = la->eventTag; >+ if (la->mm) >+ phba->sli.sli_flag |= LPFC_MENLO_MAINT; >+ else >+ phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; > >- if (la->attType == AT_LINK_UP) { >+ if (la->attType == AT_LINK_UP && (!la->mm)) { > phba->fc_stat.LinkUp++; > if (phba->link_flag & LS_LOOPBACK_MODE) { >- lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, >+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, > "1306 Link Up Event in loop back mode " > "x%x received Data: x%x x%x x%x x%x\n", > la->eventTag, phba->fc_eventTag, >@@ -1131,21 +1233,70 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *p > } else { > lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, > "1303 Link Up Event x%x received " >- "Data: x%x x%x 
x%x x%x\n", >+ "Data: x%x x%x x%x x%x x%x x%x %d\n", > la->eventTag, phba->fc_eventTag, > la->granted_AL_PA, la->UlnkSpeed, >- phba->alpa_map[0]); >+ phba->alpa_map[0], >+ la->mm, la->fa, >+ phba->wait_4_mlo_maint_flg); > } > lpfc_mbx_process_link_up(phba, la); >- } else { >+ } else if (la->attType == AT_LINK_DOWN) { > phba->fc_stat.LinkDown++; >- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, >- "1305 Link Down Event x%x received " >+ if (phba->link_flag & LS_LOOPBACK_MODE) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, >+ "1300 Link Down Event in loop back mode " >+ "x%x received " > "Data: x%x x%x x%x\n", > la->eventTag, phba->fc_eventTag, > phba->pport->port_state, vport->fc_flag); >+ } >+ else { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, >+ "1305 Link Down Event x%x received " >+ "Data: x%x x%x x%x x%x x%x\n", >+ la->eventTag, phba->fc_eventTag, >+ phba->pport->port_state, vport->fc_flag, >+ la->mm, la->fa); >+ } > lpfc_mbx_issue_link_down(phba); > } >+ if (la->mm && la->attType == AT_LINK_UP) { >+ if (phba->link_state != LPFC_LINK_DOWN) { >+ phba->fc_stat.LinkDown++; >+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, >+ "1309 Link Down Event x%x received " >+ "Data: x%x x%x x%x\n", >+ la->eventTag, phba->fc_eventTag, >+ phba->pport->port_state, vport->fc_flag); >+ lpfc_mbx_issue_link_down(phba); >+ } else >+ lpfc_enable_la(phba); >+ >+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, >+ "1308 Menlo Maint Mode Link up Event x%x rcvd " >+ "Data: x%x x%x x%x\n", >+ la->eventTag, phba->fc_eventTag, >+ phba->pport->port_state, vport->fc_flag); >+ /* >+ * The cmnd that triggered this will be waiting for this >+ * signal. >+ */ >+ /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. 
*/ >+ if ( phba->wait_4_mlo_maint_flg) { >+ phba->wait_4_mlo_maint_flg = 0; >+ wake_up_interruptible(&phba->wait_4_mlo_m_q); >+ } >+ } >+ >+ if (la->fa ) { >+ if (la->mm) >+ lpfc_issue_clear_la(phba, vport); >+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, >+ "1311 fa %d\n", la->fa); >+ lpfc_check_menlo_cfg(phba); >+ } >+ > > lpfc_mbx_cmpl_read_la_free_mbuf: > lpfc_mbuf_free(phba, mp->virt, mp->phys); >@@ -1174,6 +1325,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); > mempool_free(pmb, phba->mbox_mem_pool); >+ /* decrement the node reference count held for this callback >+ * function. >+ */ > lpfc_nlp_put(ndlp); > > return; >@@ -1205,7 +1359,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba > scsi_host_put(shost); > } > >-void >+int > lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) > { > struct lpfc_hba *phba = vport->phba; >@@ -1214,18 +1368,20 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vp > > mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); > if (!mbox) >- return; >+ return 1; > > lpfc_unreg_vpi(phba, vport->vpi, mbox); > mbox->vport = vport; > mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; >- rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); > if (rc == MBX_NOT_FINISHED) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, > "1800 Could not issue unreg_vpi\n"); > mempool_free(mbox, phba->mbox_mem_pool); > vport->unreg_vpi_cmpl = VPORT_ERROR; >+ return rc; > } >+ return 0; > } > > static void >@@ -1281,8 +1437,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp > MAILBOX_t *mb = &pmb->mb; > struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); > struct lpfc_nodelist *ndlp; >- struct lpfc_vport **vports; >- int i; > > ndlp = (struct lpfc_nodelist *) pmb->context2; > pmb->context1 = NULL; >@@ -1291,7 +1445,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); > mempool_free(pmb, 
phba->mbox_mem_pool); >- lpfc_nlp_put(ndlp); > > if (phba->fc_topology == TOPOLOGY_LOOP) { > /* FLOGI failed, use loop map to make discovery list */ >@@ -1299,6 +1452,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp > > /* Start discovery */ > lpfc_disc_start(vport); >+ /* Decrement the reference count to ndlp after the >+ * reference to the ndlp are done. >+ */ >+ lpfc_nlp_put(ndlp); > return; > } > >@@ -1306,6 +1463,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp > lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, > "0258 Register Fabric login error: 0x%x\n", > mb->mbxStatus); >+ /* Decrement the reference count to ndlp after the reference >+ * to the ndlp are done. >+ */ >+ lpfc_nlp_put(ndlp); > return; > } > >@@ -1313,35 +1474,18 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp > ndlp->nlp_type |= NLP_FABRIC; > lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); > >- lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ >- >- if (vport->port_state == LPFC_FABRIC_CFG_LINK) { >- vports = lpfc_create_vport_work_array(phba); >- if (vports != NULL) >- for(i = 0; >- i < LPFC_MAX_VPORTS && vports[i] != NULL; >- i++) { >- if (vports[i]->port_type == LPFC_PHYSICAL_PORT) >- continue; >- if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) >- lpfc_initial_fdisc(vports[i]); >- else if (phba->sli3_options & >- LPFC_SLI3_NPIV_ENABLED) { >- lpfc_vport_set_state(vports[i], >- FC_VPORT_NO_FABRIC_SUPP); >- lpfc_printf_vlog(vport, KERN_ERR, >- LOG_ELS, >- "0259 No NPIV " >- "Fabric support\n"); >- } >- } >- lpfc_destroy_vport_work_array(vports); >- lpfc_do_scr_ns_plogi(phba, vport); >- } >+ if (vport->port_state == LPFC_FABRIC_CFG_LINK && >+ !vport->cfg_enable_auth) >+ lpfc_start_discovery(vport); > > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); > mempool_free(pmb, phba->mbox_mem_pool); >+ >+ /* Drop the reference count from the mbox at the end after >+ * all the current reference to the ndlp have been done. 
>+ */ >+ lpfc_nlp_put(ndlp); > return; > } > >@@ -1361,11 +1505,16 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_h > > if (mb->mbxStatus) { > out: >+ /* decrement the node reference count held for this >+ * callback function. >+ */ > lpfc_nlp_put(ndlp); > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); > mempool_free(pmb, phba->mbox_mem_pool); >- lpfc_drop_node(vport, ndlp); >+ >+ /* If no other thread is using the ndlp, free it */ >+ lpfc_nlp_not_used(ndlp); > > if (phba->fc_topology == TOPOLOGY_LOOP) { > /* >@@ -1410,6 +1559,9 @@ out: > goto out; > } > >+ /* decrement the node reference count held for this >+ * callback function. >+ */ > lpfc_nlp_put(ndlp); > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); >@@ -1441,9 +1593,8 @@ lpfc_register_remote_port(struct lpfc_vp > * registered the port. > */ > if (ndlp->rport && ndlp->rport->dd_data && >- ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { >+ ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) > lpfc_nlp_put(ndlp); >- } > > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, > "rport add: did:x%x flg:x%x type x%x", >@@ -1487,6 +1638,7 @@ lpfc_unregister_remote_port(struct lpfc_ > "rport delete: did:x%x flg:x%x type x%x", > ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); > >+ ndlp->unreg_time = jiffies; > fc_remote_port_delete(rport); > > return; >@@ -1617,7 +1769,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vp > ndlp->nlp_DID, old_state, state); > > if (old_state == NLP_STE_NPR_NODE && >- (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 && > state != NLP_STE_NPR_NODE) > lpfc_cancel_retry_delay_tmo(vport, ndlp); > if (old_state == NLP_STE_UNMAPPED_NODE) { >@@ -1638,26 +1789,127 @@ lpfc_nlp_set_state(struct lpfc_vport *vp > } > > void >+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ >+ if (list_empty(&ndlp->nlp_listp)) { >+ spin_lock_irq(shost->host_lock); >+ list_add_tail(&ndlp->nlp_listp, 
&vport->fc_nodes); >+ spin_unlock_irq(shost->host_lock); >+ } >+} >+ >+void > lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) > { > struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > >- if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) >- lpfc_cancel_retry_delay_tmo(vport, ndlp); >+ lpfc_cancel_retry_delay_tmo(vport, ndlp); > if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) > lpfc_nlp_counters(vport, ndlp->nlp_state, -1); > spin_lock_irq(shost->host_lock); > list_del_init(&ndlp->nlp_listp); > spin_unlock_irq(shost->host_lock); > lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, >- NLP_STE_UNUSED_NODE); >+ NLP_STE_UNUSED_NODE); >+} >+ >+void >+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) >+{ >+ lpfc_cancel_retry_delay_tmo(vport, ndlp); >+ if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) >+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1); >+ lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, >+ NLP_STE_UNUSED_NODE); >+} >+ >+struct lpfc_nodelist * >+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, >+ int state) >+{ >+ struct lpfc_hba *phba = vport->phba; >+ uint32_t did; >+ unsigned long flags; >+ >+ if (!ndlp) >+ return NULL; >+ >+ spin_lock_irqsave(&phba->ndlp_lock, flags); >+ /* The ndlp should not be in memory free mode */ >+ if (NLP_CHK_FREE_REQ(ndlp)) { >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, >+ "0277 lpfc_enable_node: ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ return NULL; >+ } >+ /* The ndlp should not already be in active mode */ >+ if (NLP_CHK_NODE_ACT(ndlp)) { >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, >+ "0278 lpfc_enable_node: ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ return NULL; >+ } >+ >+ /* Keep 
the original DID */ >+ did = ndlp->nlp_DID; >+ >+ /* re-initialize ndlp except of ndlp linked list pointer */ >+ memset((((char *)ndlp) + sizeof (struct list_head)), 0, >+ sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); >+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); >+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); >+ INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp); >+ >+ init_timer(&ndlp->nlp_delayfunc); >+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; >+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp; >+ >+ init_timer(&ndlp->nlp_reauth_tmr); >+ ndlp->nlp_reauth_tmr.function = lpfc_reauth_node; >+ ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp; >+ >+ ndlp->unreg_time = jiffies; >+ ndlp->nlp_DID = did; >+ ndlp->vport = vport; >+ ndlp->nlp_sid = NLP_NO_SID; >+ /* ndlp management re-initialize */ >+ kref_init(&ndlp->kref); >+ NLP_INT_NODE_ACT(ndlp); >+ >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ >+ if (state != NLP_STE_UNUSED_NODE) >+ lpfc_nlp_set_state(vport, ndlp, state); >+ >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, >+ "node enable: did:x%x", >+ ndlp->nlp_DID, 0, 0); >+ return ndlp; > } > > void > lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) > { >+ /* >+ * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should >+ * be used if we wish to issue the "last" lpfc_nlp_put() to remove >+ * the ndlp from the vport. The ndlp marked as UNUSED on the list >+ * until ALL other outstanding threads have completed. We check >+ * that the ndlp not already in the UNUSED state before we proceed. 
>+ */ >+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) >+ return; > lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); > lpfc_nlp_put(ndlp); >+ return; > } > > /* >@@ -1868,8 +2120,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, > lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); > mbox->vport = vport; > mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; >- rc = lpfc_sli_issue_mbox(phba, mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); > if (rc == MBX_NOT_FINISHED) > mempool_free(mbox, phba->mbox_mem_pool); > } >@@ -1892,8 +2143,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *v > lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); > mbox->vport = vport; > mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; >- rc = lpfc_sli_issue_mbox(phba, mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)); >+ mbox->context1 = NULL; >+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); > if (rc == MBX_NOT_FINISHED) { > mempool_free(mbox, phba->mbox_mem_pool); > } >@@ -1912,8 +2163,8 @@ lpfc_unreg_default_rpis(struct lpfc_vpor > lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); > mbox->vport = vport; > mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; >- rc = lpfc_sli_issue_mbox(phba, mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)); >+ mbox->context1 = NULL; >+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); > if (rc == MBX_NOT_FINISHED) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, > "1815 Could not issue " >@@ -1941,7 +2192,21 @@ lpfc_cleanup_node(struct lpfc_vport *vpo > "Data: x%x x%x x%x\n", > ndlp->nlp_DID, ndlp->nlp_flag, > ndlp->nlp_state, ndlp->nlp_rpi); >- lpfc_dequeue_node(vport, ndlp); >+ if (NLP_CHK_FREE_REQ(ndlp)) { >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, >+ "0280 lpfc_cleanup_node: ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ lpfc_dequeue_node(vport, ndlp); >+ } else { >+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, >+ "0281 lpfc_cleanup_node: ndlp:x%p " 
>+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ lpfc_disable_node(vport, ndlp); >+ } > > /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ > if ((mb = phba->sli.mbox_active)) { >@@ -1963,28 +2228,28 @@ lpfc_cleanup_node(struct lpfc_vport *vpo > } > list_del(&mb->list); > mempool_free(mb, phba->mbox_mem_pool); >- lpfc_nlp_put(ndlp); >+ /* We shall not invoke the lpfc_nlp_put to decrement >+ * the ndlp reference count as we are in the process >+ * of lpfc_nlp_release. >+ */ > } > } > spin_unlock_irq(&phba->hbalock); > >- lpfc_els_abort(phba,ndlp); >+ lpfc_els_abort(phba, ndlp); >+ > spin_lock_irq(shost->host_lock); > ndlp->nlp_flag &= ~NLP_DELAY_TMO; > spin_unlock_irq(shost->host_lock); > > ndlp->nlp_last_elscmd = 0; > del_timer_sync(&ndlp->nlp_delayfunc); >+ del_timer_sync(&ndlp->nlp_reauth_tmr); > >- if (!list_empty(&ndlp->els_retry_evt.evt_listp)) >- list_del_init(&ndlp->els_retry_evt.evt_listp); >- if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) >- list_del_init(&ndlp->dev_loss_evt.evt_listp); >- >- if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) { >- list_del_init(&ndlp->dev_loss_evt.evt_listp); >- complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2)); >- } >+ list_del_init(&ndlp->els_retry_evt.evt_listp); >+ list_del_init(&ndlp->dev_loss_evt.evt_listp); >+ if (!list_empty(&ndlp->els_reauth_evt.evt_listp)) >+ list_del_init(&ndlp->els_reauth_evt.evt_listp); > > lpfc_unreg_rpi(vport, ndlp); > >@@ -1999,12 +2264,35 @@ lpfc_cleanup_node(struct lpfc_vport *vpo > static void > lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) > { >+ struct lpfc_hba *phba = vport->phba; > struct lpfc_rport_data *rdata; >+ LPFC_MBOXQ_t *mbox; >+ int rc; > >- if (ndlp->nlp_flag & NLP_DELAY_TMO) { >- lpfc_cancel_retry_delay_tmo(vport, ndlp); >+ lpfc_cancel_retry_delay_tmo(vport, ndlp); >+ if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { >+ /* For this case we need to cleanup the default 
rpi >+ * allocated by the firmware. >+ */ >+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) >+ != NULL) { >+ rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, >+ (uint8_t *) &vport->fc_sparam, mbox, 0); >+ if (rc) { >+ mempool_free(mbox, phba->mbox_mem_pool); >+ } >+ else { >+ mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; >+ mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; >+ mbox->vport = vport; >+ mbox->context2 = NULL; >+ rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); >+ if (rc == MBX_NOT_FINISHED) { >+ mempool_free(mbox, phba->mbox_mem_pool); >+ } >+ } >+ } > } >- > lpfc_cleanup_node(vport, ndlp); > > /* >@@ -2028,10 +2316,6 @@ lpfc_matchdid(struct lpfc_vport *vport, > if (did == Bcast_DID) > return 0; > >- if (ndlp->nlp_DID == 0) { >- return 0; >- } >- > /* First check for Direct match */ > if (ndlp->nlp_DID == did) > return 1; >@@ -2129,9 +2413,25 @@ lpfc_setup_disc_node(struct lpfc_vport * > ndlp->nlp_flag |= NLP_NPR_2B_DISC; > spin_unlock_irq(shost->host_lock); > return ndlp; >+ } else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); >+ if (!ndlp) >+ return NULL; >+ spin_lock_irq(shost->host_lock); >+ ndlp->nlp_flag |= NLP_NPR_2B_DISC; >+ spin_unlock_irq(shost->host_lock); >+ return ndlp; > } >- if (vport->fc_flag & FC_RSCN_MODE) { >+ >+ if ((vport->fc_flag & FC_RSCN_MODE) && >+ !(vport->fc_flag & FC_NDISC_ACTIVE)) { > if (lpfc_rscn_payload_check(vport, did)) { >+ /* If we've already recieved a PLOGI from this NPort >+ * we don't need to try to discover it again. >+ */ >+ if (ndlp->nlp_flag & NLP_RCV_PLOGI) >+ return NULL; >+ > spin_lock_irq(shost->host_lock); > ndlp->nlp_flag |= NLP_NPR_2B_DISC; > spin_unlock_irq(shost->host_lock); >@@ -2139,13 +2439,17 @@ lpfc_setup_disc_node(struct lpfc_vport * > /* Since this node is marked for discovery, > * delay timeout is not needed. 
> */ >- if (ndlp->nlp_flag & NLP_DELAY_TMO) >- lpfc_cancel_retry_delay_tmo(vport, ndlp); >+ lpfc_cancel_retry_delay_tmo(vport, ndlp); > } else > ndlp = NULL; > } else { >+ /* If we've already recieved a PLOGI from this NPort, >+ * or we are already in the process of discovery on it, >+ * we don't need to try to discover it again. >+ */ > if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || >- ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) >+ ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || >+ ndlp->nlp_flag & NLP_RCV_PLOGI) > return NULL; > lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > spin_lock_irq(shost->host_lock); >@@ -2220,8 +2524,7 @@ lpfc_issue_clear_la(struct lpfc_hba *phb > lpfc_clear_la(phba, mbox); > mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; > mbox->vport = vport; >- rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | >- MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); > if (rc == MBX_NOT_FINISHED) { > mempool_free(mbox, phba->mbox_mem_pool); > lpfc_disc_flush_list(vport); >@@ -2244,8 +2547,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba > lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); > regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; > regvpimbox->vport = vport; >- if (lpfc_sli_issue_mbox(phba, regvpimbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)) >+ if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) > == MBX_NOT_FINISHED) { > mempool_free(regvpimbox, phba->mbox_mem_pool); > } >@@ -2301,6 +2603,7 @@ lpfc_disc_start(struct lpfc_vport *vport > * continue discovery. 
> */ > if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && >+ !(vport->fc_flag & FC_PT2PT) && > !(vport->fc_flag & FC_RSCN_MODE)) { > lpfc_issue_reg_vpi(phba, vport); > return; >@@ -2423,11 +2726,11 @@ lpfc_disc_flush_list(struct lpfc_vport * > if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { > list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, > nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || >- ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { >+ ndlp->nlp_state == NLP_STE_ADISC_ISSUE) > lpfc_free_tx(phba, ndlp); >- lpfc_nlp_put(ndlp); >- } > } > } > } >@@ -2460,21 +2763,20 @@ lpfc_disc_timeout(unsigned long ptr) > { > struct lpfc_vport *vport = (struct lpfc_vport *) ptr; > struct lpfc_hba *phba = vport->phba; >+ uint32_t tmo_posted; > unsigned long flags = 0; > > if (unlikely(!phba)) > return; > >- if ((vport->work_port_events & WORKER_DISC_TMO) == 0) { >- spin_lock_irqsave(&vport->work_port_lock, flags); >+ spin_lock_irqsave(&vport->work_port_lock, flags); >+ tmo_posted = vport->work_port_events & WORKER_DISC_TMO; >+ if (!tmo_posted) > vport->work_port_events |= WORKER_DISC_TMO; >- spin_unlock_irqrestore(&vport->work_port_lock, flags); >+ spin_unlock_irqrestore(&vport->work_port_lock, flags); > >- spin_lock_irqsave(&phba->hbalock, flags); >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); >- spin_unlock_irqrestore(&phba->hbalock, flags); >- } >+ if (!tmo_posted) >+ lpfc_worker_wake_up(phba); > return; > } > >@@ -2511,11 +2813,14 @@ lpfc_disc_timeout_handler(struct lpfc_vp > /* Start discovery by sending FLOGI, clean up old rpis */ > list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, > nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state != NLP_STE_NPR_NODE) > continue; > if (ndlp->nlp_type & NLP_FABRIC) { > /* Clean up the ndlp on Fabric connections */ > lpfc_drop_node(vport, ndlp); >+ > } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { > /* Fail outstanding IO now 
since device > * is marked for PLOGI. >@@ -2524,9 +2829,8 @@ lpfc_disc_timeout_handler(struct lpfc_vp > } > } > if (vport->port_state != LPFC_FLOGI) { >- vport->port_state = LPFC_FLOGI; >- lpfc_set_disctmo(vport); > lpfc_initial_flogi(vport); >+ return; > } > break; > >@@ -2536,7 +2840,7 @@ lpfc_disc_timeout_handler(struct lpfc_vp > /* Initial FLOGI timeout */ > lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, > "0222 Initial %s timeout\n", >- vport->vpi ? "FLOGI" : "FDISC"); >+ vport->vpi ? "FDISC" : "FLOGI"); > > /* Assume no Fabric and go on with discovery. > * Check for outstanding ELS FLOGI to abort. >@@ -2557,11 +2861,11 @@ lpfc_disc_timeout_handler(struct lpfc_vp > "NameServer login\n"); > /* Next look for NameServer ndlp */ > ndlp = lpfc_findnode_did(vport, NameServer_DID); >- if (ndlp) >- lpfc_nlp_put(ndlp); >- /* Start discovery */ >- lpfc_disc_start(vport); >- break; >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) >+ lpfc_els_abort(phba, ndlp); >+ >+ /* ReStart discovery */ >+ goto restart_disc; > > case LPFC_NS_QRY: > /* Check for wait for NameServer Rsp timeout */ >@@ -2580,6 +2884,7 @@ lpfc_disc_timeout_handler(struct lpfc_vp > } > vport->fc_ns_retry = 0; > >+restart_disc: > /* > * Discovery is over. > * set port_state to PORT_READY if SLI2. 
>@@ -2608,8 +2913,7 @@ lpfc_disc_timeout_handler(struct lpfc_vp > initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; > initlinkmbox->vport = vport; > initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; >- rc = lpfc_sli_issue_mbox(phba, initlinkmbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); > lpfc_set_loopback_flag(phba); > if (rc == MBX_NOT_FINISHED) > mempool_free(initlinkmbox, phba->mbox_mem_pool); >@@ -2651,7 +2955,7 @@ lpfc_disc_timeout_handler(struct lpfc_vp > > default: > lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, >- "0229 Unexpected discovery timeout, " >+ "0273 Unexpected discovery timeout, " > "vport State x%x\n", vport->port_state); > break; > } >@@ -2664,12 +2968,14 @@ lpfc_disc_timeout_handler(struct lpfc_vp > clrlaerr = 1; > break; > >+ case LPFC_LINK_UP: >+ lpfc_issue_clear_la(phba, vport); >+ /* Drop thru */ > case LPFC_LINK_UNKNOWN: > case LPFC_WARM_START: > case LPFC_INIT_START: > case LPFC_INIT_MBX_CMDS: > case LPFC_LINK_DOWN: >- case LPFC_LINK_UP: > case LPFC_HBA_ERROR: > lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, > "0230 Unexpected timeout, hba link " >@@ -2723,7 +3029,9 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc > else > mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); > >- /* Mailbox took a reference to the node */ >+ /* decrement the node reference count held for this callback >+ * function. 
>+ */ > lpfc_nlp_put(ndlp); > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); >@@ -2747,14 +3055,20 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist > sizeof(ndlp->nlp_portname)) == 0; > } > >+static int >+lpfc_filter_by_wwnn(struct lpfc_nodelist *ndlp, void *param) >+{ >+ return memcmp(&ndlp->nlp_nodename, param, >+ sizeof(ndlp->nlp_nodename)) == 0; >+} >+ > struct lpfc_nodelist * > __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) > { > struct lpfc_nodelist *ndlp; > > list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { >- if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && >- filter(ndlp, param)) >+ if (filter(ndlp, param)) > return ndlp; > } > return NULL; >@@ -2814,6 +3128,23 @@ lpfc_findnode_wwpn(struct lpfc_vport *vp > return ndlp; > } > >+/* >+ * This routine looks up the ndlp lists for the given WWNN. If WWNN found it >+ * returns the node element list pointer else return NULL. >+ */ >+struct lpfc_nodelist * >+lpfc_findnode_wwnn(struct lpfc_vport *vport, struct lpfc_name *wwnn) >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ struct lpfc_nodelist *ndlp; >+ >+ spin_lock_irq(shost->host_lock); >+ ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwnn, wwnn); >+ spin_unlock_irq(shost->host_lock); >+ return ndlp; >+} >+ >+ > void > lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, > uint32_t did) >@@ -2821,14 +3152,22 @@ lpfc_nlp_init(struct lpfc_vport *vport, > memset(ndlp, 0, sizeof (struct lpfc_nodelist)); > INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); > INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); >+ INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp); > init_timer(&ndlp->nlp_delayfunc); > ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; > ndlp->nlp_delayfunc.data = (unsigned long)ndlp; >+ >+ init_timer(&ndlp->nlp_reauth_tmr); >+ ndlp->nlp_reauth_tmr.function = lpfc_reauth_node; >+ ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp; >+ >+ ndlp->unreg_time = jiffies; > ndlp->nlp_DID = did; > 
ndlp->vport = vport; > ndlp->nlp_sid = NLP_NO_SID; > INIT_LIST_HEAD(&ndlp->nlp_listp); > kref_init(&ndlp->kref); >+ NLP_INT_NODE_ACT(ndlp); > > lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, > "node init: did:x%x", >@@ -2837,9 +3176,14 @@ lpfc_nlp_init(struct lpfc_vport *vport, > return; > } > >+/* This routine releases all resources associated with a specifc NPort's ndlp >+ * and mempool_free's the nodelist. >+ */ > static void > lpfc_nlp_release(struct kref *kref) > { >+ struct lpfc_hba *phba; >+ unsigned long flags; > struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, > kref); > >@@ -2847,20 +3191,145 @@ lpfc_nlp_release(struct kref *kref) > "node release: did:x%x flg:x%x type:x%x", > ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); > >+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, >+ "0279 lpfc_nlp_release: ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ >+ /* remove ndlp from action. */ > lpfc_nlp_remove(ndlp->vport, ndlp); >- mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); >+ >+ /* clear the ndlp active flag for all release cases */ >+ phba = ndlp->vport->phba; >+ spin_lock_irqsave(&phba->ndlp_lock, flags); >+ NLP_CLR_NODE_ACT(ndlp); >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ >+ /* free ndlp memory for final ndlp release */ >+ if (NLP_CHK_FREE_REQ(ndlp)) >+ mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); > } > >+/* This routine bumps the reference count for a ndlp structure to ensure >+ * that one discovery thread won't free a ndlp while another discovery thread >+ * is using it. 
>+ */ > struct lpfc_nodelist * > lpfc_nlp_get(struct lpfc_nodelist *ndlp) > { >- if (ndlp) >- kref_get(&ndlp->kref); >+ struct lpfc_hba *phba; >+ unsigned long flags; >+ >+ if (ndlp) { >+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, >+ "node get: did:x%x flg:x%x refcnt:x%x", >+ ndlp->nlp_DID, ndlp->nlp_flag, >+ atomic_read(&ndlp->kref.refcount)); >+ /* The check of ndlp usage to prevent incrementing the >+ * ndlp reference count that is in the process of being >+ * released. >+ */ >+ phba = ndlp->vport->phba; >+ spin_lock_irqsave(&phba->ndlp_lock, flags); >+ if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, >+ "0276 lpfc_nlp_get: ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ return NULL; >+ } else >+ kref_get(&ndlp->kref); >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ } > return ndlp; > } > >+/* This routine decrements the reference count for a ndlp structure. If the >+ * count goes to 0, this indicates the the associated nodelist should be >+ * freed. Returning 1 indicates the ndlp resource has been released; on the >+ * other hand, returning 0 indicates the ndlp resource has not been released >+ * yet. >+ */ > int > lpfc_nlp_put(struct lpfc_nodelist *ndlp) > { >- return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; >+ struct lpfc_hba *phba; >+ unsigned long flags; >+ >+ if (!ndlp) >+ return 1; >+ >+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, >+ "node put: did:x%x flg:x%x refcnt:x%x", >+ ndlp->nlp_DID, ndlp->nlp_flag, >+ atomic_read(&ndlp->kref.refcount)); >+ phba = ndlp->vport->phba; >+ spin_lock_irqsave(&phba->ndlp_lock, flags); >+ /* Check the ndlp memory free acknowledge flag to avoid the >+ * possible race condition that kref_put got invoked again >+ * after previous one has done ndlp memory free. 
>+ */ >+ if (NLP_CHK_FREE_ACK(ndlp)) { >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, >+ "0274 lpfc_nlp_put: ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ return 1; >+ } >+ /* Check the ndlp inactivate log flag to avoid the possible >+ * race condition that kref_put got invoked again after ndlp >+ * is already in inactivating state. >+ */ >+ if (NLP_CHK_IACT_REQ(ndlp)) { >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, >+ "0275 lpfc_nlp_put: ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ (void *)ndlp, ndlp->nlp_usg_map, >+ atomic_read(&ndlp->kref.refcount)); >+ return 1; >+ } >+ /* For last put, mark the ndlp usage flags to make sure no >+ * other kref_get and kref_put on the same ndlp shall get >+ * in between the process when the final kref_put has been >+ * invoked on this ndlp. >+ */ >+ if (atomic_read(&ndlp->kref.refcount) == 1) { >+ /* Indicate ndlp is put to inactive state. */ >+ NLP_SET_IACT_REQ(ndlp); >+ /* Acknowledge ndlp memory free has been seen. */ >+ if (NLP_CHK_FREE_REQ(ndlp)) >+ NLP_SET_FREE_ACK(ndlp); >+ } >+ spin_unlock_irqrestore(&phba->ndlp_lock, flags); >+ /* Note, the kref_put returns 1 when decrementing a reference >+ * count that was 1, it invokes the release callback function, >+ * but it still left the reference count as 1 (not actually >+ * performs the last decrementation). Otherwise, it actually >+ * decrements the reference count and returns 0. >+ */ >+ return kref_put(&ndlp->kref, lpfc_nlp_release); >+} >+ >+/* This routine free's the specified nodelist if it is not in use >+ * by any other discovery thread. This routine returns 1 if the >+ * ndlp has been freed. A return value of 0 indicates the ndlp is >+ * not yet been released. 
>+ */ >+int >+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) >+{ >+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, >+ "node not used: did:x%x flg:x%x refcnt:x%x", >+ ndlp->nlp_DID, ndlp->nlp_flag, >+ atomic_read(&ndlp->kref.refcount)); >+ if (atomic_read(&ndlp->kref.refcount) == 1) >+ if (lpfc_nlp_put(ndlp)) >+ return 1; >+ return 0; > } >diff -urpN a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h >--- a/drivers/scsi/lpfc/lpfc_hw.h 2008-09-05 17:47:41.684244000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_hw.h 2008-09-05 17:47:49.663879000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * * >@@ -64,6 +64,7 @@ > #define SLI3_IOCB_CMD_SIZE 128 > #define SLI3_IOCB_RSP_SIZE 64 > >+#define BUF_SZ_4K 4096 > > /* Common Transport structures and definitions */ > >@@ -139,6 +140,9 @@ struct lpfc_sli_ct_request { > uint8_t len; > uint8_t symbname[255]; > } rsnn; >+ struct da_id { /* For DA_ID requests */ >+ uint32_t port_id; >+ } da_id; > struct rspn { /* For RSPN_ID requests */ > uint32_t PortId; > uint8_t len; >@@ -150,11 +154,7 @@ struct lpfc_sli_ct_request { > struct gff_acc { > uint8_t fbits[128]; > } gff_acc; >-#ifdef __BIG_ENDIAN_BITFIELD > #define FCP_TYPE_FEATURE_OFFSET 7 >-#else /* __LITTLE_ENDIAN_BITFIELD */ >-#define FCP_TYPE_FEATURE_OFFSET 4 >-#endif > struct rff { > uint32_t PortId; > uint8_t reserved[2]; >@@ -177,6 +177,8 @@ struct lpfc_sli_ct_request { > sizeof(struct rnn)) > #define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ > sizeof(struct rsnn)) >+#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ >+ sizeof(struct da_id)) > #define RSPN_REQUEST_SZ (offsetof(struct 
lpfc_sli_ct_request, un) + \ > sizeof(struct rspn)) > >@@ -349,7 +351,8 @@ struct csp { > > uint16_t huntgroup:1; /* FC Word 1, bit 23 */ > uint16_t simplex:1; /* FC Word 1, bit 22 */ >- uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ >+ uint16_t security:1; /* FC Word 1, bit 21 */ >+ uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */ > uint16_t dhd:1; /* FC Word 1, bit 18 */ > uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ > uint16_t payloadlength:1; /* FC Word 1, bit 16 */ >@@ -366,7 +369,8 @@ struct csp { > uint16_t payloadlength:1; /* FC Word 1, bit 16 */ > uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ > uint16_t dhd:1; /* FC Word 1, bit 18 */ >- uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ >+ uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */ >+ uint16_t security:1; /* FC Word 1, bit 21 */ > uint16_t simplex:1; /* FC Word 1, bit 22 */ > uint16_t huntgroup:1; /* FC Word 1, bit 23 */ > #endif >@@ -505,6 +509,17 @@ struct serv_parm { /* Structure is in Bi > #define ELS_CMD_SCR 0x62000000 > #define ELS_CMD_RNID 0x78000000 > #define ELS_CMD_LIRR 0x7A000000 >+/* >+ * ELS commands for authentication >+ * ELS_CMD_AUTH<<24 | AUTH_NEGOTIATE<<8 | AUTH_VERSION >+ */ >+#define ELS_CMD_AUTH 0x90000000 >+#define ELS_CMD_AUTH_RJT 0x90000A01 >+#define ELS_CMD_AUTH_NEG 0x90000B01 >+#define ELS_CMD_AUTH_DONE 0x90000C01 >+#define ELS_CMD_DH_CHA 0x90001001 >+#define ELS_CMD_DH_REP 0x90001101 >+#define ELS_CMD_DH_SUC 0x90001201 > #else /* __LITTLE_ENDIAN_BITFIELD */ > #define ELS_CMD_MASK 0xffff > #define ELS_RSP_MASK 0xff >@@ -541,6 +556,17 @@ struct serv_parm { /* Structure is in Bi > #define ELS_CMD_SCR 0x62 > #define ELS_CMD_RNID 0x78 > #define ELS_CMD_LIRR 0x7A >+/* >+ * ELS commands for authentication >+ * ELS_CMD_AUTH | AUTH_NEGOTIATE<<16 | AUTH_VERSION<<24 >+ */ >+#define ELS_CMD_AUTH 0x00000090 >+#define ELS_CMD_AUTH_RJT 0x010A0090 >+#define ELS_CMD_AUTH_NEG 0x010B0090 >+#define ELS_CMD_AUTH_DONE 0x010C0090 >+#define ELS_CMD_DH_CHA 0x01100090 
>+#define ELS_CMD_DH_REP 0x01110090 >+#define ELS_CMD_DH_SUC 0x01120090 > #endif > > /* >@@ -580,6 +606,7 @@ struct ls_rjt { /* Structure is in Big E > #define LSEXP_INVALID_O_SID 0x15 > #define LSEXP_INVALID_OX_RX 0x17 > #define LSEXP_CMD_IN_PROGRESS 0x19 >+#define LSEXP_PORT_LOGIN_REQ 0x1E > #define LSEXP_INVALID_NPORT_ID 0x1F > #define LSEXP_INVALID_SEQ_ID 0x21 > #define LSEXP_INVALID_XCHG 0x23 >@@ -1105,6 +1132,8 @@ typedef struct { > /* Start FireFly Register definitions */ > #define PCI_VENDOR_ID_EMULEX 0x10df > #define PCI_DEVICE_ID_FIREFLY 0x1ae5 >+#define PCI_DEVICE_ID_PROTEUS_VF 0xe100 >+#define PCI_DEVICE_ID_PROTEUS_PF 0xe180 > #define PCI_DEVICE_ID_SAT_SMB 0xf011 > #define PCI_DEVICE_ID_SAT_MID 0xf015 > #define PCI_DEVICE_ID_RFLY 0xf095 >@@ -1131,10 +1160,12 @@ typedef struct { > #define PCI_DEVICE_ID_LP11000S 0xfc10 > #define PCI_DEVICE_ID_LPE11000S 0xfc20 > #define PCI_DEVICE_ID_SAT_S 0xfc40 >+#define PCI_DEVICE_ID_PROTEUS_S 0xfc50 > #define PCI_DEVICE_ID_HELIOS 0xfd00 > #define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11 > #define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12 > #define PCI_DEVICE_ID_ZEPHYR 0xfe00 >+#define PCI_DEVICE_ID_HORNET 0xfe05 > #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 > #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 > >@@ -1152,6 +1183,7 @@ typedef struct { > #define ZEPHYR_JEDEC_ID 0x0577 > #define VIPER_JEDEC_ID 0x4838 > #define SATURN_JEDEC_ID 0x1004 >+#define HORNET_JDEC_ID 0x2057706D > > #define JEDEC_ID_MASK 0x0FFFF000 > #define JEDEC_ID_SHIFT 12 >@@ -1228,7 +1260,8 @@ typedef struct { /* FireFly BIU registe > #define HS_FFER3 0x20000000 /* Bit 29 */ > #define HS_FFER2 0x40000000 /* Bit 30 */ > #define HS_FFER1 0x80000000 /* Bit 31 */ >-#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */ >+#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ >+#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ > > /* Host Control Register */ > >@@ -1277,12 +1310,18 @@ typedef struct { /* FireFly BIU registe > #define MBX_DEL_LD_ENTRY 0x1D > #define 
MBX_RUN_PROGRAM 0x1E > #define MBX_SET_MASK 0x20 >-#define MBX_SET_SLIM 0x21 >+#define MBX_SET_VARIABLE 0x21 > #define MBX_UNREG_D_ID 0x23 > #define MBX_KILL_BOARD 0x24 > #define MBX_CONFIG_FARP 0x25 > #define MBX_BEACON 0x2A > #define MBX_HEARTBEAT 0x31 >+#define MBX_WRITE_VPARMS 0x32 >+#define MBX_ASYNCEVT_ENABLE 0x33 >+#define MBX_READ_EVENT_LOG_STATUS 0x37 >+#define MBX_READ_EVENT_LOG 0x38 >+#define MBX_WRITE_EVENT_LOG 0x39 >+ > > #define MBX_CONFIG_HBQ 0x7C > #define MBX_LOAD_AREA 0x81 >@@ -1297,7 +1336,7 @@ typedef struct { /* FireFly BIU registe > #define MBX_REG_VNPID 0x96 > #define MBX_UNREG_VNPID 0x97 > >-#define MBX_FLASH_WR_ULA 0x98 >+#define MBX_WRITE_WWN 0x98 > #define MBX_SET_DEBUG 0x99 > #define MBX_LOAD_EXP_ROM 0x9C > >@@ -1344,6 +1383,7 @@ typedef struct { /* FireFly BIU registe > > /* SLI_2 IOCB Command Set */ > >+#define CMD_ASYNC_STATUS 0x7C > #define CMD_RCV_SEQUENCE64_CX 0x81 > #define CMD_XMIT_SEQUENCE64_CR 0x82 > #define CMD_XMIT_SEQUENCE64_CX 0x83 >@@ -1368,13 +1408,29 @@ typedef struct { /* FireFly BIU registe > #define CMD_FCP_TRECEIVE64_CX 0xA1 > #define CMD_FCP_TRSP64_CX 0xA3 > >+#define CMD_QUE_XRI64_CX 0xB3 > #define CMD_IOCB_RCV_SEQ64_CX 0xB5 > #define CMD_IOCB_RCV_ELS64_CX 0xB7 >+#define CMD_IOCB_RET_XRI64_CX 0xB9 > #define CMD_IOCB_RCV_CONT64_CX 0xBB > > #define CMD_GEN_REQUEST64_CR 0xC2 > #define CMD_GEN_REQUEST64_CX 0xC3 > >+/* Unhandled SLI-3 Commands */ >+#define CMD_IOCB_XMIT_MSEQ64_CR 0xB0 >+#define CMD_IOCB_XMIT_MSEQ64_CX 0xB1 >+#define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1 >+#define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD >+#define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6 >+#define CMD_IOCB_ABORT_EXTENDED_CN 0xBA >+#define CMD_IOCB_RET_HBQE64_CN 0xCA >+#define CMD_IOCB_FCP_IBIDIR64_CR 0xAC >+#define CMD_IOCB_FCP_IBIDIR64_CX 0xAD >+#define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF >+#define CMD_IOCB_LOGENTRY_CN 0x94 >+#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 >+ > #define CMD_MAX_IOCB_CMD 0xE6 > #define CMD_IOCB_MASK 0xff > >@@ -1401,11 +1457,14 @@ typedef 
struct { /* FireFly BIU registe > #define MBXERR_BAD_RCV_LENGTH 14 > #define MBXERR_DMA_ERROR 15 > #define MBXERR_ERROR 16 >+#define MBXERR_UNKNOWN_CMD 18 > #define MBX_NOT_FINISHED 255 > > #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ > #define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */ > >+#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */ >+ > /* > * Begin Structure Definitions for Mailbox Commands > */ >@@ -1572,6 +1631,14 @@ typedef struct { > } un; > } BIU_DIAG_VAR; > >+/* Structure for MB command READ_EVENT_LOG (0x38) */ >+typedef struct { >+ uint32_t rsvd1; >+ uint32_t offset; >+ struct ulp_bde64 rcv_bde64; >+}READ_EVENT_LOG_VAR; >+ >+ > /* Structure for MB Command INIT_LINK (05) */ > > typedef struct { >@@ -2177,7 +2244,10 @@ typedef struct { > typedef struct { > uint32_t eventTag; /* Event tag */ > #ifdef __BIG_ENDIAN_BITFIELD >- uint32_t rsvd1:22; >+ uint32_t rsvd1:19; >+ uint32_t fa:1; >+ uint32_t mm:1; /* Menlo Maintenance mode enabled */ >+ uint32_t rx:1; > uint32_t pb:1; > uint32_t il:1; > uint32_t attType:8; >@@ -2185,7 +2255,10 @@ typedef struct { > uint32_t attType:8; > uint32_t il:1; > uint32_t pb:1; >- uint32_t rsvd1:22; >+ uint32_t rx:1; >+ uint32_t mm:1; >+ uint32_t fa:1; >+ uint32_t rsvd1:19; > #endif > > #define AT_RESERVED 0x00 /* Reserved - attType */ >@@ -2206,6 +2279,7 @@ typedef struct { > > #define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ > #define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ >+#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephtr to menlo */ > > union { > struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer >@@ -2266,6 +2340,14 @@ typedef struct { > uint32_t rsvd1; > } CLEAR_LA_VAR; > >+/* Structure for MB Command SET_SLIM (33) */ >+/* Values needed to set MAX_DMA_LENGTH parameter */ >+#define SLIM_VAR_MAX_DMA_LENGTH 0x100506 >+#define SLIM_VAL_MAX_DMA_512 0x0 >+#define SLIM_VAL_MAX_DMA_1024 0x1 >+#define 
SLIM_VAL_MAX_DMA_2048 0x2 >+#define SLIM_VAL_MAX_DMA_4096 0x3 >+ > /* Structure for MB Command DUMP */ > > typedef struct { >@@ -2300,6 +2382,37 @@ typedef struct { > #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ > #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ > >+/* Structure for MB Command UPDATE_CFG (0x1B) */ >+ >+struct update_cfg_var { >+#ifdef __BIG_ENDIAN_BITFIELD >+ uint32_t rsvd2:16; >+ uint32_t type:8; >+ uint32_t rsvd:1; >+ uint32_t ra:1; >+ uint32_t co:1; >+ uint32_t cv:1; >+ uint32_t req:4; >+ uint32_t entry_length:16; >+ uint32_t region_id:16; >+#else /* __LITTLE_ENDIAN_BITFIELD */ >+ uint32_t req:4; >+ uint32_t cv:1; >+ uint32_t co:1; >+ uint32_t ra:1; >+ uint32_t rsvd:1; >+ uint32_t type:8; >+ uint32_t rsvd2:16; >+ uint32_t region_id:16; >+ uint32_t entry_length:16; >+#endif >+ >+ uint32_t resp_info; >+ uint32_t byte_cnt; >+ uint32_t data_offset; >+}; >+ >+ > struct hbq_mask { > #ifdef __BIG_ENDIAN_BITFIELD > uint8_t tmatch; >@@ -2606,9 +2719,25 @@ typedef struct { > uint32_t IPAddress; > } CONFIG_FARP_VAR; > >+/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */ >+ >+typedef struct { >+#ifdef __BIG_ENDIAN_BITFIELD >+ uint32_t rsvd:30; >+ uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/ >+#else /* __LITTLE_ENDIAN */ >+ uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/ >+ uint32_t rsvd:30; >+#endif >+} ASYNCEVT_ENABLE_VAR; >+ > /* Union of all Mailbox Command types */ > #define MAILBOX_CMD_WSIZE 32 > #define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) >+#define MAILBOX_EXT_WSIZE 512 >+#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t)) >+#define MAILBOX_HBA_EXT_OFFSET 0x100 >+#define MAILBOX_MAX_XMIT_SIZE 1024 > > typedef union { > uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/ >@@ -2642,9 +2771,12 @@ typedef union { > * NEW_FEATURE > */ > struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ >+ struct update_cfg_var varUpdateCfg; /* cmd = 
0x1B (UPDATE_CFG)*/ > CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ > REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ > UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ >+ ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ >+ READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38 (READ_EVENT_LOG) */ > } MAILVARIANTS; > > /* >@@ -2973,6 +3105,34 @@ typedef struct { > #endif > } RCV_ELS_REQ64; > >+/* IOCB Command template for RCV_SEQ64 */ >+struct rcv_seq64 { >+ struct ulp_bde64 elsReq; >+ uint32_t hbq_1; >+ uint32_t parmRo; >+#ifdef __BIG_ENDIAN_BITFIELD >+ uint32_t rctl:8; >+ uint32_t type:8; >+ uint32_t dfctl:8; >+ uint32_t ls:1; >+ uint32_t fs:1; >+ uint32_t rsvd2:3; >+ uint32_t si:1; >+ uint32_t bc:1; >+ uint32_t rsvd3:1; >+#else /* __LITTLE_ENDIAN_BITFIELD */ >+ uint32_t rsvd3:1; >+ uint32_t bc:1; >+ uint32_t si:1; >+ uint32_t rsvd2:3; >+ uint32_t fs:1; >+ uint32_t ls:1; >+ uint32_t dfctl:8; >+ uint32_t type:8; >+ uint32_t rctl:8; >+#endif >+}; >+ > /* IOCB Command template for all 64 bit FCP Initiator commands */ > typedef struct { > ULP_BDL bdl; >@@ -2987,6 +3147,21 @@ typedef struct { > uint32_t fcpt_Length; /* transfer ready for IWRITE */ > } FCPT_FIELDS64; > >+/* IOCB Command template for Async Status iocb commands */ >+typedef struct { >+ uint32_t rsvd[4]; >+ uint32_t param; >+#ifdef __BIG_ENDIAN_BITFIELD >+ uint16_t evt_code; /* High order bits word 5 */ >+ uint16_t sub_ctxt_tag; /* Low order bits word 5 */ >+#else /* __LITTLE_ENDIAN_BITFIELD */ >+ uint16_t sub_ctxt_tag; /* High order bits word 5 */ >+ uint16_t evt_code; /* Low order bits word 5 */ >+#endif >+} ASYNCSTAT_FIELDS; >+#define ASYNC_TEMP_WARN 0x100 >+#define ASYNC_TEMP_SAFE 0x101 >+ > /* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7) > or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ > >@@ -3004,7 +3179,26 @@ struct rcv_sli3 { > struct ulp_bde64 bde2; > }; > >+/* Structure used for a single HBQ entry */ >+struct lpfc_hbq_entry { >+ struct ulp_bde64 bde; >+ uint32_t 
buffer_tag; >+}; > >+/* IOCB Command template for QUE_XRI64_CX (0xB3) command */ >+typedef struct { >+ struct lpfc_hbq_entry buff; >+ uint32_t rsvd; >+ uint32_t rsvd1; >+} QUE_XRI64_CX_FIELDS; >+ >+struct que_xri64cx_ext_fields { >+ uint32_t iotag64_low; >+ uint32_t iotag64_high; >+ uint32_t ebde_count; >+ uint32_t rsvd; >+ struct lpfc_hbq_entry buff[5]; >+}; > > typedef struct _IOCB { /* IOCB structure */ > union { >@@ -3028,6 +3222,9 @@ typedef struct _IOCB { /* IOCB structure > XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */ > FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */ > FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */ >+ ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ >+ QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ >+ struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ > > uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ > } un; >@@ -3085,6 +3282,10 @@ typedef struct _IOCB { /* IOCB structure > > union { > struct rcv_sli3 rcvsli3; /* words 8 - 15 */ >+ >+ /* words 8-31 used for que_xri_cx iocb */ >+ struct que_xri64cx_ext_fields que_xri64cx_ext_words; >+ > uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ > } unsli3; > >@@ -3124,24 +3325,20 @@ typedef struct _IOCB { /* IOCB structure > > } IOCB_t; > >-/* Structure used for a single HBQ entry */ >-struct lpfc_hbq_entry { >- struct ulp_bde64 bde; >- uint32_t buffer_tag; >-}; >- > > #define SLI1_SLIM_SIZE (4 * 1024) > > /* Up to 498 IOCBs will fit into 16k >- * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384 >+ * 256 (MAILBOX_t) + 512 mailbox extension + >+ * 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384 > */ > #define SLI2_SLIM_SIZE (64 * 1024) > > /* Maximum IOCBs that will fit in SLI2 slim */ > #define MAX_SLI2_IOCB 498 > #define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \ >- (sizeof(MAILBOX_t) + sizeof(PCB_t))) >+ (sizeof(MAILBOX_t) + sizeof(PCB_t) + \ >+ sizeof(uint32_t) * MAILBOX_EXT_WSIZE)) > > /* HBQ entries are 4 words each = 4k */ > 
#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \ >@@ -3149,6 +3346,7 @@ struct lpfc_hbq_entry { > > struct lpfc_sli2_slim { > MAILBOX_t mbx; >+ uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE]; > PCB_t pcb; > IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE]; > }; >@@ -3172,6 +3370,8 @@ lpfc_is_LC_HBA(unsigned short device) > (device == PCI_DEVICE_ID_BSMB) || > (device == PCI_DEVICE_ID_ZMID) || > (device == PCI_DEVICE_ID_ZSMB) || >+ (device == PCI_DEVICE_ID_SAT_MID) || >+ (device == PCI_DEVICE_ID_SAT_SMB) || > (device == PCI_DEVICE_ID_RFLY)) > return 1; > else >@@ -3190,3 +3390,10 @@ lpfc_error_lost_link(IOCB_t *iocbp) > iocbp->un.ulpWord[4] == IOERR_LINK_DOWN || > iocbp->un.ulpWord[4] == IOERR_SLI_DOWN)); > } >+ >+#define MENLO_TRANSPORT_TYPE 0xfe >+#define MENLO_CONTEXT 0 >+#define MENLO_PU 3 >+#define MENLO_TIMEOUT 30 >+#define SETVAR_MLOMNT 0x103107 >+#define SETVAR_MLORST 0x103007 >diff -urpN a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c >--- a/drivers/scsi/lpfc/lpfc_init.c 2008-09-05 17:47:41.692246000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_init.c 2008-09-05 17:47:49.715876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. 
* > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -43,7 +43,26 @@ > #include "lpfc_crtn.h" > #include "lpfc_vport.h" > #include "lpfc_version.h" >- >+#include "lpfc_auth_access.h" >+#include "lpfc_ioctl.h" >+#include "lpfc_security.h" >+#include "lpfc_compat.h" >+ >+#include <net/sock.h> >+#include <linux/netlink.h> >+ >+extern struct notifier_block lpfc_fc_netlink_notifier; >+extern char security_work_q_name[KOBJ_NAME_LEN]; >+extern struct workqueue_struct *security_work_q; >+extern struct sock *fc_nl_sock; >+extern struct list_head fc_security_user_list; >+extern int fc_service_state; >+void lpfc_fc_sc_security_online(struct work_struct *work); >+void lpfc_fc_sc_security_offline(struct work_struct *work); >+void lpfc_fc_nl_rcv(struct sock *sk, int len); >+void lpfc_fc_nl_rcv_msg(struct sk_buff *); >+int lpfc_fc_queue_security_work(struct lpfc_vport *, struct work_struct *); >+int lpfc_fc_nl_rcv_nl_event(struct notifier_block *, unsigned long , void *); > static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); > static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); > static int lpfc_post_rcv_buf(struct lpfc_hba *); >@@ -52,6 +71,29 @@ static struct scsi_transport_template *l > static struct scsi_transport_template *lpfc_vport_transport_template = NULL; > static DEFINE_IDR(lpfc_hba_index); > >+ >+ >+ >+/* >+ * lpfc_hba_max_vpi - Get the maximum supported VPI for an HBA >+ * @device: The PCI device ID for this HBA >+ * >+ * Description: >+ * This routine will return the maximum supported VPI limit for each HBA. In >+ * most cases the maximum VPI limit will be 0xFFFF, which indicates that the >+ * driver supports whatever the HBA can support. In some cases the driver >+ * supports fewer VPI that the HBA supports. 
>+ */ >+static inline uint16_t >+lpfc_hba_max_vpi(unsigned short device) >+{ >+ if ((device == PCI_DEVICE_ID_HELIOS) || >+ (device == PCI_DEVICE_ID_ZEPHYR)) >+ return LPFC_INTR_VPI; >+ else >+ return LPFC_MAX_VPI; >+} >+ > /************************************************************************/ > /* */ > /* lpfc_config_port_prep */ >@@ -145,8 +187,10 @@ lpfc_config_port_prep(struct lpfc_hba *p > return -ERESTART; > } > >- if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) >+ if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { >+ mempool_free(pmb, phba->mbox_mem_pool); > return -EINVAL; >+ } > > /* Save information as VPD data */ > vp->rev.rBit = 1; >@@ -178,12 +222,9 @@ lpfc_config_port_prep(struct lpfc_hba *p > sizeof (phba->RandomData)); > > /* Get adapter VPD information */ >- pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL); >- if (!pmb->context2) >- goto out_free_mbox; > lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); > if (!lpfc_vpd_data) >- goto out_free_context2; >+ goto out_free_mbox; > > do { > lpfc_dump_mem(phba, pmb, offset); >@@ -198,20 +239,32 @@ lpfc_config_port_prep(struct lpfc_hba *p > } > if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) > mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; >- lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, >+ >+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, >+ lpfc_vpd_data + offset, > mb->un.varDmp.word_cnt); > offset += mb->un.varDmp.word_cnt; > } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); > lpfc_parse_vpd(phba, lpfc_vpd_data, offset); > > kfree(lpfc_vpd_data); >-out_free_context2: >- kfree(pmb->context2); > out_free_mbox: > mempool_free(pmb, phba->mbox_mem_pool); > return 0; > } > >+/* Completion handler for config async event mailbox command. 
*/ >+static void >+lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) >+{ >+ if (pmboxq->mb.mbxStatus == MBX_SUCCESS) >+ phba->temp_sensor_support = 1; >+ else >+ phba->temp_sensor_support = 0; >+ mempool_free(pmboxq, phba->mbox_mem_pool); >+ return; >+} >+ > /************************************************************************/ > /* */ > /* lpfc_config_port_post */ >@@ -234,6 +287,15 @@ lpfc_config_port_post(struct lpfc_hba *p > int i, j; > int rc; > >+ spin_lock_irq(&phba->hbalock); >+ /* >+ * If the Config port completed correctly the HBA is not >+ * over heated any more. >+ */ >+ if (phba->over_temp_state == HBA_OVER_TEMP) >+ phba->over_temp_state = HBA_NORMAL_TEMP; >+ spin_unlock_irq(&phba->hbalock); >+ > pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); > if (!pmb) { > phba->link_state = LPFC_HBA_ERROR; >@@ -343,7 +405,7 @@ lpfc_config_port_post(struct lpfc_hba *p > > phba->link_state = LPFC_LINK_DOWN; > >- /* Only process IOCBs on ring 0 till hba_state is READY */ >+ /* Only process IOCBs on ELS ring till hba_state is READY */ > if (psli->ring[psli->extra_ring].cmdringaddr) > psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; > if (psli->ring[psli->fcp_ring].cmdringaddr) >@@ -385,14 +447,27 @@ lpfc_config_port_post(struct lpfc_hba *p > phba->hb_outstanding = 0; > phba->last_completion_time = jiffies; > >+ if (vport->cfg_enable_auth) { >+ if (lpfc_security_service_state == SECURITY_OFFLINE) { >+ lpfc_printf_log(vport->phba, KERN_ERR, LOG_SECURITY, >+ "1000 Authentication is enabled but " >+ "authentication service is not running\n"); >+ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN; >+ phba->link_state = LPFC_HBA_ERROR; >+ mempool_free( pmb, phba->mbox_mem_pool); >+ return 0; >+ } >+ } >+ > lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); > pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; >- pmb->vport = vport; >- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); > lpfc_set_loopback_flag(phba); >+ rc = 
lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); > if (rc != MBX_SUCCESS) { >- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >- "0454 Adapter failed to init, mbxCmd x%x " >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_INIT, >+ "1001 Adapter failed to init, mbxCmd x%x " > "INIT_LINK, mbxStatus x%x\n", > mb->mbxCommand, mb->mbxStatus); > >@@ -408,9 +483,24 @@ lpfc_config_port_post(struct lpfc_hba *p > mempool_free(pmb, phba->mbox_mem_pool); > return -EIO; > } >+ > /* MBOX buffer will be freed in mbox compl */ >+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >+ lpfc_config_async(phba, pmb, LPFC_ELS_RING); >+ pmb->mbox_cmpl = lpfc_config_async_cmpl; >+ pmb->vport = phba->pport; >+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); > >- return (0); >+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_INIT, >+ "0456 Adapter failed to issue " >+ "ASYNCEVT_ENABLE mbox status x%x \n.", >+ rc); >+ mempool_free(pmb, phba->mbox_mem_pool); >+ } >+ return 0; > } > > /************************************************************************/ >@@ -426,11 +516,21 @@ lpfc_config_port_post(struct lpfc_hba *p > int > lpfc_hba_down_prep(struct lpfc_hba *phba) > { >+ struct lpfc_vport **vports; >+ int i; > /* Disable interrupts */ > writel(0, phba->HCregaddr); > readl(phba->HCregaddr); /* flush */ > >- lpfc_cleanup_discovery_resources(phba->pport); >+ if (phba->pport->load_flag & FC_UNLOADING) >+ lpfc_cleanup_discovery_resources(phba->pport); >+ else { >+ vports = lpfc_create_vport_work_array(phba); >+ if (vports != NULL) >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) >+ lpfc_cleanup_discovery_resources(vports[i]); >+ lpfc_destroy_vport_work_array(phba, vports); >+ } > return 0; > } > >@@ -449,6 +549,9 @@ lpfc_hba_down_post(struct lpfc_hba *phba > struct lpfc_sli *psli = &phba->sli; > struct lpfc_sli_ring *pring; > struct lpfc_dmabuf *mp, *next_mp; >+ struct lpfc_iocbq *iocb; >+ IOCB_t *cmd = NULL; >+ LIST_HEAD(completions); > int i; > > 
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) >@@ -464,10 +567,36 @@ lpfc_hba_down_post(struct lpfc_hba *phba > } > } > >+ spin_lock_irq(&phba->hbalock); > for (i = 0; i < psli->num_rings; i++) { > pring = &psli->ring[i]; >+ >+ /* At this point in time the HBA is either reset or DOA. Either >+ * way, nothing should be on txcmplq as it will NEVER complete. >+ */ >+ list_splice_init(&pring->txcmplq, &completions); >+ pring->txcmplq_cnt = 0; >+ spin_unlock_irq(&phba->hbalock); >+ >+ while (!list_empty(&completions)) { >+ iocb = list_get_first(&completions, struct lpfc_iocbq, >+ list); >+ cmd = &iocb->iocb; >+ list_del_init(&iocb->list); >+ >+ if (!iocb->iocb_cmpl) >+ lpfc_sli_release_iocbq(phba, iocb); >+ else { >+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT; >+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; >+ (iocb->iocb_cmpl) (phba, iocb, iocb); >+ } >+ } >+ > lpfc_sli_abort_iocb_ring(phba, pring); >+ spin_lock_irq(&phba->hbalock); > } >+ spin_unlock_irq(&phba->hbalock); > > return 0; > } >@@ -477,16 +606,18 @@ void > lpfc_hb_timeout(unsigned long ptr) > { > struct lpfc_hba *phba; >+ uint32_t tmo_posted; > unsigned long iflag; > > phba = (struct lpfc_hba *)ptr; > spin_lock_irqsave(&phba->pport->work_port_lock, iflag); >- if (!(phba->pport->work_port_events & WORKER_HB_TMO)) >+ tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; >+ if (!tmo_posted) > phba->pport->work_port_events |= WORKER_HB_TMO; > spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); > >- if (phba->work_wait) >- wake_up(phba->work_wait); >+ if (!tmo_posted) >+ lpfc_worker_wake_up(phba); > return; > } > >@@ -512,8 +643,10 @@ void > lpfc_hb_timeout_handler(struct lpfc_hba *phba) > { > LPFC_MBOXQ_t *pmboxq; >+ struct lpfc_dmabuf *buf_ptr; > int retval; > struct lpfc_sli *psli = &phba->sli; >+ LIST_HEAD(completions); > > if ((phba->link_state == LPFC_HBA_ERROR) || > (phba->pport->load_flag & FC_UNLOADING) || >@@ -540,49 +673,88 @@ lpfc_hb_timeout_handler(struct lpfc_hba > } > 
spin_unlock_irq(&phba->pport->work_port_lock); > >- /* If there is no heart beat outstanding, issue a heartbeat command */ >- if (!phba->hb_outstanding) { >- pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); >- if (!pmboxq) { >- mod_timer(&phba->hb_tmofunc, >- jiffies + HZ * LPFC_HB_MBOX_INTERVAL); >- return; >+ if (phba->elsbuf_cnt && >+ (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { >+ spin_lock_irq(&phba->hbalock); >+ list_splice_init(&phba->elsbuf, &completions); >+ phba->elsbuf_cnt = 0; >+ phba->elsbuf_prev_cnt = 0; >+ spin_unlock_irq(&phba->hbalock); >+ >+ while (!list_empty(&completions)) { >+ list_remove_head(&completions, buf_ptr, >+ struct lpfc_dmabuf, list); >+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); >+ kfree(buf_ptr); > } >+ } >+ phba->elsbuf_prev_cnt = phba->elsbuf_cnt; > >- lpfc_heart_beat(phba, pmboxq); >- pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; >- pmboxq->vport = phba->pport; >- retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); >+ /* If there is no heart beat outstanding, issue a heartbeat command */ >+ if (phba->cfg_enable_hba_heartbeat) { >+ if (!phba->hb_outstanding) { >+ pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); >+ if (!pmboxq) { >+ mod_timer(&phba->hb_tmofunc, >+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL); >+ return; >+ } > >- if (retval != MBX_BUSY && retval != MBX_SUCCESS) { >- mempool_free(pmboxq, phba->mbox_mem_pool); >+ lpfc_heart_beat(phba, pmboxq); >+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; >+ pmboxq->vport = phba->pport; >+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); >+ >+ if (retval != MBX_BUSY && retval != MBX_SUCCESS) { >+ mempool_free(pmboxq, phba->mbox_mem_pool); >+ mod_timer(&phba->hb_tmofunc, >+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL); >+ return; >+ } > mod_timer(&phba->hb_tmofunc, >- jiffies + HZ * LPFC_HB_MBOX_INTERVAL); >+ jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); >+ phba->hb_outstanding = 1; > return; >+ } else { >+ /* >+ * If heart beat timeout called with hb_outstanding set >+ * we need 
to take the HBA offline. >+ */ >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "0459 Adapter heartbeat failure, " >+ "taking this port offline.\n"); >+ >+ spin_lock_irq(&phba->hbalock); >+ psli->sli_flag &= ~LPFC_SLI2_ACTIVE; >+ spin_unlock_irq(&phba->hbalock); >+ >+ lpfc_offline_prep(phba); >+ lpfc_offline(phba); >+ lpfc_unblock_mgmt_io(phba); >+ phba->link_state = LPFC_HBA_ERROR; >+ lpfc_hba_down_post(phba); > } >- mod_timer(&phba->hb_tmofunc, >- jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); >- phba->hb_outstanding = 1; >- return; >- } else { >- /* >- * If heart beat timeout called with hb_outstanding set we >- * need to take the HBA offline. >- */ >- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >- "0459 Adapter heartbeat failure, taking " >- "this port offline.\n"); >+ } >+} > >- spin_lock_irq(&phba->hbalock); >- psli->sli_flag &= ~LPFC_SLI2_ACTIVE; >- spin_unlock_irq(&phba->hbalock); >+static void >+lpfc_offline_eratt(struct lpfc_hba *phba) >+{ >+ struct lpfc_sli *psli = &phba->sli; > >- lpfc_offline_prep(phba); >- lpfc_offline(phba); >- lpfc_unblock_mgmt_io(phba); >- phba->link_state = LPFC_HBA_ERROR; >- lpfc_hba_down_post(phba); >- } >+ spin_lock_irq(&phba->hbalock); >+ psli->sli_flag &= ~LPFC_SLI2_ACTIVE; >+ spin_unlock_irq(&phba->hbalock); >+ lpfc_offline_prep(phba); >+ >+ lpfc_offline(phba); >+ lpfc_reset_barrier(phba); >+ lpfc_sli_brdreset(phba); >+ lpfc_hba_down_post(phba); >+ lpfc_sli_brdready(phba, HS_MBRDY); >+ lpfc_unblock_mgmt_io(phba); >+ phba->link_state = LPFC_HBA_ERROR; >+ return; > } > > /************************************************************************/ >@@ -599,35 +771,28 @@ lpfc_handle_eratt(struct lpfc_hba *phba) > struct lpfc_vport *vport = phba->pport; > struct lpfc_sli *psli = &phba->sli; > struct lpfc_sli_ring *pring; >- struct lpfc_vport **vports; > uint32_t event_data; >+ unsigned long temperature; >+ struct temp_event temp_event_data; > struct Scsi_Host *shost; >- int i; > > /* If the pci channel is offline, ignore possible errors, > * 
since we cannot communicate with the pci card anyway. */ > if (pci_channel_offline(phba->pcidev)) > return; > >- if (phba->work_hs & HS_FFER6 || >- phba->work_hs & HS_FFER5) { >+ /* If resets are disabled then leave the HBA alone and return */ >+ if (!phba->cfg_enable_hba_reset) >+ return; >+ >+ if (phba->work_hs & HS_FFER6) { > /* Re-establishing Link */ > lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, > "1301 Re-establishing Link " > "Data: x%x x%x x%x\n", > phba->work_hs, > phba->work_status[0], phba->work_status[1]); >- vports = lpfc_create_vport_work_array(phba); >- if (vports != NULL) >- for(i = 0; >- i < LPFC_MAX_VPORTS && vports[i] != NULL; >- i++){ >- shost = lpfc_shost_from_vport(vports[i]); >- spin_lock_irq(shost->host_lock); >- vports[i]->fc_flag |= FC_ESTABLISH_LINK; >- spin_unlock_irq(shost->host_lock); >- } >- lpfc_destroy_vport_work_array(vports); >+ > spin_lock_irq(&phba->hbalock); > psli->sli_flag &= ~LPFC_SLI2_ACTIVE; > spin_unlock_irq(&phba->hbalock); >@@ -641,7 +806,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba) > pring = &psli->ring[psli->fcp_ring]; > lpfc_sli_abort_iocb_ring(phba, pring); > >- > /* > * There was a firmware error. Take the hba offline and then > * attempt to restart it. 
>@@ -650,11 +814,35 @@ lpfc_handle_eratt(struct lpfc_hba *phba) > lpfc_offline(phba); > lpfc_sli_brdrestart(phba); > if (lpfc_online(phba) == 0) { /* Initialize the HBA */ >- mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); > lpfc_unblock_mgmt_io(phba); > return; > } > lpfc_unblock_mgmt_io(phba); >+ } else if (phba->work_hs & HS_CRIT_TEMP) { >+ temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); >+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; >+ temp_event_data.event_code = LPFC_CRIT_TEMP; >+ temp_event_data.data = (uint32_t)temperature; >+ >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "0406 Adapter maximum temperature exceeded " >+ "(%ld), taking this port offline " >+ "Data: x%x x%x x%x\n", >+ temperature, phba->work_hs, >+ phba->work_status[0], phba->work_status[1]); >+ >+ shost = lpfc_shost_from_vport(phba->pport); >+ fc_host_post_vendor_event(shost, fc_get_event_number(), >+ sizeof(temp_event_data), >+ (char *) &temp_event_data, >+ SCSI_NL_VID_TYPE_PCI >+ | PCI_VENDOR_ID_EMULEX); >+ >+ spin_lock_irq(&phba->hbalock); >+ phba->over_temp_state = HBA_OVER_TEMP; >+ spin_unlock_irq(&phba->hbalock); >+ lpfc_offline_eratt(phba); >+ > } else { > /* The if clause above forces this code path when the status > * failure is a value other than FFER6. 
Do not call the offline >@@ -672,14 +860,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) > sizeof(event_data), (char *) &event_data, > SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); > >- spin_lock_irq(&phba->hbalock); >- psli->sli_flag &= ~LPFC_SLI2_ACTIVE; >- spin_unlock_irq(&phba->hbalock); >- lpfc_offline_prep(phba); >- lpfc_offline(phba); >- lpfc_unblock_mgmt_io(phba); >- phba->link_state = LPFC_HBA_ERROR; >- lpfc_hba_down_post(phba); >+ lpfc_offline_eratt(phba); > } > } > >@@ -699,21 +880,25 @@ lpfc_handle_latt(struct lpfc_hba *phba) > LPFC_MBOXQ_t *pmb; > volatile uint32_t control; > struct lpfc_dmabuf *mp; >- int rc = -ENOMEM; >+ int rc = 0; > > pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >- if (!pmb) >+ if (!pmb) { >+ rc = 1; > goto lpfc_handle_latt_err_exit; >+ } > > mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); >- if (!mp) >+ if (!mp) { >+ rc = 2; > goto lpfc_handle_latt_free_pmb; >+ } > > mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); >- if (!mp->virt) >+ if (!mp->virt) { >+ rc = 3; > goto lpfc_handle_latt_free_mp; >- >- rc = -EIO; >+ } > > /* Cleanup any outstanding ELS commands */ > lpfc_els_flush_all_cmd(phba); >@@ -722,9 +907,13 @@ lpfc_handle_latt(struct lpfc_hba *phba) > lpfc_read_la(phba, pmb, mp); > pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; > pmb->vport = vport; >- rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); >- if (rc == MBX_NOT_FINISHED) >+ /* Block ELS IOCBs until we have processed this mbox command */ >+ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; >+ rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); >+ if (rc == MBX_NOT_FINISHED) { >+ rc = 4; > goto lpfc_handle_latt_free_mbuf; >+ } > > /* Clear Link Attention in HA REG */ > spin_lock_irq(&phba->hbalock); >@@ -735,6 +924,7 @@ lpfc_handle_latt(struct lpfc_hba *phba) > return; > > lpfc_handle_latt_free_mbuf: >+ phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; > lpfc_mbuf_free(phba, mp->virt, mp->phys); > 
lpfc_handle_latt_free_mp: > kfree(mp); >@@ -756,10 +946,8 @@ lpfc_handle_latt_err_exit: > lpfc_linkdown(phba); > phba->link_state = LPFC_HBA_ERROR; > >- /* The other case is an error from issue_mbox */ >- if (rc == -ENOMEM) >- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, >- "0300 READ_LA: no buffers\n"); >+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, >+ "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); > > return; > } >@@ -912,6 +1100,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba > lpfc_vpd_t *vp; > uint16_t dev_id = phba->pcidev->device; > int max_speed; >+ int GE = 0; > struct { > char * name; > int max_speed; >@@ -1043,6 +1232,19 @@ lpfc_get_hba_model_desc(struct lpfc_hba > case PCI_DEVICE_ID_SAT_S: > m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; > break; >+ case PCI_DEVICE_ID_HORNET: >+ m = (typeof(m)){"LP21000", max_speed, "PCIe"}; >+ GE = 1; >+ break; >+ case PCI_DEVICE_ID_PROTEUS_VF: >+ m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; >+ break; >+ case PCI_DEVICE_ID_PROTEUS_PF: >+ m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; >+ break; >+ case PCI_DEVICE_ID_PROTEUS_S: >+ m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; >+ break; > default: > m = (typeof(m)){ NULL }; > break; >@@ -1052,8 +1254,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba > snprintf(mdp, 79,"%s", m.name); > if (descp && descp[0] == '\0') > snprintf(descp, 255, >- "Emulex %s %dGb %s Fibre Channel Adapter", >- m.name, m.max_speed, m.bus); >+ "Emulex %s %d%s %s %s", >+ m.name, m.max_speed, >+ (GE) ? "GE":"Gb", >+ m.bus, >+ (GE) ? 
"FCoE Adapter" : "Fibre Channel Adapter"); > } > > /**************************************************/ >@@ -1280,41 +1485,82 @@ lpfc_hba_init(struct lpfc_hba *phba, uin > kfree(HashWorking); > } > >-static void >+void > lpfc_cleanup(struct lpfc_vport *vport) > { >+ struct lpfc_hba *phba = vport->phba; > struct lpfc_nodelist *ndlp, *next_ndlp; >+ int i = 0; > >- /* clean up phba - lpfc specific */ >- lpfc_can_disctmo(vport); >- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) >- lpfc_nlp_put(ndlp); >- return; >-} >+ if (phba->link_state > LPFC_LINK_DOWN) >+ lpfc_port_link_failure(vport); > >-static void >-lpfc_establish_link_tmo(unsigned long ptr) >-{ >- struct lpfc_hba *phba = (struct lpfc_hba *) ptr; >- struct lpfc_vport **vports; >- unsigned long iflag; >- int i; >+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, >+ NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ continue; >+ spin_lock_irq(&phba->ndlp_lock); >+ NLP_SET_FREE_REQ(ndlp); >+ spin_unlock_irq(&phba->ndlp_lock); >+ /* Trigger the release of the ndlp memory */ >+ lpfc_nlp_put(ndlp); >+ continue; >+ } >+ spin_lock_irq(&phba->ndlp_lock); >+ if (NLP_CHK_FREE_REQ(ndlp)) { >+ /* The ndlp should not be in memory free mode already */ >+ spin_unlock_irq(&phba->ndlp_lock); >+ continue; >+ } else >+ /* Indicate request for freeing ndlp memory */ >+ NLP_SET_FREE_REQ(ndlp); >+ spin_unlock_irq(&phba->ndlp_lock); >+ >+ if (vport->port_type != LPFC_PHYSICAL_PORT && >+ ndlp->nlp_DID == Fabric_DID) { >+ /* Just free up ndlp with Fabric_DID for vports */ >+ lpfc_nlp_put(ndlp); >+ continue; >+ } >+ >+ if (ndlp->nlp_type & NLP_FABRIC) >+ lpfc_disc_state_machine(vport, ndlp, NULL, >+ NLP_EVT_DEVICE_RECOVERY); >+ >+ lpfc_disc_state_machine(vport, ndlp, NULL, >+ NLP_EVT_DEVICE_RM); > >- /* Re-establishing Link, timer expired */ >- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, >- "1300 Re-establishing Link, timer 
expired " >- "Data: x%x x%x\n", >- phba->pport->fc_flag, phba->pport->port_state); >- vports = lpfc_create_vport_work_array(phba); >- if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { >- struct Scsi_Host *shost; >- shost = lpfc_shost_from_vport(vports[i]); >- spin_lock_irqsave(shost->host_lock, iflag); >- vports[i]->fc_flag &= ~FC_ESTABLISH_LINK; >- spin_unlock_irqrestore(shost->host_lock, iflag); >+ } >+ >+ /* At this point, ALL ndlp's should be gone >+ * because of the previous NLP_EVT_DEVICE_RM. >+ * Lets wait for this to happen, if needed. >+ */ >+ while (!list_empty(&vport->fc_nodes)) { >+ >+ if (i++ > 3000) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, >+ "0233 Nodelist not empty\n"); >+ list_for_each_entry_safe(ndlp, next_ndlp, >+ &vport->fc_nodes, nlp_listp) { >+ lpfc_printf_vlog(ndlp->vport, KERN_ERR, >+ LOG_NODE, >+ "0282 did:x%x ndlp:x%p " >+ "usgmap:x%x refcnt:%d\n", >+ ndlp->nlp_DID, (void *)ndlp, >+ ndlp->nlp_usg_map, >+ atomic_read( >+ &ndlp->kref.refcount)); >+ } >+ break; > } >- lpfc_destroy_vport_work_array(vports); >+ >+ /* Wait for any activity on ndlps to settle */ >+ msleep(10); >+ } >+ return; > } > > void >@@ -1330,7 +1576,6 @@ static void > lpfc_stop_phba_timers(struct lpfc_hba *phba) > { > del_timer_sync(&phba->fcp_poll_timer); >- del_timer_sync(&phba->fc_estabtmo); > lpfc_stop_vport_timers(phba->pport); > del_timer_sync(&phba->sli.mbox_tmo); > del_timer_sync(&phba->fabric_block_timer); >@@ -1369,7 +1614,7 @@ lpfc_online(struct lpfc_hba *phba) > > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { > struct Scsi_Host *shost; > shost = lpfc_shost_from_vport(vports[i]); > spin_lock_irq(shost->host_lock); >@@ -1378,7 +1623,7 @@ lpfc_online(struct lpfc_hba *phba) > vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; > spin_unlock_irq(shost->host_lock); > } >- 
lpfc_destroy_vport_work_array(vports); >+ lpfc_destroy_vport_work_array(phba, vports); > > lpfc_unblock_mgmt_io(phba); > return 0; >@@ -1409,6 +1654,8 @@ lpfc_offline_prep(struct lpfc_hba * phba > { > struct lpfc_vport *vport = phba->pport; > struct lpfc_nodelist *ndlp, *next_ndlp; >+ struct lpfc_vport **vports; >+ int i; > > if (vport->fc_flag & FC_OFFLINE_MODE) > return; >@@ -1417,10 +1664,36 @@ lpfc_offline_prep(struct lpfc_hba * phba > > lpfc_linkdown(phba); > >- /* Issue an unreg_login to all nodes */ >- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) >- if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) >- lpfc_unreg_rpi(vport, ndlp); >+ /* Issue an unreg_login to all nodes on all vports */ >+ vports = lpfc_create_vport_work_array(phba); >+ if (vports != NULL) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { >+ struct Scsi_Host *shost; >+ >+ if (vports[i]->load_flag & FC_UNLOADING) >+ continue; >+ shost = lpfc_shost_from_vport(vports[i]); >+ list_for_each_entry_safe(ndlp, next_ndlp, >+ &vports[i]->fc_nodes, >+ nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; >+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) >+ continue; >+ if (ndlp->nlp_type & NLP_FABRIC) { >+ lpfc_disc_state_machine(vports[i], ndlp, >+ NULL, NLP_EVT_DEVICE_RECOVERY); >+ lpfc_disc_state_machine(vports[i], ndlp, >+ NULL, NLP_EVT_DEVICE_RM); >+ } >+ spin_lock_irq(shost->host_lock); >+ ndlp->nlp_flag &= ~NLP_NPR_ADISC; >+ spin_unlock_irq(shost->host_lock); >+ lpfc_unreg_rpi(vports[i], ndlp); >+ } >+ } >+ } >+ lpfc_destroy_vport_work_array(phba, vports); > > lpfc_sli_flush_mbox_queue(phba); > } >@@ -1439,9 +1712,9 @@ lpfc_offline(struct lpfc_hba *phba) > lpfc_stop_phba_timers(phba); > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) > lpfc_stop_vport_timers(vports[i]); >- lpfc_destroy_vport_work_array(vports); >+ 
lpfc_destroy_vport_work_array(phba, vports); > lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, > "0460 Bring Adapter offline\n"); > /* Bring down the SLI Layer and cleanup. The HBA is offline >@@ -1452,15 +1725,14 @@ lpfc_offline(struct lpfc_hba *phba) > spin_unlock_irq(&phba->hbalock); > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { > shost = lpfc_shost_from_vport(vports[i]); >- lpfc_cleanup(vports[i]); > spin_lock_irq(shost->host_lock); > vports[i]->work_port_events = 0; > vports[i]->fc_flag |= FC_OFFLINE_MODE; > spin_unlock_irq(shost->host_lock); > } >- lpfc_destroy_vport_work_array(vports); >+ lpfc_destroy_vport_work_array(phba, vports); > } > > /****************************************************************************** >@@ -1515,9 +1787,9 @@ lpfc_create_port(struct lpfc_hba *phba, > > vport = (struct lpfc_vport *) shost->hostdata; > vport->phba = phba; >- > vport->load_flag |= FC_LOADING; > vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; >+ vport->fc_rscn_flush = 0; > > lpfc_get_vport_cfgparam(vport); > shost->unique_id = instance; >@@ -1556,8 +1828,17 @@ lpfc_create_port(struct lpfc_hba *phba, > vport->els_tmofunc.data = (unsigned long)vport; > > error = scsi_add_host(shost, dev); >- if (error) >+ if (error || !shost->shost_data || !shost->work_q) > goto out_put_shost; >+ vport->auth.challenge = NULL; >+ vport->auth.challenge_len = 0; >+ vport->auth.dh_pub_key = NULL; >+ vport->auth.dh_pub_key_len = 0; >+ >+ INIT_WORK(&vport->sc_online_work, lpfc_fc_sc_security_online); >+ INIT_WORK(&vport->sc_offline_work, lpfc_fc_sc_security_offline); >+ INIT_LIST_HEAD(&vport->sc_users); >+ INIT_LIST_HEAD(&vport->sc_response_wait_queue); > > spin_lock_irq(&phba->hbalock); > list_add_tail(&vport->listentry, &phba->port_list); >@@ -1674,6 +1955,8 @@ void lpfc_host_attrib_init(struct Scsi_H > fc_host_supported_speeds(shost) = 0; > if 
(phba->lmt & LMT_10Gb) > fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; >+ if (phba->lmt & LMT_8Gb) >+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; > if (phba->lmt & LMT_4Gb) > fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; > if (phba->lmt & LMT_2Gb) >@@ -1697,6 +1980,73 @@ void lpfc_host_attrib_init(struct Scsi_H > spin_unlock_irq(shost->host_lock); > } > >+void >+lpfc_setup_max_dma_length(struct lpfc_hba * phba) >+{ >+ struct pci_dev *pdev = phba->pcidev; >+ struct pci_bus *bus = pdev->bus; >+ uint8_t rev; >+ >+ while (bus) { >+ /* >+ * 0x7450 == PCI_DEVICE_ID_AMD_8131_BRIDGE for 2.6 kernels >+ * 0x7450 == PCI_DEVICE_ID_AMD_8131_APIC for 2.4 kernels >+ */ >+ if ( bus->self && >+ (bus->self->vendor == PCI_VENDOR_ID_AMD) && >+ (bus->self->device == 0x7450)) { >+ pci_read_config_byte(bus->self, 0x08, &rev); >+ if (rev == 0x13) { >+ /* >+ * If set a value in module paramter, >+ * use that value. >+ */ >+ if (phba->cfg_pci_max_read == 2048) >+ phba->cfg_pci_max_read = 1024; >+ return; >+ } >+ } >+ bus = bus->parent; >+ } >+ return; >+} >+ >+static int >+lpfc_enable_msix(struct lpfc_hba *phba) >+{ >+ int error; >+ >+ phba->msix_entries[0].entry = 0; >+ phba->msix_entries[0].vector = 0; >+ >+ error = pci_enable_msix(phba->pcidev, phba->msix_entries, >+ ARRAY_SIZE(phba->msix_entries)); >+ if (error) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, >+ "0420 Enable MSI-X failed (%d), continuing " >+ "with MSI\n", error); >+ pci_disable_msix(phba->pcidev); >+ return error; >+ } >+ >+ error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, >+ LPFC_DRIVER_NAME, phba); >+ if (error) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "0421 MSI-X request_irq failed (%d), " >+ "continuing with MSI\n", error); >+ pci_disable_msix(phba->pcidev); >+ } >+ return error; >+} >+ >+static void >+lpfc_disable_msix(struct lpfc_hba *phba) >+{ >+ free_irq(phba->msix_entries[0].vector, phba); >+ pci_disable_msix(phba->pcidev); >+} >+ > static int 
__devinit > lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) > { >@@ -1707,13 +2057,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, > struct Scsi_Host *shost = NULL; > void *ptr; > unsigned long bar0map_len, bar2map_len; >- int error = -ENODEV; >+ int error = -ENODEV, retval; > int i, hbq_count; > uint16_t iotag; >+ int bars = pci_select_bars(pdev, IORESOURCE_MEM); > >- if (pci_enable_device(pdev)) >+ if (pci_enable_device_bars(pdev, bars)) > goto out; >- if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) >+ if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) > goto out_disable_device; > > phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); >@@ -1722,25 +2073,31 @@ lpfc_pci_probe_one(struct pci_dev *pdev, > > spin_lock_init(&phba->hbalock); > >+ /* Initialize ndlp management spinlock */ >+ spin_lock_init(&phba->ndlp_lock); >+ > phba->pcidev = pdev; >+ if (!pdev->error_state) >+ pdev->error_state = pci_channel_io_normal; > > /* Assign an unused board number */ > if ((phba->brd_no = lpfc_get_instance()) < 0) > goto out_free_phba; > > INIT_LIST_HEAD(&phba->port_list); >+ init_waitqueue_head(&phba->wait_4_mlo_m_q); > /* > * Get all the module params for configuring this host and then > * establish the host. 
> */ > lpfc_get_cfgparam(phba); >- phba->max_vpi = LPFC_MAX_VPI; > >- /* Initialize timers used by driver */ >- init_timer(&phba->fc_estabtmo); >- phba->fc_estabtmo.function = lpfc_establish_link_tmo; >- phba->fc_estabtmo.data = (unsigned long)phba; >+ /* Check if we need to change the DMA length */ >+ lpfc_setup_max_dma_length(phba); > >+ phba->max_vpi = lpfc_hba_max_vpi(phba->pcidev->device); >+ >+ /* Initialize timers used by driver */ > init_timer(&phba->hb_tmofunc); > phba->hb_tmofunc.function = lpfc_hb_timeout; > phba->hb_tmofunc.data = (unsigned long)phba; >@@ -1757,7 +2114,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, > phba->fabric_block_timer.data = (unsigned long) phba; > > pci_set_master(pdev); >- pci_try_set_mwi(pdev); >+ retval = pci_set_mwi(pdev); >+ if (retval) >+ dev_printk(KERN_WARNING, &pdev->dev, >+ "Warning: pci_set_mwi returned %d\n", retval); > > if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) > if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) >@@ -1819,13 +2179,17 @@ lpfc_pci_probe_one(struct pci_dev *pdev, > > memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); > >+ INIT_LIST_HEAD(&phba->hbqbuf_in_list); >+ > /* Initialize the SLI Layer to run with lpfc HBAs. */ > lpfc_sli_setup(phba); > lpfc_sli_queue_setup(phba); > >- error = lpfc_mem_alloc(phba); >- if (error) >+ retval = lpfc_mem_alloc(phba); >+ if (retval) { >+ error = retval; > goto out_free_hbqslimp; >+ } > > /* Initialize and populate the iocb list per host. */ > INIT_LIST_HEAD(&phba->lpfc_iocb_list); >@@ -1865,6 +2229,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, > phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); > phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); > >+ /* Initialize the wait queue head for the kernel thread */ >+ init_waitqueue_head(&phba->work_waitq); >+ > /* Startup the kernel thread for this host adapter. 
*/ > phba->worker_thread = kthread_run(lpfc_do_work, phba, > "lpfc_worker_%d", phba->brd_no); >@@ -1880,32 +2247,68 @@ lpfc_pci_probe_one(struct pci_dev *pdev, > /* Initialize list of fabric iocbs */ > INIT_LIST_HEAD(&phba->fabric_iocb_list); > >+ /* Initialize list to save ELS buffers */ >+ INIT_LIST_HEAD(&phba->elsbuf); >+ >+ /* Initialize list of sysfs mailbox commands */ >+ INIT_LIST_HEAD(&phba->sysfs_mbox_list); >+ /* Initialize list of sysfs menlo commands */ >+ INIT_LIST_HEAD(&phba->sysfs_menlo_list); >+ > vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); > if (!vport) > goto out_kthread_stop; > > shost = lpfc_shost_from_vport(vport); >+ >+ if ((lpfc_get_security_enabled)(shost)){ >+ unsigned long flags; >+ spin_lock_irqsave(&fc_security_user_lock, flags); >+ >+ list_add_tail(&vport->sc_users, &fc_security_user_list); >+ >+ spin_unlock_irqrestore(&fc_security_user_lock, flags); >+ >+ if (fc_service_state == FC_SC_SERVICESTATE_ONLINE) { >+ lpfc_fc_queue_security_work(vport, >+ &vport->sc_online_work); >+ } >+ } >+ > phba->pport = vport; > lpfc_debugfs_initialize(vport); > > pci_set_drvdata(pdev, shost); >+ phba->intr_type = NONE; > >- if (phba->cfg_use_msi) { >- error = pci_enable_msi(phba->pcidev); >+ if (phba->cfg_use_msi == 2) { >+ error = lpfc_enable_msix(phba); > if (!error) >- phba->using_msi = 1; >+ phba->intr_type = MSIX; >+ } >+ >+ /* Fallback to MSI if MSI-X initialization failed */ >+ if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { >+ retval = pci_enable_msi(phba->pcidev); >+ if (!retval) >+ phba->intr_type = MSI; > else > lpfc_printf_log(phba, KERN_INFO, LOG_INIT, > "0452 Enable MSI failed, continuing " > "with IRQ\n"); > } > >- error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, >- LPFC_DRIVER_NAME, phba); >- if (error) { >- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >- "0451 Enable interrupt handler failed\n"); >- goto out_disable_msi; >+ /* MSI-X is the only case the doesn't need to call request_irq */ 
>+ if (phba->intr_type != MSIX) { >+ retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, >+ IRQF_SHARED, LPFC_DRIVER_NAME, phba); >+ if (retval) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " >+ "interrupt handler failed\n"); >+ error = retval; >+ goto out_disable_msi; >+ } else if (phba->intr_type != MSI) >+ phba->intr_type = INTx; > } > > phba->MBslimaddr = phba->slim_memmap_p; >@@ -1914,11 +2317,23 @@ lpfc_pci_probe_one(struct pci_dev *pdev, > phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; > phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; > >- if (lpfc_alloc_sysfs_attr(vport)) >+ phba->dfc_host = lpfcdfc_host_add(pdev, shost, phba); >+ if (!phba->dfc_host) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1201 Failed to allocate dfc_host \n"); >+ error = -ENOMEM; > goto out_free_irq; >+ } >+ >+ if (lpfc_alloc_sysfs_attr(vport)) { >+ error = -ENOMEM; >+ goto out_free_irq; >+ } > >- if (lpfc_sli_hba_setup(phba)) >+ if (lpfc_sli_hba_setup(phba)) { >+ error = -ENODEV; > goto out_remove_device; >+ } > > /* > * hba setup may have changed the hba_queue_depth so we need to adjust >@@ -1944,11 +2359,18 @@ out_remove_device: > vport->load_flag |= FC_UNLOADING; > spin_unlock_irq(shost->host_lock); > out_free_irq: >+ if (phba->dfc_host) >+ lpfcdfc_host_del(phba->dfc_host); > lpfc_stop_phba_timers(phba); > phba->pport->work_port_events = 0; >- free_irq(phba->pcidev->irq, phba); >+ >+ if (phba->intr_type == MSIX) >+ lpfc_disable_msix(phba); >+ else >+ free_irq(phba->pcidev->irq, phba); >+ > out_disable_msi: >- if (phba->using_msi) >+ if (phba->intr_type == MSI) > pci_disable_msi(phba->pcidev); > destroy_port(vport); > out_kthread_stop: >@@ -1975,7 +2397,7 @@ out_idr_remove: > out_free_phba: > kfree(phba); > out_release_regions: >- pci_release_regions(pdev); >+ pci_release_selected_regions(pdev, bars); > out_disable_device: > pci_disable_device(pdev); > out: >@@ -1991,6 +2413,11 @@ lpfc_pci_remove_one(struct pci_dev *pdev > 
struct Scsi_Host *shost = pci_get_drvdata(pdev); > struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; > struct lpfc_hba *phba = vport->phba; >+ int bars = pci_select_bars(pdev, IORESOURCE_MEM); >+ >+ lpfcdfc_host_del(phba->dfc_host); >+ phba->dfc_host = NULL; >+ > spin_lock_irq(&phba->hbalock); > vport->load_flag |= FC_UNLOADING; > spin_unlock_irq(&phba->hbalock); >@@ -1998,8 +2425,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev > kfree(vport->vname); > lpfc_free_sysfs_attr(vport); > >+ kthread_stop(phba->worker_thread); >+ > fc_remove_host(shost); > scsi_remove_host(shost); >+ lpfc_cleanup(vport); >+ > /* > * Bring down the SLI Layer. This step disable all interrupts, > * clears the rings, discards all mailbox commands, and resets >@@ -2014,14 +2445,14 @@ lpfc_pci_remove_one(struct pci_dev *pdev > spin_unlock_irq(&phba->hbalock); > > lpfc_debugfs_terminate(vport); >- lpfc_cleanup(vport); > >- kthread_stop(phba->worker_thread); >- >- /* Release the irq reservation */ >- free_irq(phba->pcidev->irq, phba); >- if (phba->using_msi) >- pci_disable_msi(phba->pcidev); >+ if (phba->intr_type == MSIX) >+ lpfc_disable_msix(phba); >+ else { >+ free_irq(phba->pcidev->irq, phba); >+ if (phba->intr_type == MSI) >+ pci_disable_msi(phba->pcidev); >+ } > > pci_set_drvdata(pdev, NULL); > scsi_host_put(shost); >@@ -2048,7 +2479,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev > > kfree(phba); > >- pci_release_regions(pdev); >+ pci_release_selected_regions(pdev, bars); > pci_disable_device(pdev); > } > >@@ -2080,10 +2511,13 @@ static pci_ers_result_t lpfc_io_error_de > pring = &psli->ring[psli->fcp_ring]; > lpfc_sli_abort_iocb_ring(phba, pring); > >- /* Release the irq reservation */ >- free_irq(phba->pcidev->irq, phba); >- if (phba->using_msi) >- pci_disable_msi(phba->pcidev); >+ if (phba->intr_type == MSIX) >+ lpfc_disable_msix(phba); >+ else { >+ free_irq(phba->pcidev->irq, phba); >+ if (phba->intr_type == MSI) >+ pci_disable_msi(phba->pcidev); >+ } > > /* Request a slot 
reset. */ > return PCI_ERS_RESULT_NEED_RESET; >@@ -2100,9 +2534,14 @@ static pci_ers_result_t lpfc_io_slot_res > struct Scsi_Host *shost = pci_get_drvdata(pdev); > struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; > struct lpfc_sli *psli = &phba->sli; >+ int error, retval; > int bars = pci_select_bars(pdev, IORESOURCE_MEM); > > dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); >+ >+ /* Workaround on core EEH code not setting pdev->error_state properly */ >+ pdev->error_state = pci_channel_io_normal; >+ > if (pci_enable_device_bars(pdev, bars)) { > printk(KERN_ERR "lpfc: Cannot re-enable " > "PCI device after reset.\n"); >@@ -2111,15 +2550,40 @@ static pci_ers_result_t lpfc_io_slot_res > > pci_set_master(pdev); > >- /* Re-establishing Link */ >- spin_lock_irq(shost->host_lock); >- phba->pport->fc_flag |= FC_ESTABLISH_LINK; >- spin_unlock_irq(shost->host_lock); >- > spin_lock_irq(&phba->hbalock); > psli->sli_flag &= ~LPFC_SLI2_ACTIVE; > spin_unlock_irq(&phba->hbalock); > >+ /* Enable configured interrupt method */ >+ phba->intr_type = NONE; >+ if (phba->cfg_use_msi == 2) { >+ error = lpfc_enable_msix(phba); >+ if (!error) >+ phba->intr_type = MSIX; >+ } >+ >+ /* Fallback to MSI if MSI-X initialization failed */ >+ if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { >+ retval = pci_enable_msi(phba->pcidev); >+ if (!retval) >+ phba->intr_type = MSI; >+ else >+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, >+ "0470 Enable MSI failed, continuing " >+ "with IRQ\n"); >+ } >+ >+ /* MSI-X is the only case that doesn't need to call request_irq */ >+ if (phba->intr_type != MSIX) { >+ retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, >+ IRQF_SHARED, LPFC_DRIVER_NAME, phba); >+ if (retval) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "0471 Enable interrupt handler " >+ "failed\n"); >+ } else if (phba->intr_type != MSI) >+ phba->intr_type = INTx; >+ } > > /* Take device offline; this will perform cleanup */ > 
lpfc_offline(phba); >@@ -2140,9 +2604,7 @@ static void lpfc_io_resume(struct pci_de > struct Scsi_Host *shost = pci_get_drvdata(pdev); > struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; > >- if (lpfc_online(phba) == 0) { >- mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); >- } >+ lpfc_online(phba); > } > > static struct pci_device_id lpfc_id_table[] = { >@@ -2182,6 +2644,8 @@ static struct pci_device_id lpfc_id_tabl > PCI_ANY_ID, PCI_ANY_ID, }, > {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, > PCI_ANY_ID, PCI_ANY_ID, }, >+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, >+ PCI_ANY_ID, PCI_ANY_ID, }, > {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, > PCI_ANY_ID, PCI_ANY_ID, }, > {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, >@@ -2212,6 +2676,12 @@ static struct pci_device_id lpfc_id_tabl > PCI_ANY_ID, PCI_ANY_ID, }, > {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, > PCI_ANY_ID, PCI_ANY_ID, }, >+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, >+ PCI_ANY_ID, PCI_ANY_ID, }, >+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, >+ PCI_ANY_ID, PCI_ANY_ID, }, >+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, >+ PCI_ANY_ID, PCI_ANY_ID, }, > { 0 } > }; > >@@ -2239,17 +2709,59 @@ lpfc_init(void) > printk(LPFC_MODULE_DESC "\n"); > printk(LPFC_COPYRIGHT "\n"); > >+ if (lpfc_enable_npiv) { >+ lpfc_transport_functions.vport_create = lpfc_vport_create; >+ lpfc_transport_functions.vport_delete = lpfc_vport_delete; >+ } > lpfc_transport_template = > fc_attach_transport(&lpfc_transport_functions); >- lpfc_vport_transport_template = >- fc_attach_transport(&lpfc_vport_transport_functions); >- if (!lpfc_transport_template || !lpfc_vport_transport_template) >+ if (lpfc_transport_template == NULL) > return -ENOMEM; >+ if (lpfc_enable_npiv) { >+ lpfc_vport_transport_template = >+ fc_attach_transport(&lpfc_vport_transport_functions); >+ if (lpfc_vport_transport_template == NULL) { >+ fc_release_transport(lpfc_transport_template); >+ return -ENOMEM; >+ } >+ } >+ >+ 
error = netlink_register_notifier(&lpfc_fc_netlink_notifier); >+ if (error) >+ goto out_release_transport; >+ fc_nl_sock = netlink_kernel_create(&init_net, NETLINK_FCTRANSPORT, >+ FC_NL_GROUP_CNT, lpfc_fc_nl_rcv_msg, >+ NULL, THIS_MODULE); >+ if (!fc_nl_sock) >+ goto out_unregister_notifier; >+ snprintf(security_work_q_name, KOBJ_NAME_LEN, "fc_sc_wq"); >+ security_work_q = create_singlethread_workqueue(security_work_q_name); >+ if (!security_work_q) >+ goto out_sock_release; >+ INIT_LIST_HEAD(&fc_security_user_list); > error = pci_register_driver(&lpfc_driver); >- if (error) { >- fc_release_transport(lpfc_transport_template); >+ if (error) >+ goto out_destroy_workqueue; >+ error = lpfc_cdev_init(); >+ if (error) >+ goto out_pci_unregister; >+ >+ return error; >+ >+out_pci_unregister: >+ pci_unregister_driver(&lpfc_driver); >+out_destroy_workqueue: >+ destroy_workqueue(security_work_q); >+ security_work_q = NULL; >+out_sock_release: >+ sock_release(fc_nl_sock->sk_socket); >+ fc_nl_sock = NULL; >+out_unregister_notifier: >+ netlink_unregister_notifier(&lpfc_fc_netlink_notifier); >+out_release_transport: >+ fc_release_transport(lpfc_transport_template); >+ if (lpfc_enable_npiv) > fc_release_transport(lpfc_vport_transport_template); >- } > > return error; > } >@@ -2258,8 +2770,18 @@ static void __exit > lpfc_exit(void) > { > pci_unregister_driver(&lpfc_driver); >+ if (security_work_q) >+ destroy_workqueue(security_work_q); >+ security_work_q = NULL; >+ if (fc_nl_sock) { >+ sock_release(fc_nl_sock->sk_socket); >+ netlink_unregister_notifier(&lpfc_fc_netlink_notifier); >+ fc_nl_sock = NULL; >+ } > fc_release_transport(lpfc_transport_template); >- fc_release_transport(lpfc_vport_transport_template); >+ if (lpfc_enable_npiv) >+ fc_release_transport(lpfc_vport_transport_template); >+ lpfc_cdev_exit(); > } > > module_init(lpfc_init); >diff -urpN a/drivers/scsi/lpfc/lpfc_ioctl.c b/drivers/scsi/lpfc/lpfc_ioctl.c >--- a/drivers/scsi/lpfc/lpfc_ioctl.c 1969-12-31 
19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_ioctl.c 2008-09-05 17:47:49.760877000 -0400 >@@ -0,0 +1,2520 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006-2008 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. * >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. 
* >+ *******************************************************************/ >+ >+#include <linux/delay.h> >+#include <linux/blkdev.h> >+#include <linux/interrupt.h> >+#include <linux/pci.h> >+ >+#include <scsi/scsi_host.h> >+#include <scsi/scsi_transport_fc.h> >+ >+#include "lpfc_hw.h" >+#include "lpfc_sli.h" >+#include "lpfc_disc.h" >+#include "lpfc_scsi.h" >+#include "lpfc.h" >+#include "lpfc_crtn.h" >+#include "lpfc_ioctl.h" >+#include "lpfc_logmsg.h" >+#include "lpfc_vport.h" >+ >+ >+struct lpfcdfc_event { >+ struct list_head node; >+ int ref; >+ wait_queue_head_t wq; >+ >+ /* Event type and waiter identifiers */ >+ uint32_t type_mask; >+ uint32_t req_id; >+ uint32_t reg_id; >+ >+ /* next two flags are here for the auto-delete logic */ >+ unsigned long wait_time_stamp; >+ int waiting; >+ >+ /* seen and not seen events */ >+ struct list_head events_to_get; >+ struct list_head events_to_see; >+}; >+ >+struct event_data { >+ struct list_head node; >+ uint32_t type; >+ uint32_t immed_dat; >+ void * data; >+ uint32_t len; >+}; >+ >+ >+/* values for a_topology */ >+#define LNK_LOOP 0x1 >+#define LNK_PUBLIC_LOOP 0x2 >+#define LNK_FABRIC 0x3 >+#define LNK_PT2PT 0x4 >+ >+/* values for a_linkState */ >+#define LNK_DOWN 0x1 >+#define LNK_UP 0x2 >+#define LNK_FLOGI 0x3 >+#define LNK_DISCOVERY 0x4 >+#define LNK_REDISCOVERY 0x5 >+#define LNK_READY 0x6 >+ >+struct lpfcdfc_host { >+ struct list_head node; >+ int inst; >+ struct lpfc_hba * phba; >+ struct lpfc_vport *vport; >+ struct Scsi_Host * host; >+ struct pci_dev * dev; >+ void (*base_ct_unsol_event)(struct lpfc_hba *, >+ struct lpfc_sli_ring *, >+ struct lpfc_iocbq *); >+ /* Threads waiting for async event */ >+ struct list_head ev_waiters; >+ uint32_t blocked; >+ uint32_t ref_count; >+}; >+ >+ >+ >+ >+static void lpfc_ioctl_timeout_iocb_cmpl(struct lpfc_hba *, >+ struct lpfc_iocbq *, struct lpfc_iocbq *); >+ >+static struct lpfc_dmabufext * >+dfc_cmd_data_alloc(struct lpfc_hba *, char *, >+ struct ulp_bde64 *, uint32_t); 
>+static int dfc_cmd_data_free(struct lpfc_hba *, struct lpfc_dmabufext *); >+static int dfc_rsp_data_copy(struct lpfc_hba *, uint8_t *, >+ struct lpfc_dmabufext *, >+ uint32_t); >+static int lpfc_issue_ct_rsp(struct lpfc_hba *, uint32_t, struct lpfc_dmabuf *, >+ struct lpfc_dmabufext *); >+ >+static struct lpfcdfc_host * lpfcdfc_host_from_hba(struct lpfc_hba *); >+ >+static DEFINE_MUTEX(lpfcdfc_lock); >+ >+static struct list_head lpfcdfc_hosts = LIST_HEAD_INIT(lpfcdfc_hosts); >+ >+static int lpfcdfc_major = 0; >+ >+static int >+lpfc_ioctl_hba_rnid(struct lpfc_hba * phba, >+ struct lpfcCmdInput * cip, >+ void *dataout) >+{ >+ struct nport_id idn; >+ struct lpfc_sli *psli; >+ struct lpfc_iocbq *cmdiocbq = NULL; >+ struct lpfc_iocbq *rspiocbq = NULL; >+ RNID *prsp; >+ uint32_t *pcmd; >+ uint32_t *psta; >+ IOCB_t *rsp; >+ struct lpfc_sli_ring *pring; >+ void *context2; >+ int i0; >+ int rtnbfrsiz; >+ struct lpfc_nodelist *pndl; >+ int rc = 0; >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ >+ if (copy_from_user((uint8_t *) &idn, (void __user *) cip->lpfc_arg1, >+ sizeof(struct nport_id))) { >+ rc = EIO; >+ return rc; >+ } >+ >+ if (idn.idType == LPFC_WWNN_TYPE) >+ pndl = lpfc_findnode_wwnn(phba->pport, >+ (struct lpfc_name *) idn.wwpn); >+ else >+ pndl = lpfc_findnode_wwpn(phba->pport, >+ (struct lpfc_name *) idn.wwpn); >+ >+ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) >+ return ENODEV; >+ >+ for (i0 = 0; >+ i0 < 10 && (pndl->nlp_flag & NLP_ELS_SND_MASK) == NLP_RNID_SND; >+ i0++) { >+ mdelay(1000); >+ } >+ >+ if (i0 == 10) { >+ pndl->nlp_flag &= ~NLP_RNID_SND; >+ return EBUSY; >+ } >+ >+ cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, (2 * sizeof(uint32_t)), 0, >+ pndl, pndl->nlp_DID, ELS_CMD_RNID); >+ if (!cmdiocbq) >+ return ENOMEM; >+ >+ /* >+ * Context2 is used by prep/free to locate cmd and rsp buffers, >+ * but context2 is also used by iocb_wait to hold a rspiocb ptr. 
>+ * The rsp iocbq can be returned from the completion routine for >+ * iocb_wait, so save the prep/free value locally . It will be >+ * restored after returning from iocb_wait. >+ */ >+ context2 = cmdiocbq->context2; >+ >+ if ((rspiocbq = lpfc_sli_get_iocbq(phba)) == NULL) { >+ rc = ENOMEM; >+ goto sndrndqwt; >+ } >+ rsp = &(rspiocbq->iocb); >+ >+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *) cmdiocbq->context2)->virt); >+ *pcmd++ = ELS_CMD_RNID; >+ >+ memset((void *) pcmd, 0, sizeof (RNID)); >+ ((RNID *) pcmd)->Format = 0; >+ ((RNID *) pcmd)->Format = RNID_TOPOLOGY_DISC; >+ cmdiocbq->context1 = NULL; >+ cmdiocbq->context2 = NULL; >+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; >+ >+ pndl->nlp_flag |= NLP_RNID_SND; >+ cmdiocbq->iocb.ulpTimeout = (phba->fc_ratov * 2) + 3 ; >+ >+ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq, >+ (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); >+ pndl->nlp_flag &= ~NLP_RNID_SND; >+ cmdiocbq->context2 = context2; >+ >+ if (rc == IOCB_TIMEDOUT) { >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ cmdiocbq->context1 = NULL; >+ cmdiocbq->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl; >+ return EIO; >+ } >+ >+ if (rc != IOCB_SUCCESS) { >+ rc = EIO; >+ goto sndrndqwt; >+ } >+ >+ if (rsp->ulpStatus == IOSTAT_SUCCESS) { >+ struct lpfc_dmabuf *buf_ptr1, *buf_ptr; >+ buf_ptr1 = (struct lpfc_dmabuf *)(cmdiocbq->context2); >+ buf_ptr = list_entry(buf_ptr1->list.next, struct lpfc_dmabuf, >+ list); >+ psta = (uint32_t*)buf_ptr->virt; >+ prsp = (RNID *) (psta + 1); /* then rnid response data */ >+ rtnbfrsiz = prsp->CommonLen + prsp->SpecificLen + >+ sizeof (uint32_t); >+ memcpy((uint8_t *) dataout, (uint8_t *) psta, rtnbfrsiz); >+ >+ if (rtnbfrsiz > cip->lpfc_outsz) >+ rtnbfrsiz = cip->lpfc_outsz; >+ if (copy_to_user >+ ((void __user *) cip->lpfc_arg2, (uint8_t *) & rtnbfrsiz, >+ sizeof (int))) >+ rc = EIO; >+ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { >+ uint8_t ls_rjt[8]; >+ uint32_t *ls_rjtrsp; >+ >+ ls_rjtrsp = (uint32_t*)(ls_rjt + 4); >+ >+ /* 
construct the LS_RJT payload */ >+ ls_rjt[0] = 0x01; >+ ls_rjt[1] = 0x00; >+ ls_rjt[2] = 0x00; >+ ls_rjt[3] = 0x00; >+ >+ *ls_rjtrsp = be32_to_cpu(rspiocbq->iocb.un.ulpWord[4]); >+ rtnbfrsiz = 8; >+ memcpy((uint8_t *) dataout, (uint8_t *) ls_rjt, rtnbfrsiz); >+ if (copy_to_user >+ ((void __user *) cip->lpfc_arg2, (uint8_t *) & rtnbfrsiz, >+ sizeof (int))) >+ rc = EIO; >+ } else >+ rc = EACCES; >+ >+sndrndqwt: >+ if (cmdiocbq) >+ lpfc_els_free_iocb(phba, cmdiocbq); >+ >+ if (rspiocbq) >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ >+ return rc; >+} >+ >+static void >+lpfc_ioctl_timeout_iocb_cmpl(struct lpfc_hba * phba, >+ struct lpfc_iocbq * cmd_iocb_q, >+ struct lpfc_iocbq * rsp_iocb_q) >+{ >+ struct lpfc_timedout_iocb_ctxt *iocb_ctxt = cmd_iocb_q->context1; >+ >+ if (!iocb_ctxt) { >+ if (cmd_iocb_q->context2) >+ lpfc_els_free_iocb(phba, cmd_iocb_q); >+ else >+ lpfc_sli_release_iocbq(phba,cmd_iocb_q); >+ return; >+ } >+ >+ if (iocb_ctxt->outdmp) >+ dfc_cmd_data_free(phba, iocb_ctxt->outdmp); >+ >+ if (iocb_ctxt->indmp) >+ dfc_cmd_data_free(phba, iocb_ctxt->indmp); >+ >+ if (iocb_ctxt->mp) { >+ lpfc_mbuf_free(phba, >+ iocb_ctxt->mp->virt, >+ iocb_ctxt->mp->phys); >+ kfree(iocb_ctxt->mp); >+ } >+ >+ if (iocb_ctxt->bmp) { >+ lpfc_mbuf_free(phba, >+ iocb_ctxt->bmp->virt, >+ iocb_ctxt->bmp->phys); >+ kfree(iocb_ctxt->bmp); >+ } >+ >+ lpfc_sli_release_iocbq(phba,cmd_iocb_q); >+ >+ if (iocb_ctxt->rspiocbq) >+ lpfc_sli_release_iocbq(phba, iocb_ctxt->rspiocbq); >+ >+ kfree(iocb_ctxt); >+} >+ >+ >+static int >+lpfc_ioctl_send_els(struct lpfc_hba * phba, >+ struct lpfcCmdInput * cip, void *dataout) >+{ >+ struct lpfc_sli *psli = &phba->sli; >+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; >+ struct lpfc_iocbq *cmdiocbq, *rspiocbq; >+ struct lpfc_dmabufext *pcmdext = NULL, *prspext = NULL; >+ struct lpfc_nodelist *pndl; >+ struct ulp_bde64 *bpl; >+ IOCB_t *rsp; >+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist = NULL; >+ uint16_t rpi = 0; >+ struct nport_id destID; >+ 
int rc = 0; >+ uint32_t cmdsize; >+ uint32_t rspsize; >+ uint32_t elscmd; >+ int iocb_status; >+ >+ elscmd = *(uint32_t *)cip->lpfc_arg2; >+ cmdsize = cip->lpfc_arg4; >+ rspsize = cip->lpfc_outsz; >+ >+ if (copy_from_user((uint8_t *)&destID, (void __user *)cip->lpfc_arg1, >+ sizeof(struct nport_id))) >+ return EIO; >+ >+ if ((rspiocbq = lpfc_sli_get_iocbq(phba)) == NULL) >+ return ENOMEM; >+ >+ rsp = &rspiocbq->iocb; >+ >+ if (destID.idType == 0) >+ pndl = lpfc_findnode_wwpn(phba->pport, >+ (struct lpfc_name *)&destID.wwpn); >+ else { >+ destID.d_id = (destID.d_id & Mask_DID); >+ pndl = lpfc_findnode_did(phba->pport, destID.d_id); >+ } >+ >+ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) { >+ if (destID.idType == 0) { >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return ENODEV; >+ } >+ if (!pndl) { >+ pndl = kmalloc(sizeof (struct lpfc_nodelist), >+ GFP_KERNEL); >+ if (!pndl) { >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return ENODEV; >+ } >+ lpfc_nlp_init(phba->pport, pndl, destID.d_id); >+ lpfc_nlp_set_state(phba->pport, pndl, NLP_STE_NPR_NODE); >+ } else { >+ pndl = lpfc_enable_node(phba->pport, pndl, >+ NLP_STE_NPR_NODE); >+ if (!pndl) { >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return ENODEV; >+ } >+ } >+ } else { >+ lpfc_nlp_get(pndl); >+ rpi = pndl->nlp_rpi; >+ } >+ >+ cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, pndl, >+ pndl->nlp_DID, elscmd); >+ >+ /* release the new pndl once the iocb complete */ >+ lpfc_nlp_put(pndl); >+ >+ if (cmdiocbq == NULL) { >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return EIO; >+ } >+ >+ pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; >+ prsp = (struct lpfc_dmabuf *) pcmd->list.next; >+ >+ /* >+ * If we exceed the size of the allocated mbufs we need to >+ * free them and allocate our own. 
>+ */ >+ if ((cmdsize > LPFC_BPL_SIZE) || (rspsize > LPFC_BPL_SIZE)) { >+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); >+ kfree(pcmd); >+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys); >+ kfree(prsp); >+ cmdiocbq->context2 = NULL; >+ >+ pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; >+ bpl = (struct ulp_bde64 *) pbuflist->virt; >+ pcmdext = dfc_cmd_data_alloc(phba, cip->lpfc_arg2, >+ bpl, cmdsize); >+ if (!pcmdext) { >+ lpfc_els_free_iocb(phba, cmdiocbq); >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return ENOMEM; >+ } >+ bpl += pcmdext->flag; >+ prspext = dfc_cmd_data_alloc(phba, NULL, bpl, rspsize); >+ if (!prspext) { >+ dfc_cmd_data_free(phba, pcmdext); >+ lpfc_els_free_iocb(phba, cmdiocbq); >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return ENOMEM; >+ } >+ } else { >+ /* Copy the command from user space */ >+ if (copy_from_user((uint8_t *) pcmd->virt, >+ (void __user *) cip->lpfc_arg2, >+ cmdsize)) { >+ lpfc_els_free_iocb(phba, cmdiocbq); >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return EIO; >+ } >+ } >+ >+ cmdiocbq->iocb.ulpContext = rpi; >+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; >+ cmdiocbq->context1 = NULL; >+ cmdiocbq->context2 = NULL; >+ >+ iocb_status = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq, >+ (phba->fc_ratov*2) + LPFC_DRVR_TIMEOUT); >+ rc = iocb_status; >+ >+ if (rc == IOCB_SUCCESS) { >+ if (rsp->ulpStatus == IOSTAT_SUCCESS) { >+ if (rspsize < (rsp->un.ulpWord[0] & 0xffffff)) { >+ rc = ERANGE; >+ } else { >+ rspsize = rsp->un.ulpWord[0] & 0xffffff; >+ if (pbuflist) { >+ if (dfc_rsp_data_copy( >+ phba, >+ (uint8_t *) cip->lpfc_dataout, >+ prspext, >+ rspsize)) { >+ rc = EIO; >+ } else { >+ cip->lpfc_outsz = 0; >+ } >+ } else { >+ if (copy_to_user( (void __user *) >+ cip->lpfc_dataout, >+ (uint8_t *) prsp->virt, >+ rspsize)) { >+ rc = EIO; >+ } else { >+ cip->lpfc_outsz = 0; >+ } >+ } >+ } >+ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { >+ uint8_t ls_rjt[8]; >+ >+ /* construct the LS_RJT payload */ >+ ls_rjt[0] = 0x01; >+ 
ls_rjt[1] = 0x00; >+ ls_rjt[2] = 0x00; >+ ls_rjt[3] = 0x00; >+ memcpy(&ls_rjt[4], (uint8_t *) &rsp->un.ulpWord[4], >+ sizeof(uint32_t)); >+ >+ if (rspsize < 8) >+ rc = ERANGE; >+ else >+ rspsize = 8; >+ >+ memcpy(dataout, ls_rjt, rspsize); >+ } else >+ rc = EIO; >+ >+ if (copy_to_user((void __user *)cip->lpfc_arg3, >+ (uint8_t *)&rspsize, sizeof(uint32_t))) >+ rc = EIO; >+ } else { >+ rc = EIO; >+ } >+ >+ if (pbuflist) { >+ dfc_cmd_data_free(phba, pcmdext); >+ dfc_cmd_data_free(phba, prspext); >+ } else >+ cmdiocbq->context2 = (uint8_t *) pcmd; >+ >+ if (iocb_status != IOCB_TIMEDOUT) >+ lpfc_els_free_iocb(phba, cmdiocbq); >+ >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ return rc; >+} >+ >+static int >+lpfc_ioctl_send_mgmt_rsp(struct lpfc_hba * phba, >+ struct lpfcCmdInput * cip) >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); >+ struct ulp_bde64 *bpl; >+ struct lpfc_dmabuf *bmp = NULL; >+ struct lpfc_dmabufext *indmp = NULL; >+ uint32_t tag = (uint32_t)cip->lpfc_flag; /* XRI for XMIT_SEQUENCE */ >+ unsigned long reqbfrcnt = (unsigned long)cip->lpfc_arg2; >+ int rc = 0; >+ unsigned long iflag; >+ >+ if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) { >+ rc = ERANGE; >+ return rc; >+ } >+ >+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); >+ if (!bmp) { >+ rc = ENOMEM; >+ goto send_mgmt_rsp_exit; >+ } >+ spin_lock_irqsave(shost->host_lock, iflag); >+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); >+ spin_unlock_irqrestore(shost->host_lock, iflag); /* remove */ >+ if (!bmp->virt) { >+ rc = ENOMEM; >+ goto send_mgmt_rsp_free_bmp; >+ } >+ >+ INIT_LIST_HEAD(&bmp->list); >+ bpl = (struct ulp_bde64 *) bmp->virt; >+ >+ indmp = dfc_cmd_data_alloc(phba, cip->lpfc_arg1, bpl, reqbfrcnt); >+ if (!indmp) { >+ rc = ENOMEM; >+ goto send_mgmt_rsp_free_bmpvirt; >+ } >+ rc = lpfc_issue_ct_rsp(phba, tag, bmp, indmp); >+ if (rc) { >+ if (rc == IOCB_TIMEDOUT) >+ rc = ETIMEDOUT; >+ else if (rc == IOCB_ERROR) >+ rc = EACCES; >+ } >+ >+ 
dfc_cmd_data_free(phba, indmp); >+send_mgmt_rsp_free_bmpvirt: >+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys); >+send_mgmt_rsp_free_bmp: >+ kfree(bmp); >+send_mgmt_rsp_exit: >+ return rc; >+} >+ >+static int >+lpfc_ioctl_send_mgmt_cmd(struct lpfc_hba * phba, >+ struct lpfcCmdInput * cip, void *dataout) >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); >+ struct lpfc_nodelist *pndl = NULL; >+ struct ulp_bde64 *bpl = NULL; >+ struct lpfc_name findwwn; >+ uint32_t finddid, timeout; >+ struct lpfc_iocbq *cmdiocbq = NULL, *rspiocbq = NULL; >+ struct lpfc_dmabufext *indmp = NULL, *outdmp = NULL; >+ IOCB_t *cmd = NULL, *rsp = NULL; >+ struct lpfc_dmabuf *bmp = NULL; >+ struct lpfc_sli *psli = NULL; >+ struct lpfc_sli_ring *pring = NULL; >+ int i0 = 0, rc = 0, reqbfrcnt, snsbfrcnt; >+ struct lpfc_timedout_iocb_ctxt *iocb_ctxt; >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ >+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { >+ rc = EACCES; >+ goto send_mgmt_cmd_exit; >+ } >+ >+ reqbfrcnt = cip->lpfc_arg4; >+ snsbfrcnt = cip->lpfc_arg5; >+ >+ if (!reqbfrcnt || !snsbfrcnt >+ || (reqbfrcnt + snsbfrcnt > 80 * BUF_SZ_4K)) { >+ rc = ERANGE; >+ goto send_mgmt_cmd_exit; >+ } >+ >+ if (phba->pport->port_state != LPFC_VPORT_READY) { >+ rc = ENODEV; >+ goto send_mgmt_cmd_exit; >+ } >+ >+ if (cip->lpfc_cmd == LPFC_HBA_SEND_MGMT_CMD) { >+ rc = copy_from_user(&findwwn, (void __user *)cip->lpfc_arg3, >+ sizeof(struct lpfc_name)); >+ if (rc) { >+ rc = EIO; >+ goto send_mgmt_cmd_exit; >+ } >+ pndl = lpfc_findnode_wwpn(phba->pport, &findwwn); >+ /* Do additional get to pndl found so that at the end of the >+ * function we can do unconditional lpfc_nlp_put on it. 
>+ */ >+ if (pndl && NLP_CHK_NODE_ACT(pndl)) >+ lpfc_nlp_get(pndl); >+ } else { >+ finddid = (uint32_t)(unsigned long)cip->lpfc_arg3; >+ pndl = lpfc_findnode_did(phba->pport, finddid); >+ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) { >+ if (phba->pport->fc_flag & FC_FABRIC) { >+ if (!pndl) { >+ pndl = kmalloc(sizeof >+ (struct lpfc_nodelist), >+ GFP_KERNEL); >+ if (!pndl) { >+ rc = ENODEV; >+ goto send_mgmt_cmd_exit; >+ } >+ lpfc_nlp_init(phba->pport, pndl, >+ finddid); >+ lpfc_nlp_set_state(phba->pport, >+ pndl, NLP_STE_PLOGI_ISSUE); >+ /* Indicate free ioctl allocated >+ * memory for ndlp after it's done >+ */ >+ NLP_SET_FREE_REQ(pndl); >+ } else >+ lpfc_enable_node(phba->pport, >+ pndl, NLP_STE_PLOGI_ISSUE); >+ >+ if (lpfc_issue_els_plogi(phba->pport, >+ pndl->nlp_DID, 0)) { >+ rc = ENODEV; >+ goto send_mgmt_cmd_free_pndl_exit; >+ } >+ >+ /* Allow the node to complete discovery */ >+ while (i0++ < 4) { >+ if (pndl->nlp_state == >+ NLP_STE_UNMAPPED_NODE) >+ break; >+ msleep(500); >+ } >+ >+ if (i0 == 4) { >+ rc = ENODEV; >+ goto send_mgmt_cmd_free_pndl_exit; >+ } >+ } else { >+ rc = ENODEV; >+ goto send_mgmt_cmd_exit; >+ } >+ } else >+ /* Do additional get to pndl found so at the end of >+ * the function we can do unconditional lpfc_nlp_put. 
>+ */ >+ lpfc_nlp_get(pndl); >+ } >+ >+ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) { >+ rc = ENODEV; >+ goto send_mgmt_cmd_exit; >+ } >+ >+ if (pndl->nlp_flag & NLP_ELS_SND_MASK) { >+ rc = ENODEV; >+ goto send_mgmt_cmd_free_pndl_exit; >+ } >+ >+ spin_lock_irq(shost->host_lock); >+ cmdiocbq = lpfc_sli_get_iocbq(phba); >+ if (!cmdiocbq) { >+ rc = ENOMEM; >+ spin_unlock_irq(shost->host_lock); >+ goto send_mgmt_cmd_free_pndl_exit; >+ } >+ cmd = &cmdiocbq->iocb; >+ >+ rspiocbq = lpfc_sli_get_iocbq(phba); >+ if (!rspiocbq) { >+ rc = ENOMEM; >+ goto send_mgmt_cmd_free_cmdiocbq; >+ } >+ spin_unlock_irq(shost->host_lock); >+ >+ rsp = &rspiocbq->iocb; >+ >+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); >+ if (!bmp) { >+ rc = ENOMEM; >+ spin_lock_irq(shost->host_lock); >+ goto send_mgmt_cmd_free_rspiocbq; >+ } >+ >+ spin_lock_irq(shost->host_lock); >+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); >+ if (!bmp->virt) { >+ rc = ENOMEM; >+ goto send_mgmt_cmd_free_bmp; >+ } >+ spin_unlock_irq(shost->host_lock); >+ >+ INIT_LIST_HEAD(&bmp->list); >+ bpl = (struct ulp_bde64 *) bmp->virt; >+ indmp = dfc_cmd_data_alloc(phba, cip->lpfc_arg1, bpl, reqbfrcnt); >+ if (!indmp) { >+ rc = ENOMEM; >+ spin_lock_irq(shost->host_lock); >+ goto send_mgmt_cmd_free_bmpvirt; >+ } >+ >+ /* flag contains total number of BPLs for xmit */ >+ bpl += indmp->flag; >+ >+ outdmp = dfc_cmd_data_alloc(phba, NULL, bpl, snsbfrcnt); >+ if (!outdmp) { >+ rc = ENOMEM; >+ spin_lock_irq(shost->host_lock); >+ goto send_mgmt_cmd_free_indmp; >+ } >+ >+ cmd->un.genreq64.bdl.ulpIoTag32 = 0; >+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); >+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); >+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL; >+ cmd->un.genreq64.bdl.bdeSize = >+ (outdmp->flag + indmp->flag) * sizeof (struct ulp_bde64); >+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR; >+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); >+ cmd->un.genreq64.w5.hcsw.Dfctl = 0; >+ cmd->un.genreq64.w5.hcsw.Rctl = 
FC_UNSOL_CTL; >+ cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; >+ cmd->ulpBdeCount = 1; >+ cmd->ulpLe = 1; >+ cmd->ulpClass = CLASS3; >+ cmd->ulpContext = pndl->nlp_rpi; >+ cmd->ulpOwner = OWN_CHIP; >+ cmdiocbq->vport = phba->pport; >+ cmdiocbq->context1 = NULL; >+ cmdiocbq->context2 = NULL; >+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; >+ >+ if (cip->lpfc_flag == 0 ) >+ timeout = phba->fc_ratov * 2 ; >+ else >+ timeout = cip->lpfc_flag; >+ >+ cmd->ulpTimeout = timeout; >+ >+ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq, >+ timeout + LPFC_DRVR_TIMEOUT); >+ >+ if (rc == IOCB_TIMEDOUT) { >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ iocb_ctxt = kmalloc(sizeof(struct lpfc_timedout_iocb_ctxt), >+ GFP_KERNEL); >+ if (!iocb_ctxt) { >+ rc = EACCES; >+ goto send_mgmt_cmd_free_pndl_exit; >+ } >+ >+ cmdiocbq->context1 = iocb_ctxt; >+ cmdiocbq->context2 = NULL; >+ iocb_ctxt->rspiocbq = NULL; >+ iocb_ctxt->mp = NULL; >+ iocb_ctxt->bmp = bmp; >+ iocb_ctxt->outdmp = outdmp; >+ iocb_ctxt->lpfc_cmd = NULL; >+ iocb_ctxt->indmp = indmp; >+ >+ cmdiocbq->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl; >+ rc = EACCES; >+ goto send_mgmt_cmd_free_pndl_exit; >+ } >+ >+ if (rc != IOCB_SUCCESS) { >+ rc = EACCES; >+ goto send_mgmt_cmd_free_outdmp; >+ } >+ >+ if (rsp->ulpStatus) { >+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { >+ switch (rsp->un.ulpWord[4] & 0xff) { >+ case IOERR_SEQUENCE_TIMEOUT: >+ rc = ETIMEDOUT; >+ break; >+ case IOERR_INVALID_RPI: >+ rc = EFAULT; >+ break; >+ default: >+ rc = EACCES; >+ break; >+ } >+ goto send_mgmt_cmd_free_outdmp; >+ } >+ } else >+ outdmp->flag = rsp->un.genreq64.bdl.bdeSize; >+ >+ /* Copy back response data */ >+ if (outdmp->flag > snsbfrcnt) { >+ rc = ERANGE; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1209 C_CT Request error Data: x%x x%x\n", >+ outdmp->flag, BUF_SZ_4K); >+ goto send_mgmt_cmd_free_outdmp; >+ } >+ >+ /* copy back size of response, and response itself */ >+ memcpy(dataout, &outdmp->flag, sizeof (int)); >+ rc 
= dfc_rsp_data_copy (phba, cip->lpfc_arg2, outdmp, outdmp->flag); >+ if (rc) >+ rc = EIO; >+ >+send_mgmt_cmd_free_outdmp: >+ spin_lock_irq(shost->host_lock); >+ dfc_cmd_data_free(phba, outdmp); >+send_mgmt_cmd_free_indmp: >+ dfc_cmd_data_free(phba, indmp); >+send_mgmt_cmd_free_bmpvirt: >+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys); >+send_mgmt_cmd_free_bmp: >+ kfree(bmp); >+send_mgmt_cmd_free_rspiocbq: >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+send_mgmt_cmd_free_cmdiocbq: >+ lpfc_sli_release_iocbq(phba, cmdiocbq); >+ spin_unlock_irq(shost->host_lock); >+send_mgmt_cmd_free_pndl_exit: >+ lpfc_nlp_put(pndl); >+send_mgmt_cmd_exit: >+ return rc; >+} >+ >+static inline struct lpfcdfc_event * >+lpfcdfc_event_new(uint32_t ev_mask, >+ int ev_reg_id, >+ uint32_t ev_req_id) >+{ >+ struct lpfcdfc_event * evt = kzalloc(sizeof(*evt), GFP_KERNEL); >+ if (evt == NULL) >+ return NULL; >+ >+ INIT_LIST_HEAD(&evt->events_to_get); >+ INIT_LIST_HEAD(&evt->events_to_see); >+ evt->type_mask = ev_mask; >+ evt->req_id = ev_req_id; >+ evt->reg_id = ev_reg_id; >+ evt->wait_time_stamp = jiffies; >+ init_waitqueue_head(&evt->wq); >+ >+ return evt; >+} >+ >+static inline void lpfcdfc_event_free(struct lpfcdfc_event * evt) >+{ >+ struct event_data * ed; >+ >+ list_del(&evt->node); >+ >+ while(!list_empty(&evt->events_to_get)) { >+ ed = list_entry(evt->events_to_get.next, typeof(*ed), node); >+ list_del(&ed->node); >+ kfree(ed->data); >+ kfree(ed); >+ } >+ >+ while(!list_empty(&evt->events_to_see)) { >+ ed = list_entry(evt->events_to_see.next, typeof(*ed), node); >+ list_del(&ed->node); >+ kfree(ed->data); >+ kfree(ed); >+ } >+ >+ kfree(evt); >+} >+ >+#define lpfcdfc_event_ref(evt) evt->ref++ >+ >+#define lpfcdfc_event_unref(evt) \ >+ if (--evt->ref < 0) \ >+ lpfcdfc_event_free(evt); >+ >+static int >+lpfc_ioctl_hba_get_event(struct lpfc_hba * phba, >+ struct lpfcCmdInput * cip, >+ void **dataout, int *data_size) >+{ >+ uint32_t ev_mask = ((uint32_t)(unsigned long)cip->lpfc_arg3 & >+ 
FC_REG_EVENT_MASK); >+ int ev_reg_id = (uint32_t) cip->lpfc_flag; >+ uint32_t ev_req_id = 0; >+ struct lpfcdfc_host * dfchba; >+ struct lpfcdfc_event * evt; >+ struct event_data * evt_dat = NULL; >+ int ret_val = 0; >+ >+ /* All other events supported through NET_LINK_EVENTs */ >+ if (ev_mask != FC_REG_CT_EVENT) >+ return ENOENT; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) >+ if (dfchba->phba == phba) >+ break; >+ mutex_unlock(&lpfcdfc_lock); >+ >+ BUG_ON(&dfchba->node == &lpfcdfc_hosts); >+ >+ if ((ev_mask == FC_REG_CT_EVENT) && >+ copy_from_user(&ev_req_id, (void __user *)cip->lpfc_arg2, >+ sizeof (uint32_t))) >+ return EIO; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(evt, &dfchba->ev_waiters, node) >+ if (evt->reg_id == ev_reg_id) { >+ if(list_empty(&evt->events_to_get)) >+ break; >+ lpfcdfc_event_ref(evt); >+ evt->wait_time_stamp = jiffies; >+ evt_dat = list_entry(evt->events_to_get.prev, >+ struct event_data, node); >+ list_del(&evt_dat->node); >+ break; >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ >+ if (evt_dat == NULL) >+ return ENOENT; >+ >+ BUG_ON((ev_mask & evt_dat->type) == 0); >+ >+ if (evt_dat->len > cip->lpfc_outsz) >+ evt_dat->len = cip->lpfc_outsz; >+ >+ if (copy_to_user((void __user *)cip->lpfc_arg2, &evt_dat->immed_dat, >+ sizeof (uint32_t)) || >+ copy_to_user((void __user *)cip->lpfc_arg1, &evt_dat->len, >+ sizeof (uint32_t))) { >+ ret_val = EIO; >+ goto error_get_event_exit; >+ } >+ >+ if (evt_dat->len > 0) { >+ *data_size = evt_dat->len; >+ *dataout = kmalloc(*data_size, GFP_KERNEL); >+ if (*dataout) >+ memcpy(*dataout, evt_dat->data, *data_size); >+ else >+ *data_size = 0; >+ >+ } else >+ *data_size = 0; >+ ret_val = 0; >+ >+error_get_event_exit: >+ >+ kfree(evt_dat->data); >+ kfree(evt_dat); >+ mutex_lock(&lpfcdfc_lock); >+ lpfcdfc_event_unref(evt); >+ mutex_unlock(&lpfcdfc_lock); >+ >+ return ret_val; >+} >+ >+static int >+lpfc_ioctl_hba_set_event(struct lpfc_hba * phba, >+ struct 
lpfcCmdInput * cip) >+{ >+ uint32_t ev_mask = ((uint32_t)(unsigned long)cip->lpfc_arg3 & >+ FC_REG_EVENT_MASK); >+ int ev_reg_id = cip->lpfc_flag; >+ uint32_t ev_req_id = 0; >+ >+ struct lpfcdfc_host * dfchba; >+ struct lpfcdfc_event * evt; >+ >+ int ret_val = 0; >+ >+ /* All other events supported through NET_LINK_EVENTs */ >+ if (ev_mask != FC_REG_CT_EVENT) >+ return ENOENT; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) { >+ if (dfchba->phba == phba) >+ break; >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ BUG_ON(&dfchba->node == &lpfcdfc_hosts); >+ >+ if (ev_mask == FC_REG_CT_EVENT) >+ ev_req_id = ((uint32_t)(unsigned long)cip->lpfc_arg2); >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(evt, &dfchba->ev_waiters, node) { >+ if (evt->reg_id == ev_reg_id) { >+ lpfcdfc_event_ref(evt); >+ evt->wait_time_stamp = jiffies; >+ break; >+ } >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ >+ if (&evt->node == &dfchba->ev_waiters) { >+ /* no event waiting struct yet - first call */ >+ evt = lpfcdfc_event_new(ev_mask, ev_reg_id, ev_req_id); >+ if (evt == NULL) >+ return ENOMEM; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_add(&evt->node, &dfchba->ev_waiters); >+ lpfcdfc_event_ref(evt); >+ mutex_unlock(&lpfcdfc_lock); >+ } >+ >+ evt->waiting = 1; >+ if (wait_event_interruptible(evt->wq, >+ (!list_empty(&evt->events_to_see) || >+ dfchba->blocked))) { >+ mutex_lock(&lpfcdfc_lock); >+ lpfcdfc_event_unref(evt); /* release ref */ >+ lpfcdfc_event_unref(evt); /* delete */ >+ mutex_unlock(&lpfcdfc_lock); >+ return EINTR; >+ } >+ >+ mutex_lock(&lpfcdfc_lock); >+ if (dfchba->blocked) { >+ lpfcdfc_event_unref(evt); >+ lpfcdfc_event_unref(evt); >+ mutex_unlock(&lpfcdfc_lock); >+ return ENODEV; >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ >+ evt->wait_time_stamp = jiffies; >+ evt->waiting = 0; >+ >+ BUG_ON(list_empty(&evt->events_to_see)); >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_move(evt->events_to_see.prev, &evt->events_to_get); >+ lpfcdfc_event_unref(evt); /* 
release ref */ >+ mutex_unlock(&lpfcdfc_lock); >+ >+ return ret_val; >+} >+ >+static int >+lpfc_ioctl_loopback_mode(struct lpfc_hba *phba, >+ struct lpfcCmdInput *cip, void *dataout) >+{ >+ struct Scsi_Host *shost; >+ struct lpfc_sli *psli = &phba->sli; >+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; >+ uint32_t link_flags = cip->lpfc_arg4; >+ uint32_t timeout = cip->lpfc_arg5 * 100; >+ struct lpfc_vport **vports; >+ LPFC_MBOXQ_t *pmboxq; >+ int mbxstatus; >+ int i = 0; >+ int rc = 0; >+ >+ if ((phba->link_state == LPFC_HBA_ERROR) || >+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || >+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) >+ return EACCES; >+ >+ if ((pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL)) == 0) >+ return ENOMEM; >+ >+ vports = lpfc_create_vport_work_array(phba); >+ if (vports != NULL) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++){ >+ shost = lpfc_shost_from_vport(vports[i]); >+ scsi_block_requests(shost); >+ } >+ lpfc_destroy_vport_work_array(phba, vports); >+ } >+ else { >+ shost = lpfc_shost_from_vport(phba->pport); >+ scsi_block_requests(shost); >+ } >+ >+ while (pring->txcmplq_cnt) { >+ if (i++ > 500) /* wait up to 5 seconds */ >+ break; >+ >+ mdelay(10); >+ } >+ >+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); >+ pmboxq->mb.mbxCommand = MBX_DOWN_LINK; >+ pmboxq->mb.mbxOwner = OWN_HOST; >+ >+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); >+ >+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { >+ >+ /* wait for link down before proceeding */ >+ i = 0; >+ while (phba->link_state != LPFC_LINK_DOWN) { >+ if (i++ > timeout) { >+ rc = ETIMEDOUT; >+ goto loopback_mode_exit; >+ } >+ msleep(10); >+ } >+ >+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); >+ if (link_flags == INTERNAL_LOOP_BACK) >+ pmboxq->mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB; >+ else >+ pmboxq->mb.un.varInitLnk.link_flags = >+ FLAGS_TOPOLOGY_MODE_LOOP; >+ >+ pmboxq->mb.mbxCommand = MBX_INIT_LINK; >+ 
pmboxq->mb.mbxOwner = OWN_HOST; >+ >+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, >+ LPFC_MBOX_TMO); >+ >+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->mb.mbxStatus)) >+ rc = ENODEV; >+ else { >+ phba->link_flag |= LS_LOOPBACK_MODE; >+ /* wait for the link attention interrupt */ >+ msleep(100); >+ >+ i = 0; >+ while (phba->link_state != LPFC_HBA_READY) { >+ if (i++ > timeout) { >+ rc = ETIMEDOUT; >+ break; >+ } >+ msleep(10); >+ } >+ } >+ } else >+ rc = ENODEV; >+ >+loopback_mode_exit: >+ vports = lpfc_create_vport_work_array(phba); >+ if (vports != NULL) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++){ >+ shost = lpfc_shost_from_vport(vports[i]); >+ scsi_unblock_requests(shost); >+ } >+ lpfc_destroy_vport_work_array(phba, vports); >+ } >+ else { >+ shost = lpfc_shost_from_vport(phba->pport); >+ scsi_unblock_requests(shost); >+ } >+ >+ /* >+ * Let SLI layer release mboxq if mbox command completed after timeout. >+ */ >+ if (mbxstatus != MBX_TIMEOUT) >+ mempool_free( pmboxq, phba->mbox_mem_pool); >+ >+ return rc; >+} >+ >+static int lpfcdfc_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) >+{ >+ LPFC_MBOXQ_t *mbox; >+ struct lpfc_dmabuf *dmabuff; >+ int status; >+ >+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >+ if (mbox == NULL) >+ return ENOMEM; >+ >+ status = lpfc_reg_login(phba, 0, phba->pport->fc_myDID, >+ (uint8_t *)&phba->pport->fc_sparam, mbox, 0); >+ if (status) { >+ mempool_free(mbox, phba->mbox_mem_pool); >+ return ENOMEM; >+ } >+ >+ dmabuff = (struct lpfc_dmabuf *) mbox->context1; >+ mbox->context1 = NULL; >+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); >+ >+ if ((status != MBX_SUCCESS) || (mbox->mb.mbxStatus)) { >+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); >+ kfree(dmabuff); >+ if (status != MBX_TIMEOUT) >+ mempool_free(mbox, phba->mbox_mem_pool); >+ return ENODEV; >+ } >+ >+ *rpi = mbox->mb.un.varWords[0]; >+ >+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); >+ kfree(dmabuff); >+ 
mempool_free(mbox, phba->mbox_mem_pool); >+ >+ return 0; >+} >+ >+static int lpfcdfc_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) >+{ >+ LPFC_MBOXQ_t * mbox; >+ int status; >+ >+ /* Allocate mboxq structure */ >+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >+ if (mbox == NULL) >+ return ENOMEM; >+ >+ lpfc_unreg_login(phba, 0, rpi, mbox); >+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); >+ >+ if ((status != MBX_SUCCESS) || (mbox->mb.mbxStatus)) { >+ if (status != MBX_TIMEOUT) >+ mempool_free(mbox, phba->mbox_mem_pool); >+ return EIO; >+ } >+ >+ mempool_free(mbox, phba->mbox_mem_pool); >+ return 0; >+} >+ >+ >+static int lpfcdfc_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, >+ uint16_t *txxri, uint16_t * rxxri) >+{ >+ struct lpfc_sli *psli = &phba->sli; >+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; >+ >+ struct lpfcdfc_host * dfchba; >+ struct lpfcdfc_event * evt; >+ >+ struct lpfc_iocbq *cmdiocbq, *rspiocbq; >+ IOCB_t *cmd, *rsp; >+ >+ struct lpfc_dmabuf * dmabuf; >+ struct ulp_bde64 *bpl = NULL; >+ struct lpfc_sli_ct_request *ctreq = NULL; >+ >+ int ret_val = 0; >+ >+ *txxri = 0; >+ *rxxri = 0; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) { >+ if (dfchba->phba == phba) >+ break; >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ BUG_ON(&dfchba->node == &lpfcdfc_hosts); >+ >+ evt = lpfcdfc_event_new(FC_REG_CT_EVENT, current->pid, >+ SLI_CT_ELX_LOOPBACK); >+ if (evt == NULL) >+ return ENOMEM; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_add(&evt->node, &dfchba->ev_waiters); >+ lpfcdfc_event_ref(evt); >+ mutex_unlock(&lpfcdfc_lock); >+ >+ cmdiocbq = lpfc_sli_get_iocbq(phba); >+ rspiocbq = lpfc_sli_get_iocbq(phba); >+ >+ dmabuf = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); >+ if (dmabuf) { >+ dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); >+ INIT_LIST_HEAD(&dmabuf->list); >+ bpl = (struct ulp_bde64 *) dmabuf->virt; >+ memset(bpl, 0, sizeof(*bpl)); >+ ctreq = (struct 
lpfc_sli_ct_request *)(bpl + 1); >+ bpl->addrHigh = >+ le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl))); >+ bpl->addrLow = >+ le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl))); >+ bpl->tus.f.bdeFlags = 0; >+ bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; >+ bpl->tus.w = le32_to_cpu(bpl->tus.w); >+ } >+ >+ if (cmdiocbq == NULL || rspiocbq == NULL || >+ dmabuf == NULL || bpl == NULL || ctreq == NULL) { >+ ret_val = ENOMEM; >+ goto err_get_xri_exit; >+ } >+ >+ cmd = &cmdiocbq->iocb; >+ rsp = &rspiocbq->iocb; >+ >+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); >+ >+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; >+ ctreq->RevisionId.bits.InId = 0; >+ ctreq->FsType = SLI_CT_ELX_LOOPBACK; >+ ctreq->FsSubType = 0; >+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; >+ ctreq->CommandResponse.bits.Size = 0; >+ >+ >+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys); >+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys); >+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDL; >+ cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl); >+ >+ cmd->un.xseq64.w5.hcsw.Fctl = LA; >+ cmd->un.xseq64.w5.hcsw.Dfctl = 0; >+ cmd->un.xseq64.w5.hcsw.Rctl = FC_UNSOL_CTL; >+ cmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; >+ >+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; >+ cmd->ulpBdeCount = 1; >+ cmd->ulpLe = 1; >+ cmd->ulpClass = CLASS3; >+ cmd->ulpContext = rpi; >+ >+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; >+ cmdiocbq->vport = phba->pport; >+ >+ ret_val = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq, >+ (phba->fc_ratov * 2) >+ + LPFC_DRVR_TIMEOUT); >+ if (ret_val) >+ goto err_get_xri_exit; >+ >+ *txxri = rsp->ulpContext; >+ >+ evt->waiting = 1; >+ evt->wait_time_stamp = jiffies; >+ ret_val = wait_event_interruptible_timeout( >+ evt->wq, !list_empty(&evt->events_to_see), >+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); >+ if (list_empty(&evt->events_to_see)) >+ ret_val = (ret_val) ? 
EINTR : ETIMEDOUT; >+ else { >+ ret_val = IOCB_SUCCESS; >+ mutex_lock(&lpfcdfc_lock); >+ list_move(evt->events_to_see.prev, &evt->events_to_get); >+ mutex_unlock(&lpfcdfc_lock); >+ *rxxri = (list_entry(evt->events_to_get.prev, >+ typeof(struct event_data), >+ node))->immed_dat; >+ } >+ evt->waiting = 0; >+ >+err_get_xri_exit: >+ mutex_lock(&lpfcdfc_lock); >+ lpfcdfc_event_unref(evt); /* release ref */ >+ lpfcdfc_event_unref(evt); /* delete */ >+ mutex_unlock(&lpfcdfc_lock); >+ >+ if(dmabuf) { >+ if(dmabuf->virt) >+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); >+ kfree(dmabuf); >+ } >+ >+ if (cmdiocbq && (ret_val != IOCB_TIMEDOUT)) >+ lpfc_sli_release_iocbq(phba, cmdiocbq); >+ if (rspiocbq) >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ >+ return ret_val; >+} >+ >+static int lpfcdfc_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, >+ size_t len) >+{ >+ struct lpfc_sli *psli = &phba->sli; >+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; >+ struct lpfc_iocbq *cmdiocbq; >+ IOCB_t *cmd = NULL; >+ struct list_head head, *curr, *next; >+ struct lpfc_dmabuf *rxbmp; >+ struct lpfc_dmabuf *dmp; >+ struct lpfc_dmabuf *mp[2] = {NULL, NULL}; >+ struct ulp_bde64 *rxbpl = NULL; >+ uint32_t num_bde; >+ struct lpfc_dmabufext *rxbuffer = NULL; >+ int ret_val = 0; >+ int i = 0; >+ >+ cmdiocbq = lpfc_sli_get_iocbq(phba); >+ rxbmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); >+ if (rxbmp != NULL) { >+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); >+ INIT_LIST_HEAD(&rxbmp->list); >+ rxbpl = (struct ulp_bde64 *) rxbmp->virt; >+ rxbuffer = dfc_cmd_data_alloc(phba, NULL, rxbpl, len); >+ } >+ >+ if(cmdiocbq == NULL || rxbmp == NULL || >+ rxbpl == NULL || rxbuffer == NULL) { >+ ret_val = ENOMEM; >+ goto err_post_rxbufs_exit; >+ } >+ >+ /* Queue buffers for the receive exchange */ >+ num_bde = (uint32_t)rxbuffer->flag; >+ dmp = &rxbuffer->dma; >+ >+ cmd = &cmdiocbq->iocb; >+ i = 0; >+ >+ INIT_LIST_HEAD(&head); >+ list_add_tail(&head, &dmp->list); >+ 
list_for_each_safe(curr, next, &head) { >+ mp[i] = list_entry(curr, struct lpfc_dmabuf, list); >+ list_del(curr); >+ >+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { >+ mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); >+ cmd->un.quexri64cx.buff.bde.addrHigh = >+ putPaddrHigh(mp[i]->phys); >+ cmd->un.quexri64cx.buff.bde.addrLow = >+ putPaddrLow(mp[i]->phys); >+ cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = >+ ((struct lpfc_dmabufext *)mp[i])->size; >+ cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; >+ cmd->ulpCommand = CMD_QUE_XRI64_CX; >+ cmd->ulpPU = 0; >+ cmd->ulpLe = 1; >+ cmd->ulpBdeCount = 1; >+ cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; >+ >+ } else { >+ cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); >+ cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); >+ cmd->un.cont64[i].tus.f.bdeSize = >+ ((struct lpfc_dmabufext *)mp[i])->size; >+ cmd->ulpBdeCount = ++i; >+ >+ if ((--num_bde > 0) && (i < 2)) >+ continue; >+ >+ cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; >+ cmd->ulpLe = 1; >+ } >+ >+ cmd->ulpClass = CLASS3; >+ cmd->ulpContext = rxxri; >+ >+ ret_val = lpfc_sli_issue_iocb(phba, pring, cmdiocbq, 0); >+ >+ if (ret_val == IOCB_ERROR) { >+ dfc_cmd_data_free(phba, (struct lpfc_dmabufext *)mp[0]); >+ if (mp[1]) >+ dfc_cmd_data_free(phba, >+ (struct lpfc_dmabufext *)mp[1]); >+ dmp = list_entry(next, struct lpfc_dmabuf, list); >+ ret_val = EIO; >+ goto err_post_rxbufs_exit; >+ } >+ >+ lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); >+ if (mp[1]) { >+ lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); >+ mp[1] = NULL; >+ } >+ >+ /* The iocb was freed by lpfc_sli_issue_iocb */ >+ if ((cmdiocbq = lpfc_sli_get_iocbq(phba)) == NULL) { >+ dmp = list_entry(next, struct lpfc_dmabuf, list); >+ ret_val = EIO; >+ goto err_post_rxbufs_exit; >+ } >+ cmd = &cmdiocbq->iocb; >+ i = 0; >+ } >+ list_del(&head); >+ >+err_post_rxbufs_exit: >+ >+ if(rxbmp) { >+ if(rxbmp->virt) >+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); >+ kfree(rxbmp); >+ } >+ >+ if 
(cmdiocbq) >+ lpfc_sli_release_iocbq(phba, cmdiocbq); >+ >+ return ret_val; >+} >+static int >+lpfc_ioctl_loopback_test(struct lpfc_hba *phba, >+ struct lpfcCmdInput *cip, void *dataout) >+{ >+ struct lpfcdfc_host * dfchba; >+ struct lpfcdfc_event * evt; >+ struct event_data * evdat; >+ >+ struct lpfc_sli *psli = &phba->sli; >+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; >+ uint32_t size = cip->lpfc_outsz; >+ uint32_t full_size = size + ELX_LOOPBACK_HEADER_SZ; >+ size_t segment_len = 0, segment_offset = 0, current_offset = 0; >+ uint16_t rpi; >+ struct lpfc_iocbq *cmdiocbq, *rspiocbq; >+ IOCB_t *cmd, *rsp; >+ struct lpfc_sli_ct_request *ctreq; >+ struct lpfc_dmabuf *txbmp; >+ struct ulp_bde64 *txbpl = NULL; >+ struct lpfc_dmabufext *txbuffer = NULL; >+ struct list_head head; >+ struct lpfc_dmabuf *curr; >+ uint16_t txxri, rxxri; >+ uint32_t num_bde; >+ uint8_t *ptr = NULL, *rx_databuf = NULL; >+ int rc; >+ >+ if ((phba->link_state == LPFC_HBA_ERROR) || >+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || >+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) >+ return EACCES; >+ >+ if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) >+ return EACCES; >+ >+ if ((size == 0) || (size > 80 * BUF_SZ_4K)) >+ return ERANGE; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) { >+ if (dfchba->phba == phba) >+ break; >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ BUG_ON(&dfchba->node == &lpfcdfc_hosts); >+ >+ rc = lpfcdfc_loop_self_reg(phba, &rpi); >+ if (rc) >+ return rc; >+ >+ rc = lpfcdfc_loop_get_xri(phba, rpi, &txxri, &rxxri); >+ if (rc) { >+ lpfcdfc_loop_self_unreg(phba, rpi); >+ return rc; >+ } >+ >+ rc = lpfcdfc_loop_post_rxbufs(phba, rxxri, full_size); >+ if (rc) { >+ lpfcdfc_loop_self_unreg(phba, rpi); >+ return rc; >+ } >+ >+ evt = lpfcdfc_event_new(FC_REG_CT_EVENT, current->pid, >+ SLI_CT_ELX_LOOPBACK); >+ if (evt == NULL) { >+ lpfcdfc_loop_self_unreg(phba, rpi); >+ return ENOMEM; >+ } >+ >+ mutex_lock(&lpfcdfc_lock); >+ 
list_add(&evt->node, &dfchba->ev_waiters); >+ lpfcdfc_event_ref(evt); >+ mutex_unlock(&lpfcdfc_lock); >+ >+ cmdiocbq = lpfc_sli_get_iocbq(phba); >+ rspiocbq = lpfc_sli_get_iocbq(phba); >+ txbmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); >+ >+ if (txbmp) { >+ txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); >+ INIT_LIST_HEAD(&txbmp->list); >+ txbpl = (struct ulp_bde64 *) txbmp->virt; >+ if (txbpl) >+ txbuffer = dfc_cmd_data_alloc(phba, NULL, >+ txbpl, full_size); >+ } >+ >+ if (cmdiocbq == NULL || rspiocbq == NULL >+ || txbmp == NULL || txbpl == NULL || txbuffer == NULL) { >+ rc = ENOMEM; >+ goto err_loopback_test_exit; >+ } >+ >+ cmd = &cmdiocbq->iocb; >+ rsp = &rspiocbq->iocb; >+ >+ INIT_LIST_HEAD(&head); >+ list_add_tail(&head, &txbuffer->dma.list); >+ list_for_each_entry(curr, &head, list) { >+ segment_len = ((struct lpfc_dmabufext *)curr)->size; >+ if (current_offset == 0) { >+ ctreq = curr->virt; >+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); >+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; >+ ctreq->RevisionId.bits.InId = 0; >+ ctreq->FsType = SLI_CT_ELX_LOOPBACK; >+ ctreq->FsSubType = 0; >+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA ; >+ ctreq->CommandResponse.bits.Size = size; >+ segment_offset = ELX_LOOPBACK_HEADER_SZ; >+ } else >+ segment_offset = 0; >+ >+ BUG_ON(segment_offset >= segment_len); >+ if (copy_from_user (curr->virt + segment_offset, >+ (void __user *)cip->lpfc_arg1 >+ + current_offset, >+ segment_len - segment_offset)) { >+ rc = EIO; >+ list_del(&head); >+ goto err_loopback_test_exit; >+ } >+ >+ current_offset += segment_len - segment_offset; >+ BUG_ON(current_offset > size); >+ } >+ list_del(&head); >+ >+ /* Build the XMIT_SEQUENCE iocb */ >+ >+ num_bde = (uint32_t)txbuffer->flag; >+ >+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); >+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); >+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDL; >+ cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct 
ulp_bde64)); >+ >+ cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); >+ cmd->un.xseq64.w5.hcsw.Dfctl = 0; >+ cmd->un.xseq64.w5.hcsw.Rctl = FC_UNSOL_CTL; >+ cmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; >+ >+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; >+ cmd->ulpBdeCount = 1; >+ cmd->ulpLe = 1; >+ cmd->ulpClass = CLASS3; >+ cmd->ulpContext = txxri; >+ >+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; >+ cmdiocbq->vport = phba->pport; >+ >+ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq, >+ (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); >+ >+ if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { >+ rc = EIO; >+ goto err_loopback_test_exit; >+ } >+ >+ evt->waiting = 1; >+ rc = wait_event_interruptible_timeout( >+ evt->wq, !list_empty(&evt->events_to_see), >+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); >+ evt->waiting = 0; >+ if (list_empty(&evt->events_to_see)) >+ rc = (rc) ? EINTR : ETIMEDOUT; >+ else { >+ ptr = dataout; >+ mutex_lock(&lpfcdfc_lock); >+ list_move(evt->events_to_see.prev, &evt->events_to_get); >+ evdat = list_entry(evt->events_to_get.prev, >+ typeof(*evdat), node); >+ mutex_unlock(&lpfcdfc_lock); >+ rx_databuf = evdat->data; >+ if (evdat->len != full_size) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1603 Loopback test did not receive expected " >+ "data length. 
actual length 0x%x expected " >+ "length 0x%x\n", >+ evdat->len, full_size); >+ rc = EIO; >+ } >+ else if (rx_databuf == NULL) >+ rc = EIO; >+ else { >+ rx_databuf += ELX_LOOPBACK_HEADER_SZ; >+ memcpy(ptr, rx_databuf, size); >+ rc = IOCB_SUCCESS; >+ } >+ } >+ >+err_loopback_test_exit: >+ lpfcdfc_loop_self_unreg(phba, rpi); >+ >+ mutex_lock(&lpfcdfc_lock); >+ lpfcdfc_event_unref(evt); /* release ref */ >+ lpfcdfc_event_unref(evt); /* delete */ >+ mutex_unlock(&lpfcdfc_lock); >+ >+ if (cmdiocbq != NULL) >+ lpfc_sli_release_iocbq(phba, cmdiocbq); >+ >+ if (rspiocbq != NULL) >+ lpfc_sli_release_iocbq(phba, rspiocbq); >+ >+ if (txbmp != NULL) { >+ if (txbpl != NULL) { >+ if (txbuffer != NULL) >+ dfc_cmd_data_free(phba, txbuffer); >+ lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); >+ } >+ kfree(txbmp); >+ } >+ return rc; >+} >+ >+static int >+dfc_rsp_data_copy(struct lpfc_hba * phba, >+ uint8_t * outdataptr, struct lpfc_dmabufext * mlist, >+ uint32_t size) >+{ >+ struct lpfc_dmabufext *mlast = NULL; >+ int cnt, offset = 0; >+ struct list_head head, *curr, *next; >+ >+ if (!mlist) >+ return 0; >+ >+ list_add_tail(&head, &mlist->dma.list); >+ >+ list_for_each_safe(curr, next, &head) { >+ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list); >+ if (!size) >+ break; >+ >+ /* We copy chucks of 4K */ >+ if (size > BUF_SZ_4K) >+ cnt = BUF_SZ_4K; >+ else >+ cnt = size; >+ >+ if (outdataptr) { >+ pci_dma_sync_single_for_device(phba->pcidev, >+ mlast->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); >+ >+ /* Copy data to user space */ >+ if (copy_to_user >+ ((void __user *) (outdataptr + offset), >+ (uint8_t *) mlast->dma.virt, cnt)) >+ return 1; >+ } >+ offset += cnt; >+ size -= cnt; >+ } >+ list_del(&head); >+ return 0; >+} >+ >+static int >+lpfc_issue_ct_rsp(struct lpfc_hba * phba, uint32_t tag, >+ struct lpfc_dmabuf * bmp, >+ struct lpfc_dmabufext * inp) >+{ >+ struct lpfc_sli *psli; >+ IOCB_t *icmd; >+ struct lpfc_iocbq *ctiocb; >+ struct lpfc_sli_ring *pring; >+ uint32_t 
num_entry; >+ int rc = 0; >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ num_entry = inp->flag; >+ inp->flag = 0; >+ >+ /* Allocate buffer for command iocb */ >+ ctiocb = lpfc_sli_get_iocbq(phba); >+ if (!ctiocb) { >+ rc = ENOMEM; >+ goto issue_ct_rsp_exit; >+ } >+ icmd = &ctiocb->iocb; >+ >+ icmd->un.xseq64.bdl.ulpIoTag32 = 0; >+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); >+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); >+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDL; >+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64)); >+ icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); >+ icmd->un.xseq64.w5.hcsw.Dfctl = 0; >+ icmd->un.xseq64.w5.hcsw.Rctl = FC_SOL_CTL; >+ icmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; >+ >+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys, LPFC_BPL_SIZE, >+ PCI_DMA_TODEVICE); >+ >+ /* Fill in rest of iocb */ >+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; >+ icmd->ulpBdeCount = 1; >+ icmd->ulpLe = 1; >+ icmd->ulpClass = CLASS3; >+ icmd->ulpContext = (ushort) tag; >+ icmd->ulpTimeout = phba->fc_ratov * 2; >+ >+ /* Xmit CT response on exchange <xid> */ >+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, >+ "1200 Xmit CT response on exchange x%x Data: x%x x%x\n", >+ icmd->ulpContext, icmd->ulpIoTag, phba->link_state); >+ >+ ctiocb->iocb_cmpl = NULL; >+ ctiocb->iocb_flag |= LPFC_IO_LIBDFC; >+ ctiocb->vport = phba->pport; >+ rc = lpfc_sli_issue_iocb_wait(phba, pring, ctiocb, NULL, >+ phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT); >+ >+ if (rc == IOCB_TIMEDOUT) { >+ ctiocb->context1 = NULL; >+ ctiocb->context2 = NULL; >+ ctiocb->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl; >+ return rc; >+ } >+ >+ /* Calling routine takes care of IOCB_ERROR => EIO translation */ >+ if (rc != IOCB_SUCCESS) >+ rc = IOCB_ERROR; >+ >+ lpfc_sli_release_iocbq(phba, ctiocb); >+issue_ct_rsp_exit: >+ return rc; >+} >+ >+ >+static void >+lpfcdfc_ct_unsol_event(struct lpfc_hba * phba, >+ struct lpfc_sli_ring * pring, >+ struct 
lpfc_iocbq * piocbq) >+{ >+ struct lpfcdfc_host * dfchba = lpfcdfc_host_from_hba(phba); >+ uint32_t evt_req_id = 0; >+ uint32_t cmd; >+ uint32_t len; >+ struct lpfc_dmabuf *dmabuf = NULL; >+ struct lpfcdfc_event * evt; >+ struct event_data * evt_dat = NULL; >+ struct lpfc_iocbq * iocbq; >+ size_t offset = 0; >+ struct list_head head; >+ struct ulp_bde64 * bde; >+ dma_addr_t dma_addr; >+ int i; >+ struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; >+ struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; >+ struct lpfc_hbq_entry *hbqe; >+ >+ BUG_ON(&dfchba->node == &lpfcdfc_hosts); >+ INIT_LIST_HEAD(&head); >+ if (piocbq->iocb.ulpBdeCount == 0 || >+ piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) >+ goto error_unsol_ct_exit; >+ >+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) >+ dmabuf = bdeBuf1; >+ else { >+ dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh, >+ piocbq->iocb.un.cont64[0].addrLow); >+ dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); >+ } >+ BUG_ON(dmabuf == NULL); >+ evt_req_id = ((struct lpfc_sli_ct_request *)(dmabuf->virt))->FsType; >+ cmd = ((struct lpfc_sli_ct_request *) >+ (dmabuf->virt))->CommandResponse.bits.CmdRsp; >+ len = ((struct lpfc_sli_ct_request *) >+ (dmabuf->virt))->CommandResponse.bits.Size; >+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) >+ lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(evt, &dfchba->ev_waiters, node) { >+ if (!(evt->type_mask & FC_REG_CT_EVENT) || >+ evt->req_id != evt_req_id) >+ continue; >+ >+ lpfcdfc_event_ref(evt); >+ >+ if ((evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL)) == NULL) { >+ lpfcdfc_event_unref(evt); >+ break; >+ } >+ >+ mutex_unlock(&lpfcdfc_lock); >+ >+ INIT_LIST_HEAD(&head); >+ list_add_tail(&head, &piocbq->list); >+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { >+ /* take accumulated byte count from the last iocbq */ >+ iocbq = list_entry(head.prev, typeof(*iocbq), list); >+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; 
>+ } else { >+ list_for_each_entry(iocbq, &head, list) { >+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) >+ evt_dat->len += >+ iocbq->iocb.un.cont64[i].tus.f.bdeSize; >+ } >+ } >+ >+ >+ evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); >+ if (evt_dat->data == NULL) { >+ kfree (evt_dat); >+ mutex_lock(&lpfcdfc_lock); >+ lpfcdfc_event_unref(evt); >+ mutex_unlock(&lpfcdfc_lock); >+ goto error_unsol_ct_exit; >+ } >+ >+ list_for_each_entry(iocbq, &head, list) { >+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { >+ bdeBuf1 = iocbq->context2; >+ bdeBuf2 = iocbq->context3; >+ } >+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { >+ int size = 0; >+ if (phba->sli3_options & >+ LPFC_SLI3_HBQ_ENABLED) { >+ BUG_ON(i>1); >+ if (i == 0) { >+ hbqe = (struct lpfc_hbq_entry *) >+ &iocbq->iocb.un.ulpWord[0]; >+ size = hbqe->bde.tus.f.bdeSize; >+ dmabuf = bdeBuf1; >+ } else if (i == 1) { >+ hbqe = (struct lpfc_hbq_entry *) >+ &iocbq->iocb.unsli3. >+ sli3Words[4]; >+ size = hbqe->bde.tus.f.bdeSize; >+ dmabuf = bdeBuf2; >+ } >+ if ((offset + size) > evt_dat->len) >+ size = evt_dat->len - offset; >+ } else { >+ size = iocbq->iocb.un.cont64[i]. 
>+ tus.f.bdeSize; >+ bde = &iocbq->iocb.un.cont64[i]; >+ dma_addr = getPaddr(bde->addrHigh, >+ bde->addrLow); >+ dmabuf = lpfc_sli_ringpostbuf_get(phba, >+ pring, dma_addr); >+ } >+ if (dmabuf == NULL) { >+ kfree (evt_dat->data); >+ kfree (evt_dat); >+ mutex_lock(&lpfcdfc_lock); >+ lpfcdfc_event_unref(evt); >+ mutex_unlock(&lpfcdfc_lock); >+ goto error_unsol_ct_exit; >+ } >+ memcpy ((char *)(evt_dat->data) + offset, >+ dmabuf->virt, size); >+ offset += size; >+ if (evt_req_id != SLI_CT_ELX_LOOPBACK && >+ !(phba->sli3_options & >+ LPFC_SLI3_HBQ_ENABLED)) >+ lpfc_sli_ringpostbuf_put(phba, pring, >+ dmabuf); >+ else { >+ switch (cmd) { >+ case ELX_LOOPBACK_DATA: >+ dfc_cmd_data_free(phba, >+ (struct lpfc_dmabufext *) >+ dmabuf); >+ break; >+ case ELX_LOOPBACK_XRI_SETUP: >+ if (!(phba->sli3_options & >+ LPFC_SLI3_HBQ_ENABLED)) >+ lpfc_post_buffer(phba, >+ pring, >+ 1, 1); >+ else >+ lpfc_in_buf_free(phba, >+ dmabuf); >+ break; >+ default: >+ if (!(phba->sli3_options & >+ LPFC_SLI3_HBQ_ENABLED)) >+ lpfc_post_buffer(phba, >+ pring, >+ 1, 1); >+ break; >+ } >+ } >+ } >+ } >+ >+ mutex_lock(&lpfcdfc_lock); >+ evt_dat->immed_dat = piocbq->iocb.ulpContext; >+ evt_dat->type = FC_REG_CT_EVENT; >+ list_add(&evt_dat->node, &evt->events_to_see); >+ wake_up_interruptible(&evt->wq); >+ lpfcdfc_event_unref(evt); >+ if (evt_req_id == SLI_CT_ELX_LOOPBACK) >+ break; >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ >+error_unsol_ct_exit: >+ if(!list_empty(&head)) >+ list_del(&head); >+ if (evt_req_id != SLI_CT_ELX_LOOPBACK && >+ dfchba->base_ct_unsol_event != NULL) >+ (dfchba->base_ct_unsol_event)(phba, pring, piocbq); >+ >+ return; >+} >+ >+ >+struct lpfc_dmabufext * >+__dfc_cmd_data_alloc(struct lpfc_hba * phba, >+ char *indataptr, struct ulp_bde64 * bpl, uint32_t size, >+ int nocopydata) >+{ >+ struct lpfc_dmabufext *mlist = NULL; >+ struct lpfc_dmabufext *dmp; >+ int cnt, offset = 0, i = 0; >+ struct pci_dev *pcidev; >+ >+ pcidev = phba->pcidev; >+ >+ while (size) { >+ /* We get chunks of 4K 
*/ >+ if (size > BUF_SZ_4K) >+ cnt = BUF_SZ_4K; >+ else >+ cnt = size; >+ >+ /* allocate struct lpfc_dmabufext buffer header */ >+ dmp = kmalloc(sizeof (struct lpfc_dmabufext), GFP_KERNEL); >+ if (dmp == 0) >+ goto out; >+ >+ INIT_LIST_HEAD(&dmp->dma.list); >+ >+ /* Queue it to a linked list */ >+ if (mlist) >+ list_add_tail(&dmp->dma.list, &mlist->dma.list); >+ else >+ mlist = dmp; >+ >+ /* allocate buffer */ >+ dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, >+ cnt, >+ &(dmp->dma.phys), >+ GFP_KERNEL); >+ >+ if (dmp->dma.virt == NULL) >+ goto out; >+ >+ dmp->size = cnt; >+ >+ if (indataptr || nocopydata) { >+ if (indataptr) >+ /* Copy data from user space in */ >+ if (copy_from_user ((uint8_t *) dmp->dma.virt, >+ (void __user *) (indataptr + offset), >+ cnt)) { >+ goto out; >+ } >+ bpl->tus.f.bdeFlags = 0; >+ >+ pci_dma_sync_single_for_device(phba->pcidev, >+ dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); >+ >+ } else { >+ memset((uint8_t *)dmp->dma.virt, 0, cnt); >+ bpl->tus.f.bdeFlags = BUFF_USE_RCV; >+ } >+ >+ /* build buffer ptr list for IOCB */ >+ bpl->addrLow = le32_to_cpu( putPaddrLow(dmp->dma.phys) ); >+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(dmp->dma.phys) ); >+ bpl->tus.f.bdeSize = (ushort) cnt; >+ bpl->tus.w = le32_to_cpu(bpl->tus.w); >+ bpl++; >+ >+ i++; >+ offset += cnt; >+ size -= cnt; >+ } >+ >+ mlist->flag = i; >+ return mlist; >+out: >+ dfc_cmd_data_free(phba, mlist); >+ return NULL; >+} >+ >+static struct lpfc_dmabufext * >+dfc_cmd_data_alloc(struct lpfc_hba * phba, >+ char *indataptr, struct ulp_bde64 * bpl, uint32_t size) >+{ >+ /* if indataptr is null it is a rsp buffer. 
*/ >+ return __dfc_cmd_data_alloc(phba, indataptr, bpl, size, >+ 0 /* don't copy user data */); >+} >+ >+int >+__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist) >+{ >+ return dfc_cmd_data_free(phba, mlist); >+} >+static int >+dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist) >+{ >+ struct lpfc_dmabufext *mlast; >+ struct pci_dev *pcidev; >+ struct list_head head, *curr, *next; >+ >+ if ((!mlist) || (!lpfc_is_link_up(phba) && >+ (phba->link_flag & LS_LOOPBACK_MODE))) { >+ return 0; >+ } >+ >+ pcidev = phba->pcidev; >+ list_add_tail(&head, &mlist->dma.list); >+ >+ list_for_each_safe(curr, next, &head) { >+ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list); >+ if (mlast->dma.virt) >+ dma_free_coherent(&pcidev->dev, >+ mlast->size, >+ mlast->dma.virt, >+ mlast->dma.phys); >+ kfree(mlast); >+ } >+ return 0; >+} >+ >+ >+/* The only reason we need that reverce find, is because we >+ * are bent on keeping original calling conventions. >+ */ >+static struct lpfcdfc_host * >+lpfcdfc_host_from_hba(struct lpfc_hba * phba) >+{ >+ struct lpfcdfc_host * dfchba; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) { >+ if (dfchba->phba == phba) >+ break; >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ >+ return dfchba; >+} >+ >+struct lpfcdfc_host * >+lpfcdfc_host_add (struct pci_dev * dev, >+ struct Scsi_Host * host, >+ struct lpfc_hba * phba) >+{ >+ struct lpfcdfc_host * dfchba = NULL; >+ struct lpfc_sli_ring_mask * prt = NULL; >+ >+ dfchba = kzalloc(sizeof(*dfchba), GFP_KERNEL); >+ if (dfchba == NULL) >+ return NULL; >+ >+ dfchba->inst = phba->brd_no; >+ dfchba->phba = phba; >+ dfchba->vport = phba->pport; >+ dfchba->host = host; >+ dfchba->dev = dev; >+ dfchba->blocked = 0; >+ >+ spin_lock_irq(&phba->hbalock); >+ prt = phba->sli.ring[LPFC_ELS_RING].prt; >+ dfchba->base_ct_unsol_event = prt[2].lpfc_sli_rcv_unsol_event; >+ prt[2].lpfc_sli_rcv_unsol_event = lpfcdfc_ct_unsol_event; >+ 
prt[3].lpfc_sli_rcv_unsol_event = lpfcdfc_ct_unsol_event; >+ spin_unlock_irq(&phba->hbalock); >+ mutex_lock(&lpfcdfc_lock); >+ list_add_tail(&dfchba->node, &lpfcdfc_hosts); >+ INIT_LIST_HEAD(&dfchba->ev_waiters); >+ mutex_unlock(&lpfcdfc_lock); >+ >+ return dfchba; >+} >+ >+ >+void >+lpfcdfc_host_del (struct lpfcdfc_host * dfchba) >+{ >+ struct Scsi_Host * host; >+ struct lpfc_hba * phba = NULL; >+ struct lpfc_sli_ring_mask * prt = NULL; >+ struct lpfcdfc_event * evt; >+ >+ mutex_lock(&lpfcdfc_lock); >+ dfchba->blocked = 1; >+ >+ list_for_each_entry(evt, &dfchba->ev_waiters, node) { >+ wake_up_interruptible(&evt->wq); >+ } >+ >+ while (dfchba->ref_count) { >+ mutex_unlock(&lpfcdfc_lock); >+ msleep(2000); >+ mutex_lock(&lpfcdfc_lock); >+ } >+ >+ if (dfchba->dev->driver) { >+ host = pci_get_drvdata(dfchba->dev); >+ if ((host != NULL) && >+ (struct lpfc_vport *)host->hostdata == dfchba->vport) { >+ phba = dfchba->phba; >+ mutex_unlock(&lpfcdfc_lock); >+ spin_lock_irq(&phba->hbalock); >+ prt = phba->sli.ring[LPFC_ELS_RING].prt; >+ prt[2].lpfc_sli_rcv_unsol_event = >+ dfchba->base_ct_unsol_event; >+ prt[3].lpfc_sli_rcv_unsol_event = >+ dfchba->base_ct_unsol_event; >+ spin_unlock_irq(&phba->hbalock); >+ mutex_lock(&lpfcdfc_lock); >+ } >+ } >+ list_del_init(&dfchba->node); >+ mutex_unlock(&lpfcdfc_lock); >+ kfree (dfchba); >+} >+ >+/* >+ * Retrieve lpfc_hba * matching instance (board no) >+ * If found return lpfc_hba * >+ * If not found return NULL >+ */ >+static struct lpfcdfc_host * >+lpfcdfc_get_phba_by_inst(int inst) >+{ >+ struct Scsi_Host * host = NULL; >+ struct lpfcdfc_host * dfchba; >+ >+ mutex_lock(&lpfcdfc_lock); >+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) { >+ if (dfchba->inst == inst) { >+ if (dfchba->dev->driver) { >+ host = pci_get_drvdata(dfchba->dev); >+ if ((host != NULL) && >+ (struct lpfc_vport *)host->hostdata == >+ dfchba->vport) { >+ mutex_unlock(&lpfcdfc_lock); >+ BUG_ON(dfchba->phba->brd_no != inst); >+ return dfchba; >+ } >+ } >+ 
mutex_unlock(&lpfcdfc_lock); >+ return NULL; >+ } >+ } >+ mutex_unlock(&lpfcdfc_lock); >+ >+ return NULL; >+} >+ >+static int >+lpfcdfc_do_ioctl(struct lpfcCmdInput *cip) >+{ >+ struct lpfcdfc_host * dfchba = NULL; >+ struct lpfc_hba *phba = NULL; >+ int rc; >+ uint32_t total_mem; >+ void *dataout; >+ >+ >+ /* Some ioctls are per module and do not need phba */ >+ switch (cip->lpfc_cmd) { >+ case LPFC_GET_DFC_REV: >+ break; >+ default: >+ dfchba = lpfcdfc_get_phba_by_inst(cip->lpfc_brd); >+ if (dfchba == NULL) >+ return EINVAL; >+ phba = dfchba->phba; >+ break; >+ }; >+ >+ if (phba) >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1601 libdfc ioctl entry Data: x%x x%lx x%lx x%x\n", >+ cip->lpfc_cmd, (unsigned long) cip->lpfc_arg1, >+ (unsigned long) cip->lpfc_arg2, cip->lpfc_outsz); >+ mutex_lock(&lpfcdfc_lock); >+ if (dfchba && dfchba->blocked) { >+ mutex_unlock(&lpfcdfc_lock); >+ return EINVAL; >+ } >+ if (dfchba) >+ dfchba->ref_count++; >+ mutex_unlock(&lpfcdfc_lock); >+ if (cip->lpfc_outsz >= BUF_SZ_4K) { >+ >+ /* >+ * Allocate memory for ioctl data. If buffer is bigger than 64k, >+ * then we allocate 64k and re-use that buffer over and over to >+ * xfer the whole block. This is because Linux kernel has a >+ * problem allocating more than 120k of kernel space memory. Saw >+ * problem with GET_FCPTARGETMAPPING... >+ */ >+ if (cip->lpfc_outsz <= (64 * 1024)) >+ total_mem = cip->lpfc_outsz; >+ else >+ total_mem = 64 * 1024; >+ } else { >+ /* Allocate memory for ioctl data */ >+ total_mem = BUF_SZ_4K; >+ } >+ >+ /* >+ * For LPFC_HBA_GET_EVENT allocate memory which is needed to store >+ * event info. Allocating maximum possible buffer size (64KB) can fail >+ * some times under heavy IO. 
>+ */ >+ if (cip->lpfc_cmd == LPFC_HBA_GET_EVENT) { >+ dataout = NULL; >+ } else { >+ dataout = kmalloc(total_mem, GFP_KERNEL); >+ >+ if (!dataout && dfchba != NULL) { >+ mutex_lock(&lpfcdfc_lock); >+ if (dfchba) >+ dfchba->ref_count--; >+ mutex_unlock(&lpfcdfc_lock); >+ return ENOMEM; >+ } >+ } >+ >+ switch (cip->lpfc_cmd) { >+ >+ case LPFC_GET_DFC_REV: >+ ((struct DfcRevInfo *) dataout)->a_Major = DFC_MAJOR_REV; >+ ((struct DfcRevInfo *) dataout)->a_Minor = DFC_MINOR_REV; >+ cip->lpfc_outsz = sizeof (struct DfcRevInfo); >+ rc = 0; >+ break; >+ >+ case LPFC_SEND_ELS: >+ rc = lpfc_ioctl_send_els(phba, cip, dataout); >+ break; >+ >+ case LPFC_HBA_SEND_MGMT_RSP: >+ rc = lpfc_ioctl_send_mgmt_rsp(phba, cip); >+ break; >+ >+ case LPFC_HBA_SEND_MGMT_CMD: >+ case LPFC_CT: >+ rc = lpfc_ioctl_send_mgmt_cmd(phba, cip, dataout); >+ break; >+ >+ case LPFC_HBA_GET_EVENT: >+ rc = lpfc_ioctl_hba_get_event(phba, cip, &dataout, &total_mem); >+ if ((total_mem) && (copy_to_user ((void __user *) >+ cip->lpfc_dataout, (uint8_t *) dataout, total_mem))) >+ rc = EIO; >+ /* This is to prevent copy_to_user at end of the function. 
*/ >+ cip->lpfc_outsz = 0; >+ break; >+ >+ case LPFC_HBA_SET_EVENT: >+ rc = lpfc_ioctl_hba_set_event(phba, cip); >+ break; >+ >+ case LPFC_LOOPBACK_MODE: >+ rc = lpfc_ioctl_loopback_mode(phba, cip, dataout); >+ break; >+ >+ case LPFC_LOOPBACK_TEST: >+ rc = lpfc_ioctl_loopback_test(phba, cip, dataout); >+ break; >+ >+ case LPFC_HBA_RNID: >+ rc = lpfc_ioctl_hba_rnid(phba, cip, dataout); >+ break; >+ >+ default: >+ rc = EINVAL; >+ break; >+ } >+ >+ if (phba) >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1602 libdfc ioctl exit Data: x%x x%x x%lx\n", >+ rc, cip->lpfc_outsz, (unsigned long) cip->lpfc_dataout); >+ /* Copy data to user space config method */ >+ if (rc == 0) { >+ if (cip->lpfc_outsz) { >+ if (copy_to_user >+ ((void __user *) cip->lpfc_dataout, >+ (uint8_t *) dataout, cip->lpfc_outsz)) { >+ rc = EIO; >+ } >+ } >+ } >+ >+ kfree(dataout); >+ mutex_lock(&lpfcdfc_lock); >+ if (dfchba) >+ dfchba->ref_count--; >+ mutex_unlock(&lpfcdfc_lock); >+ >+ return rc; >+} >+ >+static int >+lpfcdfc_ioctl(struct inode *inode, >+ struct file *file, unsigned int cmd, unsigned long arg) >+{ >+ int rc; >+ struct lpfcCmdInput *ci; >+ >+ if (!arg) >+ return -EINVAL; >+ >+ ci = (struct lpfcCmdInput *) kmalloc(sizeof (struct lpfcCmdInput), >+ GFP_KERNEL); >+ >+ if (!ci) >+ return -ENOMEM; >+ >+ if ((rc = copy_from_user >+ ((uint8_t *) ci, (void __user *) arg, >+ sizeof (struct lpfcCmdInput)))) { >+ kfree(ci); >+ return -EIO; >+ } >+ >+ rc = lpfcdfc_do_ioctl(ci); >+ >+ kfree(ci); >+ return -rc; >+} >+ >+#ifdef CONFIG_COMPAT >+static long >+lpfcdfc_compat_ioctl(struct file * file, unsigned int cmd, unsigned long arg) >+{ >+ struct lpfcCmdInput32 arg32; >+ struct lpfcCmdInput arg64; >+ int ret; >+ >+ if(copy_from_user(&arg32, (void __user *)arg, >+ sizeof(struct lpfcCmdInput32))) >+ return -EFAULT; >+ >+ arg64.lpfc_brd = arg32.lpfc_brd; >+ arg64.lpfc_ring = arg32.lpfc_ring; >+ arg64.lpfc_iocb = arg32.lpfc_iocb; >+ arg64.lpfc_flag = arg32.lpfc_flag; >+ arg64.lpfc_arg1 = (void 
*)(unsigned long) arg32.lpfc_arg1; >+ arg64.lpfc_arg2 = (void *)(unsigned long) arg32.lpfc_arg2; >+ arg64.lpfc_arg3 = (void *)(unsigned long) arg32.lpfc_arg3; >+ arg64.lpfc_dataout = (void *)(unsigned long) arg32.lpfc_dataout; >+ arg64.lpfc_cmd = arg32.lpfc_cmd; >+ arg64.lpfc_outsz = arg32.lpfc_outsz; >+ arg64.lpfc_arg4 = arg32.lpfc_arg4; >+ arg64.lpfc_arg5 = arg32.lpfc_arg5; >+ >+ ret = lpfcdfc_do_ioctl(&arg64); >+ >+ arg32.lpfc_brd = arg64.lpfc_brd; >+ arg32.lpfc_ring = arg64.lpfc_ring; >+ arg32.lpfc_iocb = arg64.lpfc_iocb; >+ arg32.lpfc_flag = arg64.lpfc_flag; >+ arg32.lpfc_arg1 = (u32)(unsigned long) arg64.lpfc_arg1; >+ arg32.lpfc_arg2 = (u32)(unsigned long) arg64.lpfc_arg2; >+ arg32.lpfc_arg3 = (u32)(unsigned long) arg64.lpfc_arg3; >+ arg32.lpfc_dataout = (u32)(unsigned long) arg64.lpfc_dataout; >+ arg32.lpfc_cmd = arg64.lpfc_cmd; >+ arg32.lpfc_outsz = arg64.lpfc_outsz; >+ arg32.lpfc_arg4 = arg64.lpfc_arg4; >+ arg32.lpfc_arg5 = arg64.lpfc_arg5; >+ >+ if(copy_to_user((void __user *)arg, &arg32, >+ sizeof(struct lpfcCmdInput32))) >+ return -EFAULT; >+ >+ return -ret; >+} >+#endif >+ >+static struct file_operations lpfc_fops = { >+ .owner = THIS_MODULE, >+ .ioctl = lpfcdfc_ioctl, >+#ifdef CONFIG_COMPAT >+ .compat_ioctl = lpfcdfc_compat_ioctl, >+#endif >+}; >+ >+int >+lpfc_cdev_init(void) >+{ >+ >+ lpfcdfc_major = register_chrdev(0, LPFC_CHAR_DEV_NAME, &lpfc_fops); >+ if (lpfcdfc_major < 0) { >+ printk(KERN_ERR "%s:%d Unable to register \"%s\" device.\n", >+ __FUNCTION__, __LINE__, LPFC_CHAR_DEV_NAME); >+ return lpfcdfc_major; >+ } >+ >+ return 0; >+} >+ >+void >+lpfc_cdev_exit(void) >+{ >+ unregister_chrdev(lpfcdfc_major, LPFC_CHAR_DEV_NAME); >+} >diff -urpN a/drivers/scsi/lpfc/lpfc_ioctl.h b/drivers/scsi/lpfc/lpfc_ioctl.h >--- a/drivers/scsi/lpfc/lpfc_ioctl.h 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_ioctl.h 2008-09-05 17:47:49.769877000 -0400 >@@ -0,0 +1,190 @@ >+/******************************************************************* >+ * 
This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. * >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. * >+ *******************************************************************/ >+ >+#define DFC_MAJOR_REV 81 >+#define DFC_MINOR_REV 0 >+ >+#define LPFC_MAX_EVENT 128 >+ >+/* Event definitions for RegisterForEvent */ >+#define FC_REG_LINK_EVENT 0x1 /* Register for link up / down events */ >+#define FC_REG_RSCN_EVENT 0x2 /* Register for RSCN events */ >+#define FC_REG_CT_EVENT 0x4 /* Register for CT request events */ >+#ifndef FC_REG_DUMP_EVENT >+#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ >+#endif >+#ifndef FC_REG_TEMPERATURE_EVENT >+#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature >+ event */ >+#endif >+ >+#define FC_REG_EVENT_MASK 0xff /* event mask */ >+ >+ >+#define LPFC_CT 0x42 /* Send CT passthru command */ >+#define LPFC_HBA_RNID 0x52 /* Send an RNID request */ >+#define LPFC_HBA_REFRESHINFO 0x56 /* Do a refresh of the stats */ >+#define LPFC_SEND_ELS 0x57 /* Send out an ELS command */ >+#define LPFC_HBA_SET_EVENT 0x59 /* Set FCP event(s) */ >+#define LPFC_HBA_GET_EVENT 0x5a /* Get FCP event(s) */ 
>+#define LPFC_HBA_SEND_MGMT_CMD 0x5b /* Send a management command */ >+#define LPFC_HBA_SEND_MGMT_RSP 0x5c /* Send a management response */ >+ >+#define LPFC_GET_DFC_REV 0x68 /* Get the rev of the ioctl >+ driver */ >+#define LPFC_LOOPBACK_TEST 0x72 /* Run Loopback test */ >+#define LPFC_LOOPBACK_MODE 0x73 /* Enter Loopback mode */ >+/* LPFC_LAST_IOCTL_USED 0x73 Last LPFC Ioctl used */ >+ >+#define INTERNAL_LOOP_BACK 0x1 >+#define EXTERNAL_LOOP_BACK 0x2 >+ >+/* the DfcRevInfo structure */ >+struct DfcRevInfo { >+ uint32_t a_Major; >+ uint32_t a_Minor; >+} ; >+ >+#define LPFC_WWPN_TYPE 0 >+#define LPFC_PORTID_TYPE 1 >+#define LPFC_WWNN_TYPE 2 >+ >+struct nport_id { >+ uint32_t idType; /* 0 - wwpn, 1 - d_id, 2 - wwnn */ >+ uint32_t d_id; >+ uint8_t wwpn[8]; >+}; >+ >+#define LPFC_EVENT_LIP_OCCURRED 1 >+#define LPFC_EVENT_LINK_UP 2 >+#define LPFC_EVENT_LINK_DOWN 3 >+#define LPFC_EVENT_LIP_RESET_OCCURRED 4 >+#define LPFC_EVENT_RSCN 5 >+#define LPFC_EVENT_PROPRIETARY 0xFFFF >+ >+struct lpfc_hba_event_info { >+ uint32_t event_code; >+ uint32_t port_id; >+ union { >+ uint32_t rscn_event_info; >+ uint32_t pty_event_info; >+ } event; >+}; >+ >+ >+#define LPFC_CHAR_DEV_NAME "lpfcdfc" >+ >+/* >+ * Diagnostic (DFC) Command & Input structures: (LPFC) >+ */ >+struct lpfcCmdInput { >+ short lpfc_brd; >+ short lpfc_ring; >+ short lpfc_iocb; >+ short lpfc_flag; >+ void *lpfc_arg1; >+ void *lpfc_arg2; >+ void *lpfc_arg3; >+ char *lpfc_dataout; >+ uint32_t lpfc_cmd; >+ uint32_t lpfc_outsz; >+ uint32_t lpfc_arg4; >+ uint32_t lpfc_arg5; >+}; >+/* Used for ioctl command */ >+#define LPFC_DFC_CMD_IOCTL_MAGIC 0xFC >+#define LPFC_DFC_CMD_IOCTL _IOWR(LPFC_DFC_CMD_IOCTL_MAGIC, 0x1,\ >+ struct lpfcCmdInput) >+ >+#ifdef CONFIG_COMPAT >+/* 32 bit version */ >+struct lpfcCmdInput32 { >+ short lpfc_brd; >+ short lpfc_ring; >+ short lpfc_iocb; >+ short lpfc_flag; >+ u32 lpfc_arg1; >+ u32 lpfc_arg2; >+ u32 lpfc_arg3; >+ u32 lpfc_dataout; >+ uint32_t lpfc_cmd; >+ uint32_t lpfc_outsz; >+ uint32_t 
lpfc_arg4; >+ uint32_t lpfc_arg5; >+}; >+#endif >+ >+#define SLI_CT_ELX_LOOPBACK 0x10 >+ >+enum ELX_LOOPBACK_CMD { >+ ELX_LOOPBACK_XRI_SETUP, >+ ELX_LOOPBACK_DATA, >+}; >+ >+ >+struct lpfc_link_info { >+ uint32_t a_linkEventTag; >+ uint32_t a_linkUp; >+ uint32_t a_linkDown; >+ uint32_t a_linkMulti; >+ uint32_t a_DID; >+ uint8_t a_topology; >+ uint8_t a_linkState; >+ uint8_t a_alpa; >+ uint8_t a_alpaCnt; >+ uint8_t a_alpaMap[128]; >+ uint8_t a_wwpName[8]; >+ uint8_t a_wwnName[8]; >+}; >+ >+enum lpfc_host_event_code { >+ LPFCH_EVT_LIP = 0x1, >+ LPFCH_EVT_LINKUP = 0x2, >+ LPFCH_EVT_LINKDOWN = 0x3, >+ LPFCH_EVT_LIPRESET = 0x4, >+ LPFCH_EVT_RSCN = 0x5, >+ LPFCH_EVT_ADAPTER_CHANGE = 0x103, >+ LPFCH_EVT_PORT_UNKNOWN = 0x200, >+ LPFCH_EVT_PORT_OFFLINE = 0x201, >+ LPFCH_EVT_PORT_ONLINE = 0x202, >+ LPFCH_EVT_PORT_FABRIC = 0x204, >+ LPFCH_EVT_LINK_UNKNOWN = 0x500, >+ LPFCH_EVT_VENDOR_UNIQUE = 0xffff, >+}; >+ >+#define ELX_LOOPBACK_HEADER_SZ \ >+ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) >+ >+struct lpfc_host_event { >+ uint32_t seq_num; >+ enum lpfc_host_event_code event_code; >+ uint32_t data; >+}; >+ >+#ifdef __KERNEL__ >+struct lpfcdfc_host; >+ >+/* Initialize/Un-initialize char device */ >+int lpfc_cdev_init(void); >+void lpfc_cdev_exit(void); >+void lpfcdfc_host_del(struct lpfcdfc_host *); >+struct lpfcdfc_host *lpfcdfc_host_add(struct pci_dev *, struct Scsi_Host *, >+ struct lpfc_hba *); >+#endif /* __KERNEL__ */ >diff -urpN a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h >--- a/drivers/scsi/lpfc/lpfc_logmsg.h 2008-09-05 17:47:41.695250000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_logmsg.h 2008-09-05 17:47:49.620877000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2005 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. 
* > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * * >@@ -26,19 +26,25 @@ > #define LOG_IP 0x20 /* IP traffic history */ > #define LOG_FCP 0x40 /* FCP traffic history */ > #define LOG_NODE 0x80 /* Node table events */ >+#define LOG_TEMP 0x100 /* Temperature sensor events */ > #define LOG_MISC 0x400 /* Miscellaneous events */ > #define LOG_SLI 0x800 /* SLI events */ > #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ > #define LOG_LIBDFC 0x2000 /* Libdfc events */ > #define LOG_VPORT 0x4000 /* NPIV events */ >+#define LOG_SECURITY 0x8000 /* FC Security */ > #define LOG_ALL_MSG 0xffff /* LOG all messages */ > > #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ >+ do { \ > { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ > dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ >- fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } >+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ >+ } while (0) > > #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ >+ do { \ > { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ > dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ >- fmt, phba->brd_no, ##arg); } >+ fmt, phba->brd_no, ##arg); } \ >+ } while (0) >diff -urpN a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c >--- a/drivers/scsi/lpfc/lpfc_mbox.c 2008-09-05 17:47:41.710246000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_mbox.c 2008-09-05 17:47:49.729876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. 
* > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -82,6 +82,24 @@ lpfc_read_nv(struct lpfc_hba * phba, LPF > } > > /**********************************************/ >+/* lpfc_config_async Issue a */ >+/* MBX_ASYNC_EVT_ENABLE mailbox command */ >+/**********************************************/ >+void >+lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, >+ uint32_t ring) >+{ >+ MAILBOX_t *mb; >+ >+ mb = &pmb->mb; >+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); >+ mb->mbxCommand = MBX_ASYNCEVT_ENABLE; >+ mb->un.varCfgAsyncEvent.ring = ring; >+ mb->mbxOwner = OWN_HOST; >+ return; >+} >+ >+/**********************************************/ > /* lpfc_heart_beat Issue a HEART_BEAT */ > /* mailbox command */ > /**********************************************/ >@@ -511,9 +529,45 @@ lpfc_config_pcb_setup(struct lpfc_hba * > pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); > pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); > iocbCnt += pring->numRiocb; >+#if 0 >+ printk("lpfc_config_pcb_setup: brdno:%d, Ring #%d:\n" >+ "numCiocb:%d, sizeCiocb:%d,\n" >+ "numRiocb:%d, sizeRiocb:%d,\n" >+ "cmdAddrLow:0x%x, rspAddrLow:0x%x, iocbCnt:0x%x\n\n", >+ phba->brd_no, i, pcbp->rdsc[i].cmdEntries, >+ pring->sizeCiocb, pcbp->rdsc[i].rspEntries, >+ pring->sizeRiocb, pcbp->rdsc[i].cmdAddrLow, >+ pcbp->rdsc[i].rspAddrLow, iocbCnt); >+#endif > } > } > >+/***********************************************/ >+/* command to write slim */ >+/***********************************************/ >+void >+lpfc_set_var(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr, >+ uint32_t value) >+{ >+ MAILBOX_t *mb; >+ >+ mb = &pmb->mb; >+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); >+ >+ /* >+ * Always turn on DELAYED ABTS for ELS timeouts >+ */ >+ if ((addr == 0x052198) && (value == 0)) >+ value = 1; >+ >+ mb->un.varWords[0] = addr; >+ mb->un.varWords[1] = value; >+ >+ mb->mbxCommand = MBX_SET_VARIABLE; >+ mb->mbxOwner = OWN_HOST; >+ return; >+} >+ > void > 
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) > { >@@ -707,7 +761,8 @@ lpfc_config_port(struct lpfc_hba *phba, > > /* Setup Mailbox pointers */ > phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) + >- sizeof(struct sli2_desc); >+ sizeof(struct sli2_desc) + >+ MAILBOX_EXT_WSIZE * sizeof(uint32_t); > offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p; > pdma_addr = phba->slim2p_mapping + offset; > phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr); >@@ -874,7 +929,7 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, > case MBX_DOWN_LOAD: /* 0x1C */ > case MBX_DEL_LD_ENTRY: /* 0x1D */ > case MBX_LOAD_AREA: /* 0x81 */ >- case MBX_FLASH_WR_ULA: /* 0x98 */ >+ case MBX_WRITE_WWN: /* 0x98 */ > case MBX_LOAD_EXP_ROM: /* 0x9C */ > return LPFC_MBOX_TMO_FLASH_CMD; > } >diff -urpN a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c >--- a/drivers/scsi/lpfc/lpfc_mem.c 2008-09-05 17:47:41.713244000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_mem.c 2008-09-05 17:47:49.646876000 -0400 >@@ -98,6 +98,7 @@ lpfc_mem_alloc(struct lpfc_hba * phba) > > fail_free_hbq_pool: > lpfc_sli_hbqbuf_free_all(phba); >+ pci_pool_destroy(phba->lpfc_hbq_pool); > fail_free_nlp_mem_pool: > mempool_destroy(phba->nlp_mem_pool); > phba->nlp_mem_pool = NULL; >@@ -236,11 +237,11 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba > { > struct hbq_dmabuf *hbqbp; > >- hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); >+ hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC); > if (!hbqbp) > return NULL; > >- hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, >+ hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_ATOMIC, > &hbqbp->dbuf.phys); > if (!hbqbp->dbuf.virt) { > kfree(hbqbp); >@@ -263,15 +264,27 @@ void > lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) > { > struct hbq_dmabuf *hbq_entry; >+ unsigned long flags; >+ >+ if (!mp) >+ return; > > if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { >+ /* Check whether HBQ is still in use */ >+ 
spin_lock_irqsave(&phba->hbalock, flags); >+ if (!phba->hbq_in_use) { >+ spin_unlock_irqrestore(&phba->hbalock, flags); >+ return; >+ } > hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); >+ list_del(&hbq_entry->dbuf.list); > if (hbq_entry->tag == -1) { > (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) > (phba, hbq_entry); > } else { > lpfc_sli_free_hbq(phba, hbq_entry); > } >+ spin_unlock_irqrestore(&phba->hbalock, flags); > } else { > lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); >@@ -279,3 +292,4 @@ lpfc_in_buf_free(struct lpfc_hba *phba, > return; > } > >+ >diff -urpN a/drivers/scsi/lpfc/lpfc_menlo.c b/drivers/scsi/lpfc/lpfc_menlo.c >--- a/drivers/scsi/lpfc/lpfc_menlo.c 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_menlo.c 2008-09-05 17:47:49.752876000 -0400 >@@ -0,0 +1,1178 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2007-2008 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. * >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. 
* >+ *******************************************************************/ >+ >+#include <linux/ctype.h> >+#include <linux/delay.h> >+#include <linux/pci.h> >+#include <linux/interrupt.h> >+ >+#include <scsi/scsi.h> >+#include <scsi/scsi_device.h> >+#include <scsi/scsi_host.h> >+#include <scsi/scsi_tcq.h> >+#include <scsi/scsi_transport_fc.h> >+ >+#include "lpfc_hw.h" >+#include "lpfc_sli.h" >+#include "lpfc_disc.h" >+#include "lpfc_scsi.h" >+#include "lpfc.h" >+#include "lpfc_logmsg.h" >+#include "lpfc_version.h" >+#include "lpfc_compat.h" >+#include "lpfc_crtn.h" >+#include "lpfc_vport.h" >+#include "lpfc_auth_access.h" >+ >+#define MENLO_CMD_FW_DOWNLOAD 0x00000002 >+ >+static void lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *, >+ struct lpfc_iocbq *, struct lpfc_iocbq *); >+ >+extern int >+__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist); >+ >+extern struct lpfc_dmabufext * >+__dfc_cmd_data_alloc(struct lpfc_hba * phba, >+ char *indataptr, struct ulp_bde64 * bpl, uint32_t size, >+ int nocopydata); >+/* >+ * The size for the menlo interface is set at 336k because it only uses >+ * one bpl. A bpl can contain 85 BDE descriptors. Each BDE can represent >+ * up to 4k. I used 84 BDE entries to do this calculation because the >+ * 1st sysfs_menlo_write is for just the cmd header which is 12 bytes. 
>+ * size = PAGE_SZ * (sizeof(bpl) / sizeof(BDE)) -1; >+ */ >+#define SYSFS_MENLO_ATTR_SIZE 344064 >+typedef struct menlo_get_cmd >+{ >+ uint32_t code; /* Command code */ >+ uint32_t context; /* Context */ >+ uint32_t length; /* Max response length */ >+} menlo_get_cmd_t; >+ >+typedef struct menlo_init_rsp >+{ >+ uint32_t code; >+ uint32_t bb_credit; /* Menlo FC BB Credit */ >+ uint32_t frame_size; /* Menlo FC receive frame size */ >+ uint32_t fw_version; /* Menlo firmware version */ >+ uint32_t reset_status; /* Reason for previous reset */ >+ >+#define MENLO_RESET_STATUS_NORMAL 0 >+#define MENLO_RESET_STATUS_PANIC 1 >+ >+ uint32_t maint_status; /* Menlo Maintenance Mode status at link up */ >+ >+ >+#define MENLO_MAINTENANCE_MODE_DISABLE 0 >+#define MENLO_MAINTENANCE_MODE_ENABLE 1 >+ uint32_t fw_type; >+ uint32_t fru_data_valid; /* 0=invalid, 1=valid */ >+} menlo_init_rsp_t; >+ >+#define MENLO_CMD_GET_INIT 0x00000007 >+#define MENLO_FW_TYPE_OPERATIONAL 0xABCD0001 >+#define MENLO_FW_TYPE_GOLDEN 0xABCD0002 >+#define MENLO_FW_TYPE_DIAG 0xABCD0003 >+ >+void >+BE_swap32_buffer(void *srcp, uint32_t cnt) >+{ >+ uint32_t *src = srcp; >+ uint32_t *dest = srcp; >+ uint32_t ldata; >+ int i; >+ >+ for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { >+ ldata = *src; >+ ldata = cpu_to_le32(ldata); >+ *dest = ldata; >+ src++; >+ dest++; >+ } >+} >+ >+ >+static int >+lpfc_alloc_menlo_genrequest64(struct lpfc_hba * phba, >+ struct lpfc_menlo_genreq64 *sysfs_menlo, >+ struct lpfc_sysfs_menlo_hdr *cmdhdr) >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); >+ struct ulp_bde64 *bpl = NULL; >+ IOCB_t *cmd = NULL, *rsp = NULL; >+ struct lpfc_sli *psli = NULL; >+ struct lpfc_sli_ring *pring = NULL; >+ int rc = 0; >+ uint32_t cmdsize; >+ uint32_t rspsize; >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ >+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { >+ rc = EACCES; >+ goto send_menlomgmt_cmd_exit; >+ } >+ >+ if (!sysfs_menlo) { >+ rc = EINVAL; >+ goto 
send_menlomgmt_cmd_exit; >+ } >+ >+ cmdsize = cmdhdr->cmdsize; >+ rspsize = cmdhdr->rspsize; >+ >+ if (!cmdsize || !rspsize || (cmdsize + rspsize > 80 * BUF_SZ_4K)) { >+ rc = ERANGE; >+ goto send_menlomgmt_cmd_exit; >+ } >+ >+ spin_lock_irq(shost->host_lock); >+ sysfs_menlo->cmdiocbq = lpfc_sli_get_iocbq(phba); >+ if (!sysfs_menlo->cmdiocbq) { >+ rc = ENOMEM; >+ spin_unlock_irq(shost->host_lock); >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1202 alloc_menlo_genreq64: couldn't alloc cmdiocbq\n"); >+ goto send_menlomgmt_cmd_exit; >+ } >+ cmd = &sysfs_menlo->cmdiocbq->iocb; >+ >+ sysfs_menlo->rspiocbq = lpfc_sli_get_iocbq(phba); >+ if (!sysfs_menlo->rspiocbq) { >+ rc = ENOMEM; >+ spin_unlock_irq(shost->host_lock); >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1203 alloc_menlo_genreq64: couldn't alloc rspiocbq\n"); >+ goto send_menlomgmt_cmd_exit; >+ } >+ spin_unlock_irq(shost->host_lock); >+ >+ rsp = &sysfs_menlo->rspiocbq->iocb; >+ >+ >+ sysfs_menlo->bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); >+ if (!sysfs_menlo->bmp) { >+ rc = ENOMEM; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1204 alloc_menlo_genreq64: couldn't alloc bmp\n"); >+ goto send_menlomgmt_cmd_exit; >+ } >+ >+ spin_lock_irq(shost->host_lock); >+ sysfs_menlo->bmp->virt = lpfc_mbuf_alloc(phba, 0, >+ &sysfs_menlo->bmp->phys); >+ if (!sysfs_menlo->bmp->virt) { >+ rc = ENOMEM; >+ spin_unlock_irq(shost->host_lock); >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1205 alloc_menlo_genreq64: couldn't alloc bpl\n"); >+ goto send_menlomgmt_cmd_exit; >+ } >+ spin_unlock_irq(shost->host_lock); >+ >+ INIT_LIST_HEAD(&sysfs_menlo->bmp->list); >+ bpl = (struct ulp_bde64 *) sysfs_menlo->bmp->virt; >+ memset((uint8_t*)bpl, 0 , 1024); >+ sysfs_menlo->indmp = __dfc_cmd_data_alloc(phba, NULL, bpl, cmdsize, 1); >+ if (!sysfs_menlo->indmp) { >+ rc = ENOMEM; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1206 alloc_menlo_genreq64: couldn't alloc cmdbuf\n"); >+ goto 
send_menlomgmt_cmd_exit; >+ } >+ sysfs_menlo->cmdbpl = bpl; >+ INIT_LIST_HEAD(&sysfs_menlo->inhead); >+ list_add_tail(&sysfs_menlo->inhead, &sysfs_menlo->indmp->dma.list); >+ >+ /* flag contains total number of BPLs for xmit */ >+ >+ bpl += sysfs_menlo->indmp->flag; >+ >+ sysfs_menlo->outdmp = __dfc_cmd_data_alloc(phba, NULL, bpl, rspsize, 0); >+ if (!sysfs_menlo->outdmp) { >+ rc = ENOMEM; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1207 alloc_menlo_genreq64: couldn't alloc rspbuf\n"); >+ goto send_menlomgmt_cmd_exit; >+ } >+ INIT_LIST_HEAD(&sysfs_menlo->outhead); >+ list_add_tail(&sysfs_menlo->outhead, &sysfs_menlo->outdmp->dma.list); >+ >+ cmd->un.genreq64.bdl.ulpIoTag32 = 0; >+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(sysfs_menlo->bmp->phys); >+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(sysfs_menlo->bmp->phys); >+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL; >+ cmd->un.genreq64.bdl.bdeSize = >+ (sysfs_menlo->outdmp->flag + sysfs_menlo->indmp->flag) >+ * sizeof(struct ulp_bde64); >+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR; >+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); >+ cmd->un.genreq64.w5.hcsw.Dfctl = 0; >+ cmd->un.genreq64.w5.hcsw.Rctl = FC_FCP_CMND; >+ cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ >+ cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ >+ cmd->ulpBdeCount = 1; >+ cmd->ulpClass = CLASS3; >+ cmd->ulpContext = MENLO_CONTEXT; /* 0 */ >+ cmd->ulpOwner = OWN_CHIP; >+ cmd->ulpPU = MENLO_PU; /* 3 */ >+ cmd->ulpLe = 1; /* Limited Edition */ >+ sysfs_menlo->cmdiocbq->vport = phba->pport; >+ sysfs_menlo->cmdiocbq->context1 = NULL; >+ sysfs_menlo->cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; >+ /* We want the firmware to timeout before we do */ >+ cmd->ulpTimeout = MENLO_TIMEOUT - 5; >+ >+ sysfs_menlo->timeout = cmd->ulpTimeout; >+ >+send_menlomgmt_cmd_exit: >+ return rc; >+} >+ >+void >+sysfs_menlo_genreq_free(struct lpfc_hba *phba, >+ struct lpfc_menlo_genreq64 *sysfs_menlo) >+{ >+ if ( !list_empty(&sysfs_menlo->outhead)) >+ 
list_del_init( &sysfs_menlo->outhead); >+ >+ if (!list_empty(&sysfs_menlo->inhead)) >+ list_del_init( &sysfs_menlo->inhead); >+ >+ if (sysfs_menlo->outdmp) { >+ __dfc_cmd_data_free(phba, sysfs_menlo->outdmp); >+ sysfs_menlo->outdmp = NULL; >+ } >+ if (sysfs_menlo->indmp) { >+ __dfc_cmd_data_free(phba, sysfs_menlo->indmp); >+ sysfs_menlo->indmp = NULL; >+ } >+ if (sysfs_menlo->bmp) { >+ lpfc_mbuf_free(phba, sysfs_menlo->bmp->virt, >+ sysfs_menlo->bmp->phys); >+ kfree(sysfs_menlo->bmp); >+ sysfs_menlo->bmp = NULL; >+ } >+ if (sysfs_menlo->rspiocbq) { >+ lpfc_sli_release_iocbq(phba, sysfs_menlo->rspiocbq); >+ sysfs_menlo->rspiocbq = NULL; >+ } >+ >+ if (sysfs_menlo->cmdiocbq) { >+ lpfc_sli_release_iocbq(phba, sysfs_menlo->cmdiocbq); >+ sysfs_menlo->cmdiocbq = NULL; >+ } >+} >+ >+static void >+sysfs_menlo_idle(struct lpfc_hba *phba, >+ struct lpfc_sysfs_menlo *sysfs_menlo) >+{ >+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); >+ >+ spin_lock_irq(&phba->hbalock); >+ list_del_init(&sysfs_menlo->list); >+ spin_unlock_irq(&phba->hbalock); >+ spin_lock_irq(shost->host_lock); >+ >+ if (sysfs_menlo->cr.cmdiocbq) >+ sysfs_menlo_genreq_free(phba, &sysfs_menlo->cr); >+ if (sysfs_menlo->cx.cmdiocbq) >+ sysfs_menlo_genreq_free(phba, &sysfs_menlo->cx); >+ >+ spin_unlock_irq(shost->host_lock); >+ kfree(sysfs_menlo); >+} >+ >+static void >+lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *phba, >+ struct lpfc_iocbq *cmdq, >+ struct lpfc_iocbq *rspq) >+{ >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1241 Menlo IOCB timeout: deleting %p\n", >+ cmdq->context3); >+ sysfs_menlo_idle(phba, (struct lpfc_sysfs_menlo *)cmdq->context3); >+} >+ >+static void >+lpfc_menlo_iocb_cmpl(struct lpfc_hba *phba, >+ struct lpfc_iocbq *cmdq, >+ struct lpfc_iocbq *rspq) >+{ >+ struct lpfc_sysfs_menlo * sysfs_menlo = >+ (struct lpfc_sysfs_menlo *)cmdq->context2; >+ struct lpfc_dmabufext *mlast = NULL; >+ IOCB_t *rsp = NULL; >+ IOCB_t *cmd = NULL; >+ uint32_t * tmpptr = NULL; >+ 
menlo_init_rsp_t *mlorsp = NULL; >+ >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1254 Menlo IOCB complete: %p\n", >+ cmdq->context2); >+ rsp = &rspq->iocb; >+ cmd = &cmdq->iocb; >+ if ( !sysfs_menlo ) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1255 Menlo IOCB complete:NULL CTX \n"); >+ return; >+ } >+ if ( rsp->ulpStatus ) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1242 iocb async cmpl: ulpStatus 0x%x " >+ "ulpWord[4] 0x%x\n", >+ rsp->ulpStatus, rsp->un.ulpWord[4]); >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1260 cr:%.08x %.08x %.08x %.08x " >+ "%.08x %.08x %.08x %.08x\n", >+ cmd->un.ulpWord[0], cmd->un.ulpWord[1], >+ cmd->un.ulpWord[2], cmd->un.ulpWord[3], >+ cmd->un.ulpWord[4], cmd->un.ulpWord[5], >+ cmd->un.ulpWord[6], cmd->un.ulpWord[7]); >+ mlast = list_get_first(&sysfs_menlo->cr.inhead, >+ struct lpfc_dmabufext, >+ dma.list); >+ if (!mlast) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1231 bad bpl:\n"); >+ goto lpfc_menlo_iocb_cmpl_ext; >+ } >+ tmpptr = ( uint32_t *) mlast->dma.virt; >+ BE_swap32_buffer ((uint8_t *) tmpptr, >+ sizeof( menlo_get_cmd_t)); >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1261 cmd:%.08x %.08x %.08x\n", >+ *tmpptr, *(tmpptr+1), *(tmpptr+2)); >+ goto lpfc_menlo_iocb_cmpl_ext; >+ } >+ >+ mlast = list_get_first(&sysfs_menlo->cr.outhead, >+ struct lpfc_dmabufext, >+ dma.list); >+ if (!mlast) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1256 bad bpl:\n"); >+ goto lpfc_menlo_iocb_cmpl_ext; >+ } >+ mlorsp = ( menlo_init_rsp_t *) mlast->dma.virt; >+ BE_swap32_buffer ((uint8_t *) mlorsp, >+ sizeof( menlo_init_rsp_t)); >+ >+ if (mlorsp->code != 0) { >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1243 Menlo command error. 
code=%d.\n", mlorsp->code); >+ goto lpfc_menlo_iocb_cmpl_ext; >+ >+ } >+ >+ switch (mlorsp->fw_type) >+ { >+ case MENLO_FW_TYPE_OPERATIONAL: /* Menlo Operational */ >+ break; >+ case MENLO_FW_TYPE_GOLDEN: /* Menlo Golden */ >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1246 FCoE chip is running golden firmware. " >+ "Update FCoE chip firmware immediately %x\n", >+ mlorsp->fw_type); >+ break; >+ case MENLO_FW_TYPE_DIAG: /* Menlo Diag */ >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1247 FCoE chip is running diagnostic " >+ "firmware. Operational use suspended. %x\n", >+ mlorsp->fw_type); >+ break; >+ default: >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1248 FCoE chip is running unknown " >+ "firmware x%x.\n", mlorsp->fw_type); >+ break; >+ } >+ if (!mlorsp->fru_data_valid >+ && (mlorsp->fw_type == MENLO_FW_TYPE_OPERATIONAL) >+ && (!mlorsp->maint_status)) >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1249 Invalid FRU data found on adapter." >+ "Return adapter to Emulex for repair\n"); >+ >+lpfc_menlo_iocb_cmpl_ext: >+ sysfs_menlo_idle(phba, (struct lpfc_sysfs_menlo *)cmdq->context2); >+} >+ >+static struct lpfc_sysfs_menlo * >+lpfc_get_sysfs_menlo(struct lpfc_hba *phba, uint8_t create) >+{ >+ struct lpfc_sysfs_menlo *sysfs_menlo; >+ pid_t pid; >+ >+ pid = current->pid; >+ >+ spin_lock_irq(&phba->hbalock); >+ list_for_each_entry(sysfs_menlo, &phba->sysfs_menlo_list, list) { >+ if (sysfs_menlo->pid == pid) { >+ spin_unlock_irq(&phba->hbalock); >+ return sysfs_menlo; >+ } >+ } >+ if (!create) { >+ spin_unlock_irq(&phba->hbalock); >+ return NULL; >+ } >+ spin_unlock_irq(&phba->hbalock); >+ sysfs_menlo = kzalloc(sizeof(struct lpfc_sysfs_menlo), >+ GFP_KERNEL); >+ if (!sysfs_menlo) >+ return NULL; >+ sysfs_menlo->state = SMENLO_IDLE; >+ sysfs_menlo->pid = pid; >+ spin_lock_irq(&phba->hbalock); >+ list_add_tail(&sysfs_menlo->list, &phba->sysfs_menlo_list); >+ >+ spin_unlock_irq(&phba->hbalock); >+ return sysfs_menlo; >+ >+} >+ 
>+static ssize_t >+lpfc_menlo_write(struct lpfc_hba *phba, >+ char *buf, loff_t off, size_t count) >+{ >+ struct lpfc_sysfs_menlo *sysfs_menlo; >+ struct lpfc_dmabufext *mlast = NULL; >+ struct lpfc_sysfs_menlo_hdr cmdhdrCR; >+ struct lpfc_menlo_genreq64 *genreq = NULL; >+ loff_t temp_off = 0; >+ struct ulp_bde64 *bpl = NULL; >+ int mlastcnt = 0; >+ uint32_t * tmpptr = NULL; >+ uint32_t addr_high = 0; >+ uint32_t addr_low = 0; >+ int hdr_offset = sizeof(struct lpfc_sysfs_menlo_hdr); >+ >+ if (off % 4 || count % 4 || (unsigned long)buf % 4) >+ return -EINVAL; >+ >+ if (count == 0) >+ return 0; >+ >+ if (off == 0) { >+ ssize_t rc; >+ struct lpfc_sysfs_menlo_hdr *cmdhdr = >+ (struct lpfc_sysfs_menlo_hdr *)buf; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1208 menlo_write: cmd %x cmdsz %d rspsz %d\n", >+ cmdhdr->cmd, cmdhdr->cmdsize, >+ cmdhdr->rspsize); >+ if (count != sizeof(struct lpfc_sysfs_menlo_hdr)) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1210 Invalid cmd size: cmd %x " >+ "cmdsz %d rspsz %d\n", >+ cmdhdr->cmd, cmdhdr->cmdsize, >+ cmdhdr->rspsize); >+ return -EINVAL; >+ } >+ >+ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 1); >+ if (!sysfs_menlo) >+ return -ENOMEM; >+ sysfs_menlo->cmdhdr = *cmdhdr; >+ if (cmdhdr->cmd == MENLO_CMD_FW_DOWNLOAD) { >+ sysfs_menlo->cmdhdr.cmdsize >+ -= sizeof(struct lpfc_sysfs_menlo_hdr); >+ >+ rc = lpfc_alloc_menlo_genrequest64(phba, >+ &sysfs_menlo->cx, >+ &sysfs_menlo->cmdhdr); >+ if (rc != 0) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1211 genreq alloc failed: %d\n", >+ (int) rc); >+ sysfs_menlo_idle(phba,sysfs_menlo); >+ return -ENOMEM; >+ } >+ cmdhdrCR.cmd = cmdhdr->cmd; >+ cmdhdrCR.cmdsize = sizeof(struct lpfc_sysfs_menlo_hdr); >+ cmdhdrCR.rspsize = 4; >+ } else >+ cmdhdrCR = *cmdhdr; >+ >+ rc = lpfc_alloc_menlo_genrequest64(phba, >+ &sysfs_menlo->cr,&cmdhdrCR); >+ if (rc != 0) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1223 menlo_write: couldn't alloc genreq %d\n", >+ (int) rc); >+ 
sysfs_menlo_idle(phba,sysfs_menlo); >+ return -ENOMEM; >+ } >+ } else { >+ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0); >+ if (!sysfs_menlo) >+ return -EAGAIN; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1212 menlo_write: sysfs_menlo %p cmd %x cmdsz %d" >+ " rspsz %d cr-off %d cx-off %d count %d\n", >+ sysfs_menlo, >+ sysfs_menlo->cmdhdr.cmd, >+ sysfs_menlo->cmdhdr.cmdsize, >+ sysfs_menlo->cmdhdr.rspsize, >+ (int)sysfs_menlo->cr.offset, >+ (int)sysfs_menlo->cx.offset, >+ (int)count); >+ } >+ >+ if ((count + sysfs_menlo->cr.offset) > sysfs_menlo->cmdhdr.cmdsize) { >+ if ( sysfs_menlo->cmdhdr.cmdsize != 4) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1213 FCoE cmd overflow: off %d + cnt %d > cmdsz %d\n", >+ (int)sysfs_menlo->cr.offset, >+ (int)count, >+ (int)sysfs_menlo->cmdhdr.cmdsize); >+ sysfs_menlo_idle(phba, sysfs_menlo); >+ return -ERANGE; >+ } >+ } >+ >+ spin_lock_irq(&phba->hbalock); >+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) >+ genreq = &sysfs_menlo->cx; >+ else >+ genreq = &sysfs_menlo->cr; >+ >+ if (off == 0) { >+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) { >+ tmpptr = NULL; >+ genreq = &sysfs_menlo->cr; >+ >+ if (!mlast) { >+ mlast = list_get_first(&genreq->inhead, >+ struct lpfc_dmabufext, >+ dma.list); >+ } >+ if (mlast) { >+ bpl = genreq->cmdbpl; >+ memcpy((uint8_t *) mlast->dma.virt, buf, count); >+ genreq->offset += count; >+ tmpptr = (uint32_t *)mlast->dma.virt; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1258 cmd %x cmdsz %d rspsz %d " >+ "copied %d addrL:%x addrH:%x\n", >+ *tmpptr, >+ *(tmpptr+1), >+ *(tmpptr+2), >+ (int)count, >+ bpl->addrLow,bpl->addrHigh); >+ } else { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1230 Could not find buffer for FCoE" >+ " cmd:off %d indmp %p %d\n", (int)off, >+ genreq->indmp,(int)count); >+ } >+ } >+ >+ sysfs_menlo->state = SMENLO_WRITING; >+ spin_unlock_irq(&phba->hbalock); >+ return count; >+ } else { >+ ssize_t adj_off = off - sizeof(struct 
lpfc_sysfs_menlo_hdr); >+ int found = 0; >+ if (sysfs_menlo->state != SMENLO_WRITING || >+ genreq->offset != adj_off) { >+ spin_unlock_irq(&phba->hbalock); >+ sysfs_menlo_idle(phba, sysfs_menlo); >+ return -EAGAIN; >+ } >+ mlast = NULL; >+ temp_off = sizeof(struct lpfc_sysfs_menlo_hdr); >+ if (genreq->indmp) { >+ list_for_each_entry(mlast, >+ &genreq->inhead, dma.list) { >+ if (temp_off == off) >+ break; >+ else >+ temp_off += BUF_SZ_4K; >+ mlastcnt++; >+ } >+ } >+ addr_low = le32_to_cpu( putPaddrLow(mlast->dma.phys) ); >+ addr_high = le32_to_cpu( putPaddrHigh(mlast->dma.phys) ); >+ bpl = genreq->cmdbpl; >+ bpl += mlastcnt; >+ if (bpl->addrLow != addr_low || bpl->addrHigh != addr_high) { >+ mlast = NULL; >+ list_for_each_entry(mlast, >+ &genreq->inhead, dma.list) { >+ >+ addr_low = le32_to_cpu( >+ putPaddrLow(mlast->dma.phys) ); >+ addr_high = le32_to_cpu( >+ putPaddrHigh(mlast->dma.phys) ); >+ if (bpl->addrLow == addr_low >+ && bpl->addrHigh == addr_high) { >+ found = 1; >+ break; >+ } >+ if ( mlastcnt < 3 ) >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1234 menlo_write: off:%d " >+ " mlastcnt:%d addl:%x addl:%x " >+ " addrh:%x addrh:%x mlast:%p\n", >+ (int)genreq->offset, >+ mlastcnt, >+ bpl->addrLow, >+ addr_low, >+ bpl->addrHigh, >+ addr_high,mlast); >+ } >+ } else >+ found = 1; >+ >+ if (!found) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1235 Could not find buffer for FCoE" >+ " cmd: off:%d poff:%d cnt:%d" >+ " mlastcnt:%d addl:%x addh:%x mdsz:%d \n", >+ (int)genreq->offset, >+ (int)off, >+ (int)count, >+ mlastcnt, >+ bpl->addrLow, >+ bpl->addrHigh, >+ (int)sysfs_menlo->cmdhdr.cmdsize); >+ mlast = NULL; >+ } >+ >+ } >+ >+ if (mlast) { >+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD ) { >+ bpl = genreq->cmdbpl; >+ bpl += mlastcnt; >+ tmpptr = (uint32_t *)mlast->dma.virt; >+ if ( genreq->offset < hdr_offset ) { >+ memcpy((uint8_t *) mlast->dma.virt, >+ buf+hdr_offset, >+ count-hdr_offset); >+ bpl->tus.f.bdeSize = (ushort)count-hdr_offset; 
>+ mlast->size = (ushort)count-hdr_offset; >+ bpl->tus.f.bdeFlags = 0; >+ bpl->tus.w = le32_to_cpu(bpl->tus.w); >+ } else { >+ bpl->tus.f.bdeSize = (ushort)count; >+ mlast->size = (ushort)count; >+ bpl->tus.f.bdeFlags = 0; >+ bpl->tus.w = le32_to_cpu(bpl->tus.w); >+ memcpy((uint8_t *) mlast->dma.virt, buf, count); >+ } >+ >+ } else >+ memcpy((uint8_t *) mlast->dma.virt, buf, count); >+ >+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD >+ && genreq->offset < hdr_offset) { >+ if (sysfs_menlo->cr.indmp >+ && sysfs_menlo->cr.indmp->dma.virt) { >+ mlast = sysfs_menlo->cr.indmp; >+ memcpy((uint8_t *) mlast->dma.virt, >+ buf, hdr_offset); >+ tmpptr = (uint32_t *)mlast->dma.virt; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1237 cmd %x cmd1 %x cmd2 %x " >+ "copied %d\n", >+ *tmpptr, >+ *(tmpptr+1), >+ *(tmpptr+2), >+ hdr_offset); >+ } >+ } >+ genreq->offset += count; >+ } else { >+ spin_unlock_irq(&phba->hbalock); >+ sysfs_menlo_idle(phba,sysfs_menlo); >+ return -ERANGE; >+ } >+ >+ spin_unlock_irq(&phba->hbalock); >+ return count; >+ >+} >+ >+ >+static ssize_t >+sysfs_menlo_write(struct kobject *kobj, struct bin_attribute *bin_attr, >+ char *buf, loff_t off, size_t count) >+{ >+ struct class_device *cdev = container_of(kobj, struct class_device, >+ kobj); >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ struct lpfc_hba *phba = vport->phba; >+ >+ return lpfc_menlo_write(phba, buf, off, count); >+} >+ >+ >+static ssize_t >+sysfs_menlo_issue_iocb_wait(struct lpfc_hba *phba, >+ struct lpfc_menlo_genreq64 *req, >+ struct lpfc_sysfs_menlo *sysfs_menlo) >+{ >+ struct lpfc_sli *psli = NULL; >+ struct lpfc_sli_ring *pring = NULL; >+ int rc = 0; >+ IOCB_t *rsp = NULL; >+ struct lpfc_iocbq *cmdiocbq = NULL; >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ rsp = &req->rspiocbq->iocb; >+ cmdiocbq = req->cmdiocbq; >+ >+ rc = lpfc_sli_issue_iocb_wait(phba, pring, req->cmdiocbq, 
req->rspiocbq, >+ req->timeout); >+ >+ if (rc == IOCB_TIMEDOUT) { >+ >+ cmdiocbq->context2 = NULL; >+ cmdiocbq->context3 = sysfs_menlo; >+ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl; >+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, >+ "1227 FCoE IOCB TMO: handler set for %p\n", >+ cmdiocbq->context3); >+ return -EACCES; >+ } >+ >+ if (rc != IOCB_SUCCESS) { >+ rc = -EFAULT; >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1216 FCoE IOCB failed: off %d rc=%d \n", >+ (int)req->offset, rc); >+ goto sysfs_menlo_issue_iocb_wait_exit; >+ } >+ >+ if (rsp->ulpStatus) { >+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { >+ switch (rsp->un.ulpWord[4] & 0xff) { >+ case IOERR_SEQUENCE_TIMEOUT: >+ rc = -ETIMEDOUT; >+ break; >+ case IOERR_INVALID_RPI: >+ rc = -EFAULT; >+ break; >+ default: >+ rc = -EFAULT; >+ break; >+ } >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1217 mlo_issueIocb:2 off %d rc=%d " >+ "ulpWord[4] 0x%x\n", >+ (int)req->offset, rc, rsp->un.ulpWord[4]); >+ } >+ } >+sysfs_menlo_issue_iocb_wait_exit: >+ return rc; >+} >+ >+ >+static ssize_t >+sysfs_menlo_issue_iocb(struct lpfc_hba *phba, struct lpfc_menlo_genreq64 *req, >+ struct lpfc_sysfs_menlo *sysfs_menlo) >+{ >+ struct lpfc_sli *psli = NULL; >+ struct lpfc_sli_ring *pring = NULL; >+ int rc = 0; >+ IOCB_t *rsp = NULL; >+ struct lpfc_iocbq *cmdiocbq = NULL; >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ rsp = &req->rspiocbq->iocb; >+ cmdiocbq = req->cmdiocbq; >+ cmdiocbq->context2 = sysfs_menlo; >+ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_cmpl; >+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, >+ "1257 lpfc_menlo_issue_iocb: handler set for %p\n", >+ cmdiocbq->context3); >+ >+ rc = lpfc_sli_issue_iocb(phba, pring, req->cmdiocbq, 0); >+ >+ if (rc == IOCB_TIMEDOUT) { >+ >+ cmdiocbq->context2 = NULL; >+ cmdiocbq->context3 = sysfs_menlo; >+ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl; >+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, >+ "1228 FCoE IOCB TMO: handler set for 
%p\n", >+ cmdiocbq->context3); >+ return -EACCES; >+ } >+ >+ if (rc != IOCB_SUCCESS) { >+ rc = -EFAULT; >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1238 FCoE IOCB failed: off %d rc=%d \n", >+ (int)req->offset, rc); >+ goto sysfs_menlo_issue_iocb_exit; >+ } >+ >+ if (rsp->ulpStatus) { >+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { >+ switch (rsp->un.ulpWord[4] & 0xff) { >+ case IOERR_SEQUENCE_TIMEOUT: >+ rc = -ETIMEDOUT; >+ break; >+ case IOERR_INVALID_RPI: >+ rc = -EFAULT; >+ break; >+ default: >+ rc = -EFAULT; >+ break; >+ } >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1239 mlo_issueIocb:2 off %d rc=%d " >+ "ulpWord[4] 0x%x\n", >+ (int)req->offset, rc, rsp->un.ulpWord[4]); >+ } >+ } >+sysfs_menlo_issue_iocb_exit: >+ return rc; >+} >+ >+static ssize_t >+lpfc_menlo_read(struct lpfc_hba *phba, char *buf, loff_t off, size_t count, >+ int wait) >+{ >+ struct lpfc_sli *psli = NULL; >+ struct lpfc_sli_ring *pring = NULL; >+ int rc = 0; >+ struct lpfc_sysfs_menlo *sysfs_menlo; >+ struct lpfc_dmabufext *mlast = NULL; >+ loff_t temp_off = 0; >+ struct lpfc_menlo_genreq64 *genreq = NULL; >+ IOCB_t *cmd = NULL, *rsp = NULL; >+ uint32_t * uptr = NULL; >+ >+ >+ psli = &phba->sli; >+ pring = &psli->ring[LPFC_ELS_RING]; >+ >+ if (off > SYSFS_MENLO_ATTR_SIZE) >+ return -ERANGE; >+ >+ if ((count + off) > SYSFS_MENLO_ATTR_SIZE) >+ count = SYSFS_MENLO_ATTR_SIZE - off; >+ >+ if (off % 4 || count % 4 || (unsigned long)buf % 4) >+ return -EINVAL; >+ >+ if (off && count == 0) >+ return 0; >+ >+ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0); >+ >+ if (!sysfs_menlo) >+ return -EPERM; >+ >+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { >+ sysfs_menlo_idle(phba, sysfs_menlo); >+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, >+ "1214 Can not issue FCoE cmd," >+ " SLI not active: off %d rc= -EACCESS\n", >+ (int)off); >+ return -EACCES; >+ } >+ >+ >+ if ((phba->link_state < LPFC_LINK_UP) >+ && !(psli->sli_flag & LPFC_MENLO_MAINT) >+ && wait) { >+ rc = -EPERM; >+ lpfc_printf_log(phba, 
KERN_ERR, LOG_LIBDFC, >+ "1215 Can not issue FCoE cmd:" >+ " not ready or not in maint mode" >+ " off %d rc=%d \n", >+ (int)off, rc); >+ spin_lock_irq(&phba->hbalock); >+ goto lpfc_menlo_read_err_exit; >+ } >+ >+ if (off == 0 && sysfs_menlo->state == SMENLO_WRITING) { >+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) { >+ spin_lock_irq(&phba->hbalock); >+ genreq = &sysfs_menlo->cr; >+ spin_unlock_irq(&phba->hbalock); >+ } >+ if ( wait ) >+ rc = sysfs_menlo_issue_iocb_wait(phba, >+ &sysfs_menlo->cr, >+ sysfs_menlo); >+ else { >+ rc = sysfs_menlo_issue_iocb(phba, >+ &sysfs_menlo->cr, >+ sysfs_menlo); >+ return rc; >+ } >+ >+ spin_lock_irq(&phba->hbalock); >+ if (rc < 0) { >+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, >+ "1224 FCoE iocb failed: off %d rc=%d \n", >+ (int)off, rc); >+ if (rc != -EACCES) >+ goto lpfc_menlo_read_err_exit; >+ else { >+ spin_unlock_irq(&phba->hbalock); >+ return rc; >+ } >+ } >+ >+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) { >+ cmd = &sysfs_menlo->cx.cmdiocbq->iocb; >+ rsp = &sysfs_menlo->cr.rspiocbq->iocb; >+ mlast = list_get_first(&sysfs_menlo->cr.outhead, >+ struct lpfc_dmabufext, >+ dma.list); >+ if ( *((uint32_t *) mlast->dma.virt) != 0 ) { >+ memcpy(buf,(uint8_t *) mlast->dma.virt, count); >+ goto lpfc_menlo_read_err_exit; >+ } >+ mlast = NULL; >+ >+ cmd->ulpCommand = CMD_GEN_REQUEST64_CX; >+ cmd->ulpContext = rsp->ulpContext; >+ cmd->ulpPU = 1; /* RelOffset */ >+ cmd->un.ulpWord[4] = 0; /* offset 0 */ >+ >+ spin_unlock_irq(&phba->hbalock); >+ rc = sysfs_menlo_issue_iocb_wait(phba, &sysfs_menlo->cx, >+ sysfs_menlo); >+ spin_lock_irq(&phba->hbalock); >+ if (rc < 0) { >+ uptr = (uint32_t *) rsp; >+ >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1225 menlo_read: off %d rc=%d " >+ "rspxri %d cmdxri %d \n", >+ (int)off, rc, rsp->ulpContext, >+ cmd->ulpContext); >+ uptr = (uint32_t *) >+ &sysfs_menlo->cr.cmdiocbq->iocb; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1236 cr:%.08x %.08x %.08x %.08x " >+ 
"%.08x %.08x %.08x %.08x %.08x\n", >+ *uptr, *(uptr+1), *(uptr+2), >+ *(uptr+3), *(uptr+4), *(uptr+5), >+ *(uptr+6), *(uptr+7), *(uptr+8)); >+ uptr = (uint32_t *)rsp; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1232 cr-rsp:%.08x %.08x %.08x %.08x " >+ "%.08x %.08x %.08x %.08x %.08x\n", >+ *uptr, *(uptr+1), *(uptr+2), >+ *(uptr+3), *(uptr+4), *(uptr+5), >+ *(uptr+6), *(uptr+7), *(uptr+8)); >+ uptr = (uint32_t *)cmd; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1233 cx:%.08x %.08x %.08x %.08x " >+ "%.08x %.08x %.08x %.08x %.08x\n", >+ *uptr, *(uptr+1), *(uptr+2), >+ *(uptr+3), *(uptr+4), *(uptr+5), >+ *(uptr+6), *(uptr+7), *(uptr+8)); >+ if (rc != -EACCES) >+ goto lpfc_menlo_read_err_exit; >+ else { >+ spin_unlock_irq(&phba->hbalock); >+ return rc; >+ } >+ } >+ } >+ sysfs_menlo->state = SMENLO_READING; >+ sysfs_menlo->cr.offset = 0; >+ >+ } else >+ spin_lock_irq(&phba->hbalock); >+ >+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) >+ genreq = &sysfs_menlo->cx; >+ else >+ genreq = &sysfs_menlo->cr; >+ >+ /* Copy back response data */ >+ if (sysfs_menlo->cmdhdr.rspsize > count) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1218 MloMgnt Rqst err Data: x%x %d %d %d %d\n", >+ genreq->outdmp->flag, >+ sysfs_menlo->cmdhdr.rspsize, >+ (int)count, (int)off, (int)genreq->offset); >+ } >+ >+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { >+ rc = -EAGAIN; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1219 menlo_read:4 off %d rc=%d \n", >+ (int)off, rc); >+ goto lpfc_menlo_read_err_exit; >+ } >+ else if ( sysfs_menlo->state != SMENLO_READING) { >+ rc = -EAGAIN; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1220 menlo_read:5 off %d reg off %d rc=%d state %x\n", >+ (int)off,(int)genreq->offset, sysfs_menlo->state, rc); >+ goto lpfc_menlo_read_err_exit; >+ } >+ temp_off = 0; >+ mlast = NULL; >+ list_for_each_entry(mlast, &genreq->outhead, dma.list) { >+ if (temp_off == off) >+ break; >+ else >+ temp_off += BUF_SZ_4K; >+ } >+ if (mlast) >+ 
memcpy(buf,(uint8_t *) mlast->dma.virt, count); >+ else { >+ rc = -ERANGE; >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1221 menlo_read:6 off %d rc=%d \n", >+ (int)off, rc); >+ goto lpfc_menlo_read_err_exit; >+ } >+ genreq->offset += count; >+ >+ >+ if (genreq->offset >= sysfs_menlo->cmdhdr.rspsize) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, >+ "1222 menlo_read: done off %d rc=%d" >+ " cnt %d rsp_code %x\n", >+ (int)off, rc, (int)count,*((uint32_t *)buf)); >+ rc = count; >+ goto lpfc_menlo_read_err_exit; >+ } >+ >+ if (count >= sysfs_menlo->cmdhdr.rspsize) >+ rc = sysfs_menlo->cmdhdr.rspsize; >+ else /* Can there be a > 4k response */ >+ rc = count; >+ if (genreq->offset < sysfs_menlo->cmdhdr.rspsize) { >+ spin_unlock_irq(&phba->hbalock); >+ return rc; >+ } >+ >+lpfc_menlo_read_err_exit: >+ spin_unlock_irq(&phba->hbalock); >+ sysfs_menlo_idle(phba,sysfs_menlo); >+ return rc; >+} >+ >+ >+static ssize_t >+sysfs_menlo_read(struct kobject *kobj, struct bin_attribute *bin_attr, >+ char *buf, loff_t off, size_t count) >+{ >+ struct class_device *cdev = container_of(kobj, struct class_device, >+ kobj); >+ struct Scsi_Host *shost = class_to_shost(cdev); >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ struct lpfc_hba *phba = vport->phba; >+ >+ return lpfc_menlo_read(phba, buf, off, count, 1); >+} >+int need_non_blocking = 0; >+void lpfc_check_menlo_cfg(struct lpfc_hba *phba) >+{ >+ uint32_t cmd_size; >+ uint32_t rsp_size; >+ menlo_get_cmd_t *cmd = NULL; >+ menlo_init_rsp_t *rsp = NULL; >+ int rc = 0; >+ >+ lpfc_printf_log (phba, KERN_INFO, LOG_LINK_EVENT, >+ "1253 Checking FCoE chip firmware.\n"); >+ if ( need_non_blocking ) /* Need non blocking issue_iocb */ >+ return; >+ >+ cmd_size = sizeof (menlo_get_cmd_t); >+ cmd = kmalloc(cmd_size, GFP_KERNEL); >+ if (!cmd ) { >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1240 Unable to allocate command buffer memory.\n"); >+ return; >+ } >+ >+ rsp_size = sizeof (menlo_init_rsp_t); >+ 
rsp = kmalloc(rsp_size, GFP_KERNEL); >+ if (!rsp ) { >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1244 Unable to allocate response buffer memory.\n"); >+ kfree(rsp); >+ return; >+ } >+ >+ memset(cmd,0, cmd_size); >+ memset(rsp,0, rsp_size); >+ >+ cmd->code = MENLO_CMD_GET_INIT; >+ cmd->context = cmd_size; >+ cmd->length = rsp_size; >+ rc = lpfc_menlo_write (phba, (char *) cmd, 0, cmd_size); >+ if ( rc != cmd_size ) { >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1250 Menlo command error. code=%d.\n", rc); >+ >+ kfree (cmd); >+ kfree (rsp); >+ return; >+ } >+ cmd->code = MENLO_CMD_GET_INIT; >+ cmd->context = 0; >+ cmd->length = rsp_size; >+ BE_swap32_buffer ((uint8_t *) cmd, cmd_size); >+ rc = lpfc_menlo_write (phba, (char *) cmd, cmd_size, cmd_size); >+ if ( rc != cmd_size ) { >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1251 Menlo command error. code=%d.\n", rc); >+ >+ kfree (cmd); >+ kfree (rsp); >+ return; >+ } >+ rc = lpfc_menlo_read (phba, (char *) rsp, 0, rsp_size,0); >+ if ( rc && rc != rsp_size ) { >+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT, >+ "1252 Menlo command error. code=%d.\n", rc); >+ >+ } >+ kfree (cmd); >+ kfree (rsp); >+ return; >+} >+ >+struct bin_attribute sysfs_menlo_attr = { >+ .attr = { >+ .name = "menlo", >+ .mode = S_IRUSR | S_IWUSR, >+ .owner = THIS_MODULE, >+ }, >+ .size = SYSFS_MENLO_ATTR_SIZE, >+ .read = sysfs_menlo_read, >+ .write = sysfs_menlo_write, >+}; >diff -urpN a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c >--- a/drivers/scsi/lpfc/lpfc_nportdisc.c 2008-09-05 17:47:41.719248000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c 2008-09-05 17:47:49.724878000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. 
* > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, st > (iocb->iocb_cmpl) (phba, iocb, iocb); > } > } >- >- /* If we are delaying issuing an ELS command, cancel it */ >- if (ndlp->nlp_flag & NLP_DELAY_TMO) >- lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); >+ lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); > return 0; > } > >@@ -287,6 +284,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, > pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; > lp = (uint32_t *) pcmd->virt; > sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); >+ if (wwn_to_u64(sp->portName.u.wwn) == 0) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "0140 PLOGI Reject: invalid nname\n"); >+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; >+ stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; >+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, >+ NULL); >+ return 0; >+ } >+ if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "0141 PLOGI Reject: invalid pname\n"); >+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; >+ stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; >+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, >+ NULL); >+ return 0; >+ } > if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { > /* Reject this request because invalid parameters */ > stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; >@@ -343,8 +358,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, > lpfc_config_link(phba, mbox); > mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; > mbox->vport = vport; >- rc = lpfc_sli_issue_mbox >- (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); >+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); > if (rc == MBX_NOT_FINISHED) { > mempool_free(mbox, phba->mbox_mem_pool); > goto out; >@@ -409,7 +423,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, > } > lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); > return 1; 
>- > out: > stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; > stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; >@@ -492,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, > else > lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); > >- if (!(ndlp->nlp_type & NLP_FABRIC) || >+ if ((!(ndlp->nlp_type & NLP_FABRIC) && >+ ((ndlp->nlp_type & NLP_FCP_TARGET) || >+ !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || > (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { > /* Only try to re-login if this is NOT a Fabric Node */ > mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); >@@ -501,12 +516,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, > spin_unlock_irq(shost->host_lock); > > ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; >- ndlp->nlp_prev_state = ndlp->nlp_state; >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); >- } else { >- ndlp->nlp_prev_state = ndlp->nlp_state; >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); > } >+ ndlp->nlp_prev_state = ndlp->nlp_state; >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > > spin_lock_irq(shost->host_lock); > ndlp->nlp_flag &= ~NLP_NPR_ADISC; >@@ -569,13 +581,15 @@ lpfc_disc_set_adisc(struct lpfc_vport *v > return 0; > } > >- /* Check config parameter use-adisc or FCP-2 */ >- if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || >- ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { >- spin_lock_irq(shost->host_lock); >- ndlp->nlp_flag |= NLP_NPR_ADISC; >- spin_unlock_irq(shost->host_lock); >- return 1; >+ if (!(vport->fc_flag & FC_PT2PT)) { >+ /* Check config parameter use-adisc or FCP-2 */ >+ if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || >+ ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { >+ spin_lock_irq(shost->host_lock); >+ ndlp->nlp_flag |= NLP_NPR_ADISC; >+ spin_unlock_irq(shost->host_lock); >+ return 1; >+ } > } > ndlp->nlp_flag &= ~NLP_NPR_ADISC; > lpfc_unreg_rpi(vport, ndlp); >@@ -587,13 +601,32 @@ lpfc_disc_illegal(struct lpfc_vport *vpo > void *arg, uint32_t evt) > { > lpfc_printf_vlog(vport, KERN_ERR, 
LOG_DISCOVERY, >- "0253 Illegal State Transition: node x%x " >+ "0271 Illegal State Transition: node x%x " > "event x%x, state x%x Data: x%x x%x\n", > ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, > ndlp->nlp_flag); > return ndlp->nlp_state; > } > >+static uint32_t >+lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, >+ void *arg, uint32_t evt) >+{ >+ /* This transition is only legal if we previously >+ * rcv'ed a PLOGI. Since we don't want 2 discovery threads >+ * working on the same NPortID, do nothing for this thread >+ * to stop it. >+ */ >+ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, >+ "0272 Illegal State Transition: node x%x " >+ "event x%x, state x%x Data: x%x x%x\n", >+ ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, >+ ndlp->nlp_flag); >+ } >+ return ndlp->nlp_state; >+} >+ > /* Start of Discovery State Machine routines */ > > static uint32_t >@@ -605,11 +638,8 @@ lpfc_rcv_plogi_unused_node(struct lpfc_v > cmdiocb = (struct lpfc_iocbq *) arg; > > if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { >- ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); > return ndlp->nlp_state; > } >- lpfc_drop_node(vport, ndlp); > return NLP_STE_FREED_NODE; > } > >@@ -618,7 +648,6 @@ lpfc_rcv_els_unused_node(struct lpfc_vpo > void *arg, uint32_t evt) > { > lpfc_issue_els_logo(vport, ndlp, 0); >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); > return ndlp->nlp_state; > } > >@@ -633,7 +662,6 @@ lpfc_rcv_logo_unused_node(struct lpfc_vp > ndlp->nlp_flag |= NLP_LOGO_ACC; > spin_unlock_irq(shost->host_lock); > lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); > > return ndlp->nlp_state; > } >@@ -642,7 +670,6 @@ static uint32_t > lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, > void *arg, uint32_t evt) > { >- lpfc_drop_node(vport, ndlp); > return 
NLP_STE_FREED_NODE; > } > >@@ -650,7 +677,6 @@ static uint32_t > lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, > void *arg, uint32_t evt) > { >- lpfc_drop_node(vport, ndlp); > return NLP_STE_FREED_NODE; > } > >@@ -658,6 +684,7 @@ static uint32_t > lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, > void *arg, uint32_t evt) > { >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > struct lpfc_hba *phba = vport->phba; > struct lpfc_iocbq *cmdiocb = arg; > struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; >@@ -683,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_v > lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, > NULL); > } else { >- lpfc_rcv_plogi(vport, ndlp, cmdiocb); >+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && >+ (ndlp->nlp_flag & NLP_NPR_2B_DISC) && >+ (vport->num_disc_nodes)) { >+ spin_lock_irq(shost->host_lock); >+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; >+ spin_unlock_irq(shost->host_lock); >+ /* Check if there are more PLOGIs to be sent */ >+ lpfc_more_plogi(vport); >+ if (vport->num_disc_nodes == 0) { >+ spin_lock_irq(shost->host_lock); >+ vport->fc_flag &= ~FC_NDISC_ACTIVE; >+ spin_unlock_irq(shost->host_lock); >+ lpfc_can_disctmo(vport); >+ lpfc_end_rscn(vport); >+ } >+ } > } /* If our portname was less */ > > return ndlp->nlp_state; >@@ -752,6 +794,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_ > uint32_t evt) > { > struct lpfc_hba *phba = vport->phba; >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > struct lpfc_iocbq *cmdiocb, *rspiocb; > struct lpfc_dmabuf *pcmd, *prsp, *mp; > uint32_t *lp; >@@ -778,6 +821,15 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_ > > lp = (uint32_t *) prsp->virt; > sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); >+ >+ /* Some switches have FDMI servers returning 0 for WWN */ >+ if ((ndlp->nlp_DID != FDMI_DID) && >+ (wwn_to_u64(sp->portName.u.wwn) == 0 || >+ wwn_to_u64(sp->nodeName.u.wwn) == 0)) { 
>+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, >+ "0142 PLOGI RSP: Invalid WWN.\n"); >+ goto out; >+ } > if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) > goto out; > /* PLOGI chkparm OK */ >@@ -828,13 +880,15 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_ > } > mbox->context2 = lpfc_nlp_get(ndlp); > mbox->vport = vport; >- if (lpfc_sli_issue_mbox(phba, mbox, >- (MBX_NOWAIT | MBX_STOP_IOCB)) >+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) > != MBX_NOT_FINISHED) { > lpfc_nlp_set_state(vport, ndlp, > NLP_STE_REG_LOGIN_ISSUE); > return ndlp->nlp_state; > } >+ /* decrement node reference count to the failed mbox >+ * command >+ */ > lpfc_nlp_put(ndlp); > mp = (struct lpfc_dmabuf *) mbox->context1; > lpfc_mbuf_free(phba, mp->virt, mp->phys); >@@ -864,13 +918,27 @@ out: > "0261 Cannot Register NameServer login\n"); > } > >- /* Free this node since the driver cannot login or has the wrong >- sparm */ >- lpfc_drop_node(vport, ndlp); >+ spin_lock_irq(shost->host_lock); >+ ndlp->nlp_flag |= NLP_DEFER_RM; >+ spin_unlock_irq(shost->host_lock); > return NLP_STE_FREED_NODE; > } > > static uint32_t >+lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, >+ void *arg, uint32_t evt) >+{ >+ return ndlp->nlp_state; >+} >+ >+static uint32_t >+lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, >+ struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) >+{ >+ return ndlp->nlp_state; >+} >+ >+static uint32_t > lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, > void *arg, uint32_t evt) > { >@@ -921,6 +989,7 @@ static uint32_t > lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, > void *arg, uint32_t evt) > { >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); > struct lpfc_hba *phba = vport->phba; > struct lpfc_iocbq *cmdiocb; > >@@ -929,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_v > > cmdiocb = (struct lpfc_iocbq *) arg; > >- if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) >- return 
ndlp->nlp_state; >+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { >+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { >+ spin_lock_irq(shost->host_lock); >+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; >+ spin_unlock_irq(shost->host_lock); > >+ if (vport->num_disc_nodes) { >+ lpfc_more_adisc(vport); >+ if ((vport->num_disc_nodes == 0) && >+ (vport->fc_npr_cnt)) >+ lpfc_els_disc_plogi(vport); >+ if (vport->num_disc_nodes == 0) { >+ spin_lock_irq(shost->host_lock); >+ vport->fc_flag &= ~FC_NDISC_ACTIVE; >+ spin_unlock_irq(shost->host_lock); >+ lpfc_can_disctmo(vport); >+ lpfc_end_rscn(vport); >+ } >+ } >+ } >+ return ndlp->nlp_state; >+ } > ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; > lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); > lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); >@@ -1137,7 +1225,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc > (ndlp == (struct lpfc_nodelist *) mb->context2)) { > mp = (struct lpfc_dmabuf *) (mb->context1); > if (mp) { >- lpfc_mbuf_free(phba, mp->virt, mp->phys); >+ __lpfc_mbuf_free(phba, mp->virt, mp->phys); > kfree(mp); > } > lpfc_nlp_put(ndlp); >@@ -1197,8 +1285,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct > * retry discovery. 
> */ > if (mb->mbxStatus == MBXERR_RPI_FULL) { >- ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); >+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > return ndlp->nlp_state; > } > >@@ -1378,7 +1466,7 @@ out: > lpfc_issue_els_logo(vport, ndlp, 0); > > ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); > return ndlp->nlp_state; > } > >@@ -1623,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vpor > struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; > > /* Ignore PLOGI if we have an outstanding LOGO */ >- if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) { >+ if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) > return ndlp->nlp_state; >- } >- > if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { >+ lpfc_cancel_retry_delay_tmo(vport, ndlp); > spin_lock_irq(shost->host_lock); >- ndlp->nlp_flag &= ~NLP_NPR_ADISC; >+ ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); > spin_unlock_irq(shost->host_lock); >- return ndlp->nlp_state; >- } >- >- /* send PLOGI immediately, move to PLOGI issue state */ >- if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { >- ndlp->nlp_prev_state = NLP_STE_NPR_NODE; >- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); >- lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); >+ } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { >+ /* send PLOGI immediately, move to PLOGI issue state */ >+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { >+ ndlp->nlp_prev_state = NLP_STE_NPR_NODE; >+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); >+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); >+ } > } >- > return ndlp->nlp_state; > } > >@@ -1691,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vpo > struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; > > lpfc_rcv_padisc(vport, ndlp, cmdiocb); >- > /* > * Do not start discovery if discovery is about to start > 
* or discovery in progress for this node. Starting discovery >@@ -1753,7 +1837,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vpo > > irsp = &rspiocb->iocb; > if (irsp->ulpStatus) { >- lpfc_drop_node(vport, ndlp); >+ ndlp->nlp_flag |= NLP_DEFER_RM; > return NLP_STE_FREED_NODE; > } > return ndlp->nlp_state; >@@ -1781,8 +1865,13 @@ static uint32_t > lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, > void *arg, uint32_t evt) > { >+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport); >+ if (ndlp->nlp_DID == Fabric_DID) { >+ spin_lock_irq(shost->host_lock); >+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); >+ spin_unlock_irq(shost->host_lock); >+ } > lpfc_unreg_rpi(vport, ndlp); >- /* This routine does nothing, just return the current state */ > return ndlp->nlp_state; > } > >@@ -1854,9 +1943,7 @@ lpfc_device_recov_npr_node(struct lpfc_v > spin_lock_irq(shost->host_lock); > ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); > spin_unlock_irq(shost->host_lock); >- if (ndlp->nlp_flag & NLP_DELAY_TMO) { >- lpfc_cancel_retry_delay_tmo(vport, ndlp); >- } >+ lpfc_cancel_retry_delay_tmo(vport, ndlp); > return ndlp->nlp_state; > } > >@@ -1942,9 +2029,9 @@ static uint32_t (*lpfc_disc_action[NLP_S > lpfc_rcv_els_plogi_issue, /* RCV_PRLO */ > lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */ > lpfc_disc_illegal, /* CMPL_PRLI */ >- lpfc_disc_illegal, /* CMPL_LOGO */ >+ lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */ > lpfc_disc_illegal, /* CMPL_ADISC */ >- lpfc_disc_illegal, /* CMPL_REG_LOGIN */ >+ lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */ > lpfc_device_rm_plogi_issue, /* DEVICE_RM */ > lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */ > >@@ -1968,7 +2055,7 @@ static uint32_t (*lpfc_disc_action[NLP_S > lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */ > lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */ > lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */ >- lpfc_disc_illegal, /* CMPL_PLOGI */ >+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ > 
lpfc_disc_illegal, /* CMPL_PRLI */ > lpfc_disc_illegal, /* CMPL_LOGO */ > lpfc_disc_illegal, /* CMPL_ADISC */ >@@ -1982,7 +2069,7 @@ static uint32_t (*lpfc_disc_action[NLP_S > lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */ > lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */ > lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */ >- lpfc_disc_illegal, /* CMPL_PLOGI */ >+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ > lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */ > lpfc_disc_illegal, /* CMPL_LOGO */ > lpfc_disc_illegal, /* CMPL_ADISC */ >@@ -2040,8 +2127,11 @@ lpfc_disc_state_machine(struct lpfc_vpor > uint32_t cur_state, rc; > uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, > uint32_t); >+ uint32_t got_ndlp = 0; >+ >+ if (lpfc_nlp_get(ndlp)) >+ got_ndlp = 1; > >- lpfc_nlp_get(ndlp); > cur_state = ndlp->nlp_state; > > /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ >@@ -2058,15 +2148,24 @@ lpfc_disc_state_machine(struct lpfc_vpor > rc = (func) (vport, ndlp, arg, evt); > > /* DSM out state <rc> on NPort <nlp_DID> */ >- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >+ if (got_ndlp) { >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, > "0212 DSM out state %d on NPort x%x Data: x%x\n", > rc, ndlp->nlp_DID, ndlp->nlp_flag); > >- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, >- "DSM out: ste:%d did:x%x flg:x%x", >- rc, ndlp->nlp_DID, ndlp->nlp_flag); >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, >+ "DSM out: ste:%d did:x%x flg:x%x", >+ rc, ndlp->nlp_DID, ndlp->nlp_flag); >+ /* Decrement the ndlp reference count held for this function */ >+ lpfc_nlp_put(ndlp); >+ } else { >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, >+ "0213 DSM out state %d on NPort free\n", rc); > >- lpfc_nlp_put(ndlp); >+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, >+ "DSM out: ste:%d did:x%x flg:x%x", >+ rc, 0, 0); >+ } > > return rc; > } >diff -urpN a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c >--- a/drivers/scsi/lpfc/lpfc_scsi.c 2008-09-05 
17:47:41.725247000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_scsi.c 2008-09-05 17:47:49.699878000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -50,6 +50,7 @@ void > lpfc_adjust_queue_depth(struct lpfc_hba *phba) > { > unsigned long flags; >+ uint32_t evt_posted; > > spin_lock_irqsave(&phba->hbalock, flags); > atomic_inc(&phba->num_rsrc_err); >@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba > spin_unlock_irqrestore(&phba->hbalock, flags); > > spin_lock_irqsave(&phba->pport->work_port_lock, flags); >- if ((phba->pport->work_port_events & >- WORKER_RAMP_DOWN_QUEUE) == 0) { >+ evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; >+ if (!evt_posted) > phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; >- } > spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); > >- spin_lock_irqsave(&phba->hbalock, flags); >- if (phba->work_wait) >- wake_up(phba->work_wait); >- spin_unlock_irqrestore(&phba->hbalock, flags); >- >+ if (!evt_posted) >+ lpfc_worker_wake_up(phba); > return; > } > >@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor > { > unsigned long flags; > struct lpfc_hba *phba = vport->phba; >+ uint32_t evt_posted; > atomic_inc(&phba->num_cmd_success); > > if (vport->cfg_lun_queue_depth <= sdev->queue_depth) >@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vpor > spin_unlock_irqrestore(&phba->hbalock, flags); > > spin_lock_irqsave(&phba->pport->work_port_lock, flags); >- if ((phba->pport->work_port_events & >- WORKER_RAMP_UP_QUEUE) == 0) { >+ evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE; >+ if 
(!evt_posted) > phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; >- } > spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); > >- spin_lock_irqsave(&phba->hbalock, flags); >- if (phba->work_wait) >- wake_up(phba->work_wait); >- spin_unlock_irqrestore(&phba->hbalock, flags); >+ if (!evt_posted) >+ lpfc_worker_wake_up(phba); >+ return; > } > > void >@@ -130,7 +126,7 @@ lpfc_ramp_down_queue_handler(struct lpfc > > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { > shost = lpfc_shost_from_vport(vports[i]); > shost_for_each_device(sdev, shost) { > new_queue_depth = >@@ -151,7 +147,7 @@ lpfc_ramp_down_queue_handler(struct lpfc > new_queue_depth); > } > } >- lpfc_destroy_vport_work_array(vports); >+ lpfc_destroy_vport_work_array(phba, vports); > atomic_set(&phba->num_rsrc_err, 0); > atomic_set(&phba->num_cmd_success, 0); > } >@@ -166,9 +162,12 @@ lpfc_ramp_up_queue_handler(struct lpfc_h > > vports = lpfc_create_vport_work_array(phba); > if (vports != NULL) >- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { >+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { > shost = lpfc_shost_from_vport(vports[i]); > shost_for_each_device(sdev, shost) { >+ if (vports[i]->cfg_lun_queue_depth <= >+ sdev->queue_depth) >+ continue; > if (sdev->ordered_tags) > scsi_adjust_queue_depth(sdev, > MSG_ORDERED_TAG, >@@ -179,7 +178,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_h > sdev->queue_depth+1); > } > } >- lpfc_destroy_vport_work_array(vports); >+ lpfc_destroy_vport_work_array(phba, vports); > atomic_set(&phba->num_rsrc_err, 0); > atomic_set(&phba->num_cmd_success, 0); > } >@@ -314,9 +313,13 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * > struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; > struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; > IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; >+ uint32_t vpi = (lpfc_cmd->cur_iocbq.vport 
>+ ? lpfc_cmd->cur_iocbq.vport->vpi >+ : 0); > dma_addr_t physaddr; > uint32_t i, num_bde = 0; >- int nseg, datadir = scsi_cmnd->sc_data_direction; >+ int datadir = scsi_cmnd->sc_data_direction; >+ int dma_error; > > /* > * There are three possibilities here - use scatter-gather segment, use >@@ -325,26 +328,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * > * data bde entry. > */ > bpl += 2; >- if (scsi_sg_count(scsi_cmnd)) { >+ if (scsi_cmnd->use_sg) { > /* > * The driver stores the segment count returned from pci_map_sg > * because this a count of dma-mappings used to map the use_sg > * pages. They are not guaranteed to be the same for those > * architectures that implement an IOMMU. > */ >- >- nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), >- scsi_sg_count(scsi_cmnd), datadir); >- if (unlikely(!nseg)) >+ sgel = (struct scatterlist *)scsi_cmnd->request_buffer; >+ lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel, >+ scsi_cmnd->use_sg, datadir); >+ if (lpfc_cmd->seg_cnt == 0) > return 1; > >- lpfc_cmd->seg_cnt = nseg; > if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { > printk(KERN_ERR "%s: Too many sg segments from " > "dma_map_sg. Config %d, seg_cnt %d", > __FUNCTION__, phba->cfg_sg_seg_cnt, > lpfc_cmd->seg_cnt); >- scsi_dma_unmap(scsi_cmnd); >+ dma_unmap_sg(&phba->pcidev->dev, sgel, >+ lpfc_cmd->seg_cnt, datadir); > return 1; > } > >@@ -354,7 +357,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * > * single scsi command. Just run through the seg_cnt and format > * the bde's. 
> */ >- scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) { >+ for (i = 0; i < lpfc_cmd->seg_cnt; i++) { > physaddr = sg_dma_address(sgel); > bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); > bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); >@@ -365,8 +368,34 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * > bpl->tus.f.bdeFlags = BUFF_USE_RCV; > bpl->tus.w = le32_to_cpu(bpl->tus.w); > bpl++; >+ sgel++; > num_bde++; > } >+ } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { >+ physaddr = dma_map_single(&phba->pcidev->dev, >+ scsi_cmnd->request_buffer, >+ scsi_cmnd->request_bufflen, >+ datadir); >+ dma_error = dma_mapping_error(physaddr); >+ if (dma_error) { >+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP, >+ "(%d):0718 Unable to dma_map_single " >+ "request_buffer: x%x\n", >+ vpi, dma_error); >+ return 1; >+ } >+ >+ lpfc_cmd->nonsg_phys = physaddr; >+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); >+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); >+ bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen; >+ if (datadir == DMA_TO_DEVICE) >+ bpl->tus.f.bdeFlags = 0; >+ else >+ bpl->tus.f.bdeFlags = BUFF_USE_RCV; >+ bpl->tus.w = le32_to_cpu(bpl->tus.w); >+ num_bde = 1; >+ bpl++; > } > > /* >@@ -380,7 +409,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * > (num_bde * sizeof (struct ulp_bde64)); > iocb_cmd->ulpBdeCount = 1; > iocb_cmd->ulpLe = 1; >- fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd)); >+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_cmnd->request_bufflen); > return 0; > } > >@@ -393,13 +422,21 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba > * a request buffer, but did not request use_sg. There is a third > * case, but it does not require resource deallocation. 
> */ >- if (psb->seg_cnt > 0) >- scsi_dma_unmap(psb->pCmd); >+ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { >+ dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, >+ psb->seg_cnt, psb->pCmd->sc_data_direction); >+ } else { >+ if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { >+ dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, >+ psb->pCmd->request_bufflen, >+ psb->pCmd->sc_data_direction); >+ } >+ } > } > > static void >-lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, >- struct lpfc_iocbq *rsp_iocb) >+lpfc_handle_fcp_err(struct lpfc_hba *phba, struct lpfc_vport *vport, >+ struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) > { > struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; > struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; >@@ -436,15 +473,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *v > if (!scsi_status && (resp_info & RESID_UNDER)) > logit = LOG_FCP; > >- lpfc_printf_vlog(vport, KERN_WARNING, logit, >- "0730 FCP command x%x failed: x%x SNS x%x x%x " >- "Data: x%x x%x x%x x%x x%x\n", >- cmnd->cmnd[0], scsi_status, >- be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, >- be32_to_cpu(fcprsp->rspResId), >- be32_to_cpu(fcprsp->rspSnsLen), >- be32_to_cpu(fcprsp->rspRspLen), >- fcprsp->rspInfo3); >+ lpfc_printf_log(phba, KERN_WARNING, logit, >+ "(%d):0730 FCP command x%x failed: x%x SNS x%x x%x " >+ "Data: x%x x%x x%x x%x x%x\n", >+ (vport ? 
vport->vpi : 0), >+ cmnd->cmnd[0], scsi_status, >+ be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, >+ be32_to_cpu(fcprsp->rspResId), >+ be32_to_cpu(fcprsp->rspSnsLen), >+ be32_to_cpu(fcprsp->rspRspLen), >+ fcprsp->rspInfo3); > > if (resp_info & RSP_LEN_VALID) { > rsplen = be32_to_cpu(fcprsp->rspRspLen); >@@ -455,17 +493,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *v > } > } > >- scsi_set_resid(cmnd, 0); >+ cmnd->resid = 0; > if (resp_info & RESID_UNDER) { >- scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); >- >- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, >- "0716 FCP Read Underrun, expected %d, " >- "residual %d Data: x%x x%x x%x\n", >- be32_to_cpu(fcpcmd->fcpDl), >- scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], >- cmnd->underflow); >+ cmnd->resid = be32_to_cpu(fcprsp->rspResId); > >+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP, >+ "(%d):0716 FCP Read Underrun, expected %d, " >+ "residual %d Data: x%x x%x x%x\n", >+ (vport ? vport->vpi : 0), >+ be32_to_cpu(fcpcmd->fcpDl), >+ cmnd->resid, fcpi_parm, cmnd->cmnd[0], >+ cmnd->underflow); > /* > * If there is an under run check if under run reported by > * storage array is same as the under run reported by HBA. >@@ -473,15 +511,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *v > */ > if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && > fcpi_parm && >- (scsi_get_resid(cmnd) != fcpi_parm)) { >- lpfc_printf_vlog(vport, KERN_WARNING, >- LOG_FCP | LOG_FCP_ERROR, >- "0735 FCP Read Check Error " >- "and Underrun Data: x%x x%x x%x x%x\n", >- be32_to_cpu(fcpcmd->fcpDl), >- scsi_get_resid(cmnd), fcpi_parm, >- cmnd->cmnd[0]); >- scsi_set_resid(cmnd, scsi_bufflen(cmnd)); >+ (cmnd->resid != fcpi_parm)) { >+ lpfc_printf_log(phba, KERN_WARNING, >+ LOG_FCP | LOG_FCP_ERROR, >+ "(%d):0735 FCP Read Check Error " >+ "and Underrun Data: x%x x%x x%x x%x\n", >+ (vport ? 
vport->vpi : 0), >+ be32_to_cpu(fcpcmd->fcpDl), >+ cmnd->resid, fcpi_parm, >+ cmnd->cmnd[0]); >+ cmnd->resid = cmnd->request_bufflen; > host_status = DID_ERROR; > } > /* >@@ -492,21 +531,23 @@ lpfc_handle_fcp_err(struct lpfc_vport *v > */ > if (!(resp_info & SNS_LEN_VALID) && > (scsi_status == SAM_STAT_GOOD) && >- (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) >- < cmnd->underflow)) { >- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, >- "0717 FCP command x%x residual " >- "underrun converted to error " >- "Data: x%x x%x x%x\n", >- cmnd->cmnd[0], scsi_bufflen(cmnd), >- scsi_get_resid(cmnd), cmnd->underflow); >+ (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP, >+ "(%d):0717 FCP command x%x residual " >+ "underrun converted to error " >+ "Data: x%x x%x x%x\n", >+ (vport ? vport->vpi : 0), >+ cmnd->cmnd[0], cmnd->request_bufflen, >+ cmnd->resid, cmnd->underflow); > host_status = DID_ERROR; > } > } else if (resp_info & RESID_OVER) { >- lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, >- "0720 FCP command x%x residual overrun error. " >- "Data: x%x x%x \n", cmnd->cmnd[0], >- scsi_bufflen(cmnd), scsi_get_resid(cmnd)); >+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, >+ "(%d):0720 FCP command x%x residual " >+ "overrun error. Data: x%x x%x \n", >+ (vport ? vport->vpi : 0), >+ cmnd->cmnd[0], >+ cmnd->request_bufflen, cmnd->resid); > host_status = DID_ERROR; > > /* >@@ -515,14 +556,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *v > */ > } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && > (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { >- lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, >- "0734 FCP Read Check Error Data: " >- "x%x x%x x%x x%x\n", >- be32_to_cpu(fcpcmd->fcpDl), >- be32_to_cpu(fcprsp->rspResId), >- fcpi_parm, cmnd->cmnd[0]); >+ lpfc_printf_log(phba, KERN_WARNING, >+ LOG_FCP | LOG_FCP_ERROR, >+ "(%d):0734 FCP Read Check Error Data: " >+ "x%x x%x x%x x%x\n", >+ (vport ? 
vport->vpi : 0), >+ be32_to_cpu(fcpcmd->fcpDl), >+ be32_to_cpu(fcprsp->rspResId), >+ fcpi_parm, cmnd->cmnd[0]); > host_status = DID_ERROR; >- scsi_set_resid(cmnd, scsi_bufflen(cmnd)); >+ cmnd->resid = cmnd->request_bufflen; > } > > out: >@@ -542,6 +585,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > int result; > struct scsi_device *sdev, *tmp_sdev; > int depth = 0; >+ unsigned long flags; > > lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; > lpfc_cmd->status = pIocbOut->iocb.ulpStatus; >@@ -553,11 +597,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > else if (lpfc_cmd->status >= IOSTAT_CNT) > lpfc_cmd->status = IOSTAT_DEFAULT; > >- lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, >+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, > "0729 FCP cmd x%x failed <%d/%d> " > "status: x%x result: x%x Data: x%x x%x\n", > cmd->cmnd[0], >- cmd->device ? cmd->device->id : 0xffff, >+ vport ? vport->vpi : 0, > cmd->device ? cmd->device->lun : 0xffff, > lpfc_cmd->status, lpfc_cmd->result, > pIocbOut->iocb.ulpContext, >@@ -566,7 +610,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > switch (lpfc_cmd->status) { > case IOSTAT_FCP_RSP_ERROR: > /* Call FCP RSP handler to determine result */ >- lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut); >+ lpfc_handle_fcp_err(phba, vport, lpfc_cmd, pIocbOut); > break; > case IOSTAT_NPORT_BSY: > case IOSTAT_FABRIC_BSY: >@@ -577,14 +621,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > lpfc_cmd->result == IOERR_NO_RESOURCES || > lpfc_cmd->result == RJT_LOGIN_REQUIRED) { > cmd->result = ScsiResult(DID_REQUEUE, 0); >- break; >- } /* else: fall through */ >+ break; >+ } /* else: fall through */ > default: > cmd->result = ScsiResult(DID_ERROR, 0); > break; > } > >- if ((pnode == NULL ) >+ if (!pnode || !NLP_CHK_NODE_ACT(pnode) > || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) > cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); > } else { >@@ -594,12 +638,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { > 
uint32_t *lp = (uint32_t *)cmd->sense_buffer; > >- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, >+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP, > "0710 Iodone <%d/%d> cmd %p, error " > "x%x SNS x%x x%x Data: x%x x%x\n", >- cmd->device->id, cmd->device->lun, cmd, >+ vport ? vport->vpi : 0, cmd->device->lun, cmd, > cmd->result, *lp, *(lp + 3), cmd->retries, >- scsi_get_resid(cmd)); >+ cmd->resid); > } > > result = cmd->result; >@@ -608,15 +652,26 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > cmd->scsi_done(cmd); > > if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { >+ /* >+ * If there is a thread waiting for command completion >+ * wake up the thread. >+ */ >+ spin_lock_irqsave(sdev->host->host_lock, flags); >+ lpfc_cmd->pCmd = NULL; >+ if (lpfc_cmd->waitq) >+ wake_up(lpfc_cmd->waitq); >+ spin_unlock_irqrestore(sdev->host->host_lock, flags); > lpfc_release_scsi_buf(phba, lpfc_cmd); > return; > } > >+ if (!vport) >+ goto out_vport_deleted; > > if (!result) > lpfc_rampup_queue_depth(vport, sdev); > >- if (!result && pnode != NULL && >+ if (!result && pnode && NLP_CHK_NODE_ACT(pnode) && > ((jiffies - pnode->last_ramp_up_time) > > LPFC_Q_RAMP_UP_INTERVAL * HZ) && > ((jiffies - pnode->last_q_full_time) > >@@ -644,7 +699,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > * Check for queue full. If the lun is reporting queue full, then > * back off the lun queue depth to prevent target overloads. > */ >- if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) { >+ if (result == SAM_STAT_TASK_SET_FULL && pnode && >+ NLP_CHK_NODE_ACT(pnode)) { > pnode->last_q_full_time = jiffies; > > shost_for_each_device(tmp_sdev, sdev->host) { >@@ -669,6 +725,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba > } > } > >+ out_vport_deleted: >+ /* >+ * If there is a thread waiting for command completion >+ * wake up the thread. 
>+ */ >+ spin_lock_irqsave(sdev->host->host_lock, flags); >+ lpfc_cmd->pCmd = NULL; >+ if (lpfc_cmd->waitq) >+ wake_up(lpfc_cmd->waitq); >+ spin_unlock_irqrestore(sdev->host->host_lock, flags); >+ > lpfc_release_scsi_buf(phba, lpfc_cmd); > } > >@@ -684,6 +751,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *v > int datadir = scsi_cmnd->sc_data_direction; > char tag[2]; > >+ if (!pnode || !NLP_CHK_NODE_ACT(pnode)) >+ return; >+ > lpfc_cmd->fcp_rsp->rspSnsLen = 0; > /* clear task management bits */ > lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; >@@ -714,7 +784,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *v > * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first > * data bde entry. > */ >- if (scsi_sg_count(scsi_cmnd)) { >+ if (scsi_cmnd->use_sg) { > if (datadir == DMA_TO_DEVICE) { > iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; > iocb_cmd->un.fcpi.fcpi_parm = 0; >@@ -724,7 +794,23 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *v > } else { > iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; > iocb_cmd->ulpPU = PARM_READ_CHECK; >- iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); >+ iocb_cmd->un.fcpi.fcpi_parm = >+ scsi_cmnd->request_bufflen; >+ fcp_cmnd->fcpCntl3 = READ_DATA; >+ phba->fc4InputRequests++; >+ } >+ } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { >+ if (datadir == DMA_TO_DEVICE) { >+ iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; >+ iocb_cmd->un.fcpi.fcpi_parm = 0; >+ iocb_cmd->ulpPU = 0; >+ fcp_cmnd->fcpCntl3 = WRITE_DATA; >+ phba->fc4OutputRequests++; >+ } else { >+ iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; >+ iocb_cmd->ulpPU = PARM_READ_CHECK; >+ iocb_cmd->un.fcpi.fcpi_parm = >+ scsi_cmnd->request_bufflen; > fcp_cmnd->fcpCntl3 = READ_DATA; > phba->fc4InputRequests++; > } >@@ -743,6 +829,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *v > piocbq->iocb.ulpContext = pnode->nlp_rpi; > if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) > piocbq->iocb.ulpFCP2Rcvy = 1; >+ else >+ piocbq->iocb.ulpFCP2Rcvy = 0; > > piocbq->iocb.ulpClass = (pnode->nlp_fcp_info 
& 0x0f); > piocbq->context1 = lpfc_cmd; >@@ -763,9 +851,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc > struct lpfc_rport_data *rdata = lpfc_cmd->rdata; > struct lpfc_nodelist *ndlp = rdata->pnode; > >- if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || >+ ndlp->nlp_state != NLP_STE_MAPPED_NODE) > return 0; >- } > > piocbq = &(lpfc_cmd->cur_iocbq); > piocbq->vport = vport; >@@ -819,14 +907,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf > struct lpfc_iocbq *iocbq; > struct lpfc_iocbq *iocbqrsp; > int ret; >+ int status; > >- if (!rdata->pnode) >+ if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) > return FAILED; > > lpfc_cmd->rdata = rdata; >- ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, >+ status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, > FCP_TARGET_RESET); >- if (!ret) >+ if (!status) > return FAILED; > > iocbq = &lpfc_cmd->cur_iocbq; >@@ -839,12 +928,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf > lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, > "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", > tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); >- ret = lpfc_sli_issue_iocb_wait(phba, >+ status = lpfc_sli_issue_iocb_wait(phba, > &phba->sli.ring[phba->sli.fcp_ring], > iocbq, iocbqrsp, lpfc_cmd->timeout); >- if (ret != IOCB_SUCCESS) { >- if (ret == IOCB_TIMEDOUT) >+ if (status != IOCB_SUCCESS) { >+ if (status == IOCB_TIMEDOUT) { > iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; >+ ret = TIMEOUT_ERROR; >+ } else >+ ret = FAILED; > lpfc_cmd->status = IOSTAT_DRIVER_REJECT; > } else { > ret = SUCCESS; >@@ -924,12 +1016,33 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd > struct lpfc_rport_data *rdata = cmnd->device->hostdata; > struct lpfc_nodelist *ndlp = rdata->pnode; > struct lpfc_scsi_buf *lpfc_cmd; >+ struct scsi_device *sdev; > struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); > int err; > > err = fc_remote_port_chkready(rport); > if (err) { > cmnd->result = 
err; >+ sdev = cmnd->device; >+ if ((phba->link_state == LPFC_HBA_ERROR) && >+ (cmnd->result == ScsiResult(DID_NO_CONNECT, 0)) && >+ (sdev->queue_depth != 1)) { >+ >+ /* If we reach this point, the HBA is not responding >+ * and has been taken offline. If alot of SCSI IO >+ * was active, this could cause a massive amount of >+ * SCSI layer initiated logging. On some systems, >+ * this massive amount of logging has been known to >+ * cause CPU soft lockups. In an attempt to throttle >+ * the amount of logging, set the sdev queue depth to 1. >+ */ >+ if (sdev->ordered_tags) >+ scsi_adjust_queue_depth(sdev, >+ MSG_ORDERED_TAG, 1); >+ else >+ scsi_adjust_queue_depth(sdev, >+ MSG_SIMPLE_TAG, 1); >+ } > goto out_fail_command; > } > >@@ -937,7 +1050,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd > * Catch race where our node has transitioned, but the > * transport is still transitioning. > */ >- if (!ndlp) { >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { > cmnd->result = ScsiResult(DID_BUS_BUSY, 0); > goto out_fail_command; > } >@@ -967,6 +1080,9 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd > > lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); > >+ if ((cmnd->cmnd[0] == REPORT_LUNS) && phba->cfg_enable_npiv) >+ mod_timer(&cmnd->eh_timeout, jiffies + 60 * HZ); >+ > err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], > &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); > if (err) >@@ -1018,8 +1134,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmn > struct lpfc_iocbq *abtsiocb; > struct lpfc_scsi_buf *lpfc_cmd; > IOCB_t *cmd, *icmd; >- unsigned int loop_count = 0; > int ret = SUCCESS; >+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); >+ > > lpfc_block_error_handler(cmnd); > lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; >@@ -1074,17 +1191,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmn > if (phba->cfg_poll & DISABLE_FCP_RING_INT) > lpfc_sli_poll_fcp_ring (phba); > >+ lpfc_cmd->waitq = &waitq; > /* Wait for abort to complete */ >- while (lpfc_cmd->pCmd == cmnd) >- { >- if 
(phba->cfg_poll & DISABLE_FCP_RING_INT) >- lpfc_sli_poll_fcp_ring (phba); >+ wait_event_timeout(waitq, >+ (lpfc_cmd->pCmd != cmnd), >+ (2*vport->cfg_devloss_tmo*HZ)); > >- schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ); >- if (++loop_count >- > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT) >- break; >- } >+ spin_lock_irq(shost->host_lock); >+ lpfc_cmd->waitq = NULL; >+ spin_unlock_irq(shost->host_lock); > > if (lpfc_cmd->pCmd == cmnd) { > ret = FAILED; >@@ -1114,121 +1229,96 @@ lpfc_device_reset_handler(struct scsi_cm > struct lpfc_iocbq *iocbq, *iocbqrsp; > struct lpfc_rport_data *rdata = cmnd->device->hostdata; > struct lpfc_nodelist *pnode = rdata->pnode; >- uint32_t cmd_result = 0, cmd_status = 0; >- int ret = FAILED; >- int iocb_status = IOCB_SUCCESS; >- int cnt, loopcnt; >+ unsigned long later; >+ int ret = SUCCESS; >+ int status; >+ int cnt; > > lpfc_block_error_handler(cmnd); >- loopcnt = 0; > /* > * If target is not in a MAPPED state, delay the reset until > * target is rediscovered or devloss timeout expires. 
> */ >- while (1) { >- if (!pnode) >- goto out; >- >- if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { >- schedule_timeout_uninterruptible(msecs_to_jiffies(500)); >- loopcnt++; >- rdata = cmnd->device->hostdata; >- if (!rdata || >- (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){ >- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, >- "0721 LUN Reset rport " >- "failure: cnt x%x rdata x%p\n", >- loopcnt, rdata); >- goto out; >- } >- pnode = rdata->pnode; >- if (!pnode) >- goto out; >- } >+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; >+ while (time_after(later, jiffies)) { >+ if (!pnode || !NLP_CHK_NODE_ACT(pnode)) >+ return FAILED; > if (pnode->nlp_state == NLP_STE_MAPPED_NODE) > break; >+ schedule_timeout_uninterruptible(msecs_to_jiffies(500)); >+ rdata = cmnd->device->hostdata; >+ if (!rdata) >+ break; >+ pnode = rdata->pnode; >+ } >+ if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, >+ "0721 LUN Reset rport " >+ "failure: msec x%x rdata x%p\n", >+ jiffies_to_msecs(jiffies - later), rdata); >+ return FAILED; > } >- > lpfc_cmd = lpfc_get_scsi_buf(phba); > if (lpfc_cmd == NULL) >- goto out; >- >+ return FAILED; > lpfc_cmd->timeout = 60; > lpfc_cmd->rdata = rdata; > >- ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun, >- FCP_TARGET_RESET); >- if (!ret) >- goto out_free_scsi_buf; >- >+ status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, >+ cmnd->device->lun, >+ FCP_TARGET_RESET); >+ if (!status) { >+ lpfc_release_scsi_buf(phba, lpfc_cmd); >+ return FAILED; >+ } > iocbq = &lpfc_cmd->cur_iocbq; > > /* get a buffer for this IOCB command response */ > iocbqrsp = lpfc_sli_get_iocbq(phba); >- if (iocbqrsp == NULL) >- goto out_free_scsi_buf; >- >+ if (iocbqrsp == NULL) { >+ lpfc_release_scsi_buf(phba, lpfc_cmd); >+ return FAILED; >+ } > lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, > "0703 Issue target reset to TGT %d LUN %d " > "rpi x%x nlp_flag x%x\n", cmnd->device->id, > 
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); >- iocb_status = lpfc_sli_issue_iocb_wait(phba, >- &phba->sli.ring[phba->sli.fcp_ring], >- iocbq, iocbqrsp, lpfc_cmd->timeout); >- >- if (iocb_status == IOCB_TIMEDOUT) >+ status = lpfc_sli_issue_iocb_wait(phba, >+ &phba->sli.ring[phba->sli.fcp_ring], >+ iocbq, iocbqrsp, lpfc_cmd->timeout); >+ if (status == IOCB_TIMEDOUT) { > iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; >- >- if (iocb_status == IOCB_SUCCESS) >- ret = SUCCESS; >- else >- ret = iocb_status; >- >- cmd_result = iocbqrsp->iocb.un.ulpWord[4]; >- cmd_status = iocbqrsp->iocb.ulpStatus; >- >+ ret = TIMEOUT_ERROR; >+ } else { >+ if (status != IOCB_SUCCESS) >+ ret = FAILED; >+ lpfc_release_scsi_buf(phba, lpfc_cmd); >+ } >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, >+ "0713 SCSI layer issued device reset (%d, %d) " >+ "return x%x status x%x result x%x\n", >+ cmnd->device->id, cmnd->device->lun, ret, >+ iocbqrsp->iocb.ulpStatus, >+ iocbqrsp->iocb.un.ulpWord[4]); > lpfc_sli_release_iocbq(phba, iocbqrsp); >- >- /* >- * All outstanding txcmplq I/Os should have been aborted by the device. >- * Unfortunately, some targets do not abide by this forcing the driver >- * to double check. 
>- */ > cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, >- LPFC_CTX_LUN); >+ LPFC_CTX_TGT); > if (cnt) > lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], > cmnd->device->id, cmnd->device->lun, >- LPFC_CTX_LUN); >- loopcnt = 0; >- while(cnt) { >- schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); >- >- if (++loopcnt >- > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT) >- break; >- >+ LPFC_CTX_TGT); >+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; >+ while (time_after(later, jiffies) && cnt) { >+ schedule_timeout_uninterruptible(msecs_to_jiffies(20)); > cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, >- cmnd->device->lun, LPFC_CTX_LUN); >+ cmnd->device->lun, LPFC_CTX_TGT); > } >- > if (cnt) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, > "0719 device reset I/O flush failure: " > "cnt x%x\n", cnt); > ret = FAILED; > } >- >-out_free_scsi_buf: >- if (iocb_status != IOCB_TIMEDOUT) { >- lpfc_release_scsi_buf(phba, lpfc_cmd); >- } >- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, >- "0713 SCSI layer issued device reset (%d, %d) " >- "return x%x status x%x result x%x\n", >- cmnd->device->id, cmnd->device->lun, ret, >- cmd_status, cmd_result); >-out: > return ret; > } > >@@ -1240,19 +1330,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd > struct lpfc_hba *phba = vport->phba; > struct lpfc_nodelist *ndlp = NULL; > int match; >- int ret = FAILED, i, err_count = 0; >- int cnt, loopcnt; >+ int ret = SUCCESS, status, i; >+ int cnt; > struct lpfc_scsi_buf * lpfc_cmd; >+ unsigned long later; > > lpfc_block_error_handler(cmnd); >- >- lpfc_cmd = lpfc_get_scsi_buf(phba); >- if (lpfc_cmd == NULL) >- goto out; >- >- /* The lpfc_cmd storage is reused. Set all loop invariants. */ >- lpfc_cmd->timeout = 60; >- > /* > * Since the driver manages a single bus device, reset all > * targets known to the driver. 
Should any target reset >@@ -1263,8 +1346,10 @@ lpfc_bus_reset_handler(struct scsi_cmnd > match = 0; > spin_lock_irq(shost->host_lock); > list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && >- i == ndlp->nlp_sid && >+ ndlp->nlp_sid == i && > ndlp->rport) { > match = 1; > break; >@@ -1273,27 +1358,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd > spin_unlock_irq(shost->host_lock); > if (!match) > continue; >- >- ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, >- cmnd->device->lun, >- ndlp->rport->dd_data); >- if (ret != SUCCESS) { >+ lpfc_cmd = lpfc_get_scsi_buf(phba); >+ if (lpfc_cmd) { >+ lpfc_cmd->timeout = 60; >+ status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, >+ cmnd->device->lun, >+ ndlp->rport->dd_data); >+ if (status != TIMEOUT_ERROR) >+ lpfc_release_scsi_buf(phba, lpfc_cmd); >+ } >+ if (!lpfc_cmd || status != SUCCESS) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, > "0700 Bus Reset on target %d failed\n", > i); >- err_count++; >- break; >+ ret = FAILED; > } > } >- >- if (ret != IOCB_TIMEDOUT) >- lpfc_release_scsi_buf(phba, lpfc_cmd); >- >- if (err_count == 0) >- ret = SUCCESS; >- else >- ret = FAILED; >- > /* > * All outstanding txcmplq I/Os should have been aborted by > * the targets. 
Unfortunately, some targets do not abide by >@@ -1303,27 +1383,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd > if (cnt) > lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], > 0, 0, LPFC_CTX_HOST); >- loopcnt = 0; >- while(cnt) { >- schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); >- >- if (++loopcnt >- > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT) >- break; >- >+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; >+ while (time_after(later, jiffies) && cnt) { >+ schedule_timeout_uninterruptible(msecs_to_jiffies(20)); > cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); > } >- > if (cnt) { > lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, > "0715 Bus Reset I/O flush failure: " > "cnt x%x left x%x\n", cnt, i); > ret = FAILED; > } >- > lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, > "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); >-out: > return ret; > } > >@@ -1438,14 +1510,14 @@ struct scsi_host_template lpfc_template > .slave_destroy = lpfc_slave_destroy, > .scan_finished = lpfc_scan_finished, > .this_id = -1, >- .sg_tablesize = LPFC_SG_SEG_CNT, >- .use_sg_chaining = ENABLE_SG_CHAINING, >+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, > .cmd_per_lun = LPFC_CMD_PER_LUN, > .use_clustering = ENABLE_CLUSTERING, > .shost_attrs = lpfc_hba_attrs, > .max_sectors = 0xFFFF, > }; > >+ > struct scsi_host_template lpfc_vport_template = { > .module = THIS_MODULE, > .name = LPFC_DRIVER_NAME, >@@ -1459,10 +1531,9 @@ struct scsi_host_template lpfc_vport_tem > .slave_destroy = lpfc_slave_destroy, > .scan_finished = lpfc_scan_finished, > .this_id = -1, >- .sg_tablesize = LPFC_SG_SEG_CNT, >+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, > .cmd_per_lun = LPFC_CMD_PER_LUN, > .use_clustering = ENABLE_CLUSTERING, >- .use_sg_chaining = ENABLE_SG_CHAINING, > .shost_attrs = lpfc_vport_attrs, > .max_sectors = 0xFFFF, > }; >diff -urpN a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h >--- a/drivers/scsi/lpfc/lpfc_scsi.h 2008-09-05 17:47:41.728248000 
-0400 >+++ b/drivers/scsi/lpfc/lpfc_scsi.h 2008-09-05 17:47:49.737877000 -0400 >@@ -138,6 +138,7 @@ struct lpfc_scsi_buf { > * Iotag is in here > */ > struct lpfc_iocbq cur_iocbq; >+ wait_queue_head_t *waitq; > }; > > #define LPFC_SCSI_DMA_EXT_SIZE 264 >diff -urpN a/drivers/scsi/lpfc/lpfc_security.c b/drivers/scsi/lpfc/lpfc_security.c >--- a/drivers/scsi/lpfc/lpfc_security.c 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_security.c 2008-09-05 17:47:49.747876000 -0400 >@@ -0,0 +1,338 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006-2008 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. * >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. 
* >+ *******************************************************************/ >+ >+#include <linux/delay.h> >+#include <linux/pci.h> >+#include <linux/interrupt.h> >+ >+#include <scsi/scsi_tcq.h> >+#include <scsi/scsi_transport_fc.h> >+ >+#include "lpfc_hw.h" >+#include "lpfc_sli.h" >+#include "lpfc_disc.h" >+#include "lpfc.h" >+#include "lpfc_crtn.h" >+#include "lpfc_logmsg.h" >+#include "lpfc_security.h" >+#include "lpfc_auth_access.h" >+#include "lpfc_vport.h" >+ >+uint8_t lpfc_security_service_state = SECURITY_OFFLINE; >+ >+void >+lpfc_security_service_online(struct Scsi_Host *shost) >+{ >+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; >+ >+ lpfc_security_service_state = SECURITY_ONLINE; >+ if (vport->cfg_enable_auth && >+ vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN && >+ vport->phba->link_state == LPFC_HBA_ERROR) >+ lpfc_selective_reset(vport->phba); >+} >+ >+void >+lpfc_security_service_offline(struct Scsi_Host *shost) >+{ >+ lpfc_security_service_state = SECURITY_OFFLINE; >+} >+ >+void >+lpfc_security_config(struct Scsi_Host *shost, int status, void *rsp) >+{ >+ struct fc_auth_rsp *auth_rsp = (struct fc_auth_rsp *)rsp; >+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; >+ struct lpfc_nodelist *ndlp; >+ uint32_t old_interval, new_interval; >+ unsigned long new_jiffies, temp_jiffies; >+ uint8_t last_auth_mode; >+ >+ if (status) >+ return; >+ ndlp = lpfc_findnode_did(vport, Fabric_DID); >+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) >+ return; >+ >+ vport->auth.bidirectional = >+ auth_rsp->u.dhchap_security_config.bidirectional; >+ memcpy(&vport->auth.hash_priority[0], >+ &auth_rsp->u.dhchap_security_config.hash_priority[0], >+ sizeof(vport->auth.hash_priority)); >+ vport->auth.hash_len = auth_rsp->u.dhchap_security_config.hash_len; >+ memcpy(&vport->auth.dh_group_priority[0], >+ &auth_rsp->u.dhchap_security_config. 
>+ dh_group_priority[0], >+ sizeof(vport->auth.dh_group_priority)); >+ vport->auth.dh_group_len = >+ auth_rsp->u.dhchap_security_config.dh_group_len; >+ old_interval = vport->auth.reauth_interval; >+ vport->auth.reauth_interval = >+ auth_rsp->u.dhchap_security_config.reauth_interval; >+ new_interval = vport->auth.reauth_interval; >+ /* >+ * If interval changed we need to adjust the running timer >+ * If enabled then start timer now. >+ * If disabled then stop the timer. >+ * If changed to shorter than elapsed time, then set to fire now >+ * If changed to longer than elapsed time, extend the timer. >+ */ >+ if (old_interval != new_interval && >+ vport->auth.auth_state == LPFC_AUTH_SUCCESS) { >+ new_jiffies = msecs_to_jiffies(new_interval * 60000); >+ del_timer_sync(&ndlp->nlp_reauth_tmr); >+ if (old_interval == 0) >+ temp_jiffies = jiffies + new_jiffies; >+ if (new_interval == 0) >+ temp_jiffies = 0; >+ else if (new_jiffies < (jiffies - vport->auth.last_auth)) >+ temp_jiffies = jiffies + msecs_to_jiffies(1); >+ else >+ temp_jiffies = jiffies + (new_jiffies - >+ (jiffies - vport->auth.last_auth)); >+ if (temp_jiffies) >+ mod_timer(&ndlp->nlp_reauth_tmr, temp_jiffies); >+ } >+ last_auth_mode = vport->auth.auth_mode; >+ vport->auth.auth_mode = >+ auth_rsp->u.dhchap_security_config.auth_mode; >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY, >+ "1025 Received security config local_wwpn:" >+ "%llX remote_wwpn:%llX \nmode:0x%x " >+ "hash(%d):%x:%x:%x:%x bidir:0x%x " >+ "dh_group(%d):%x:%x:%x:%x:%x:%x:%x:%x " >+ "reauth_interval:0x%x\n", >+ (unsigned long long)auth_rsp->local_wwpn, >+ (unsigned long long)auth_rsp->remote_wwpn, >+ auth_rsp->u.dhchap_security_config.auth_mode, >+ auth_rsp->u.dhchap_security_config.hash_len, >+ auth_rsp->u.dhchap_security_config.hash_priority[0], >+ auth_rsp->u.dhchap_security_config.hash_priority[1], >+ auth_rsp->u.dhchap_security_config.hash_priority[2], >+ auth_rsp->u.dhchap_security_config.hash_priority[3], >+ 
 auth_rsp->u.dhchap_security_config.bidirectional, >+ auth_rsp->u.dhchap_security_config.dh_group_len, >+ auth_rsp->u.dhchap_security_config.dh_group_priority[0], >+ auth_rsp->u.dhchap_security_config.dh_group_priority[1], >+ auth_rsp->u.dhchap_security_config.dh_group_priority[2], >+ auth_rsp->u.dhchap_security_config.dh_group_priority[3], >+ auth_rsp->u.dhchap_security_config.dh_group_priority[4], >+ auth_rsp->u.dhchap_security_config.dh_group_priority[5], >+ auth_rsp->u.dhchap_security_config.dh_group_priority[6], >+ auth_rsp->u.dhchap_security_config.dh_group_priority[7], >+ auth_rsp->u.dhchap_security_config.reauth_interval); >+ kfree(auth_rsp); >+ if (vport->auth.auth_mode == FC_AUTHMODE_ACTIVE) >+ vport->auth.security_active = 1; >+ else if (vport->auth.auth_mode == FC_AUTHMODE_PASSIVE) { >+ if (ndlp->nlp_flag & NLP_SC_REQ) >+ vport->auth.security_active = 1; >+ else { >+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY, >+ "1038 Authentication not " >+ "required by the fabric. " >+ "Disabled.\n"); >+ vport->auth.security_active = 0; >+ } >+ } else { >+ vport->auth.security_active = 0; >+ /* >+ * If the switch requires authentication and authentication >+ * is disabled for this HBA/Fabric port, fail the >+ * discovery. 
>+ */ >+ if (ndlp->nlp_flag & NLP_SC_REQ) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1050 Authentication mode is " >+ "disabled, but is required by " >+ "the fabric.\n"); >+ lpfc_vport_set_state(vport, FC_VPORT_FAILED); >+ /* Cancel discovery timer */ >+ lpfc_can_disctmo(vport); >+ } >+ } >+ if (last_auth_mode == FC_AUTHMODE_UNKNOWN) { >+ if (vport->auth.security_active) >+ lpfc_start_authentication(vport, ndlp); >+ else >+ lpfc_start_discovery(vport); >+ } >+} >+ >+int >+lpfc_get_security_enabled(struct Scsi_Host *shost) >+{ >+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; >+ >+ return(vport->cfg_enable_auth); >+} >+ >+int >+lpfc_security_wait(struct lpfc_hba *phba) >+{ >+ int i = 0; >+ if (lpfc_security_service_state == SECURITY_ONLINE) >+ return 0; >+ lpfc_printf_log(phba, KERN_WARNING, LOG_SECURITY, >+ "1058 Waiting for authentication service...\n"); >+ while (lpfc_security_service_state == SECURITY_OFFLINE) { >+ i++; >+ if (i > SECURITY_WAIT_TMO * 2) >+ return -ETIMEDOUT; >+ /* Delay for half of a second */ >+ msleep(500); >+ } >+ lpfc_printf_log(phba, KERN_WARNING, LOG_SECURITY, >+ "1059 Authentication service online.\n"); >+ return 0; >+} >+ >+int >+lpfc_security_config_wait(struct lpfc_vport *vport) >+{ >+ int i = 0; >+ >+ while (vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN) { >+ i++; >+ if (i > 120) { >+ return -ETIMEDOUT; >+ } >+ /* Delay for half of a second */ >+ msleep(500); >+ } >+ return 0; >+} >+ >+void >+lpfc_reauth_node(unsigned long ptr) >+{ >+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; >+ struct lpfc_vport *vport = ndlp->vport; >+ struct lpfc_hba *phba = vport->phba; >+ unsigned long flags; >+ struct lpfc_work_evt *evtp = &ndlp->els_reauth_evt; >+ >+ ndlp = (struct lpfc_nodelist *) ptr; >+ phba = ndlp->vport->phba; >+ >+ spin_lock_irqsave(&phba->hbalock, flags); >+ if (!list_empty(&evtp->evt_listp)) { >+ spin_unlock_irqrestore(&phba->hbalock, flags); >+ return; >+ } >+ >+ /* We need to hold the 
node resource by incrementing the reference >+ * count until this queued work is done >+ */ >+ evtp->evt_arg1 = lpfc_nlp_get(ndlp); >+ if (evtp->evt_arg1) { >+ evtp->evt = LPFC_EVT_REAUTH; >+ list_add_tail(&evtp->evt_listp, &phba->work_list); >+ lpfc_worker_wake_up(phba); >+ } >+ spin_unlock_irqrestore(&phba->hbalock, flags); >+ return; >+} >+ >+void >+lpfc_reauthentication_handler(struct lpfc_nodelist *ndlp) >+{ >+ struct lpfc_vport *vport = ndlp->vport; >+ if (vport->auth.auth_msg_state != LPFC_DHCHAP_SUCCESS) >+ return; >+ >+ if (lpfc_start_node_authentication(ndlp)) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1029 Reauthentication Failure\n"); >+ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS) >+ lpfc_port_auth_failed(ndlp); >+ } >+} >+ >+/* >+ * This function will kick start authentication for a node. >+ * This is used for re-authentication of a node or a user >+ * initiated node authentication. >+ */ >+int >+lpfc_start_node_authentication(struct lpfc_nodelist *ndlp) >+{ >+ struct lpfc_vport *vport; >+ int ret; >+ >+ vport = ndlp->vport; >+ /* If there is authentication timer cancel the timer */ >+ del_timer_sync(&ndlp->nlp_reauth_tmr); >+ ret = lpfc_get_auth_config(ndlp, &ndlp->nlp_portname); >+ if (ret) >+ return ret; >+ ret = lpfc_security_config_wait(vport); >+ if (ret) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1032 Start Authentication: get config " >+ "timed out.\n"); >+ return ret; >+ } >+ return 0; >+} >+ >+int >+lpfc_get_auth_config(struct lpfc_nodelist *ndlp, struct lpfc_name *rwwn) >+{ >+ struct lpfc_vport *vport; >+ struct fc_auth_req auth_req; >+ struct fc_auth_rsp *auth_rsp; >+ struct Scsi_Host *shost; >+ int ret; >+ >+ vport = ndlp->vport; >+ shost = lpfc_shost_from_vport(vport); >+ >+ auth_req.local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn); >+ if (ndlp->nlp_type & NLP_FABRIC) >+ auth_req.remote_wwpn = AUTH_FABRIC_WWN; >+ else >+ auth_req.remote_wwpn = wwn_to_u64(rwwn->u.wwn); >+ if (lpfc_security_service_state == 
SECURITY_OFFLINE) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1053 Start Authentication: " >+ "Security service offline.\n"); >+ return -EINVAL; >+ } >+ auth_rsp = kmalloc(sizeof(struct fc_auth_rsp), GFP_KERNEL); >+ if (!auth_rsp) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1028 Start Authentication: No buffers\n"); >+ return -ENOMEM; >+ } >+ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN; >+ ret = lpfc_fc_security_get_config(shost, &auth_req, >+ sizeof(struct fc_auth_req), >+ auth_rsp, >+ sizeof(struct fc_auth_rsp)); >+ if (ret) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY, >+ "1031 Start Authentication: Get config " >+ "failed.\n"); >+ kfree(auth_rsp); >+ return ret; >+ } >+ return 0; >+} >diff -urpN a/drivers/scsi/lpfc/lpfc_security.h b/drivers/scsi/lpfc/lpfc_security.h >--- a/drivers/scsi/lpfc/lpfc_security.h 1969-12-31 19:00:00.000000000 -0500 >+++ b/drivers/scsi/lpfc/lpfc_security.h 2008-09-05 17:47:49.766879000 -0400 >@@ -0,0 +1,24 @@ >+/******************************************************************* >+ * This file is part of the Emulex Linux Device Driver for * >+ * Fibre Channel Host Bus Adapters. * >+ * Copyright (C) 2006-2007 Emulex. All rights reserved. * >+ * EMULEX and SLI are trademarks of Emulex. * >+ * www.emulex.com * >+ * * >+ * This program is free software; you can redistribute it and/or * >+ * modify it under the terms of version 2 of the GNU General * >+ * Public License as published by the Free Software Foundation. * >+ * This program is distributed in the hope that it will be useful. * >+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * >+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * >+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * >+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * >+ * TO BE LEGALLY INVALID. 
See the GNU General Public License for * >+ * more details, a copy of which can be found in the file COPYING * >+ * included with this package. * >+ *******************************************************************/ >+ >+#define SECURITY_OFFLINE 0x0 >+#define SECURITY_ONLINE 0x1 >+ >+#define SECURITY_WAIT_TMO 30 /* seconds to wait for the auth service */ >diff -urpN a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c >--- a/drivers/scsi/lpfc/lpfc_sli.c 2008-09-05 17:47:41.739249000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_sli.c 2008-09-05 17:47:49.809876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -199,11 +199,29 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd > case CMD_RCV_ELS_REQ_CX: > case CMD_RCV_SEQUENCE64_CX: > case CMD_RCV_ELS_REQ64_CX: >+ case CMD_ASYNC_STATUS: > case CMD_IOCB_RCV_SEQ64_CX: > case CMD_IOCB_RCV_ELS64_CX: > case CMD_IOCB_RCV_CONT64_CX: >+ case CMD_IOCB_RET_XRI64_CX: > type = LPFC_UNSOL_IOCB; > break; >+ case CMD_IOCB_XMIT_MSEQ64_CR: >+ case CMD_IOCB_XMIT_MSEQ64_CX: >+ case CMD_IOCB_RCV_SEQ_LIST64_CX: >+ case CMD_IOCB_RCV_ELS_LIST64_CX: >+ case CMD_IOCB_CLOSE_EXTENDED_CN: >+ case CMD_IOCB_ABORT_EXTENDED_CN: >+ case CMD_IOCB_RET_HBQE64_CN: >+ case CMD_IOCB_FCP_IBIDIR64_CR: >+ case CMD_IOCB_FCP_IBIDIR64_CX: >+ case CMD_IOCB_FCP_ITASKMGT64_CX: >+ case CMD_IOCB_LOGENTRY_CN: >+ case CMD_IOCB_LOGENTRY_ASYNC_CN: >+ printk("%s - Unhandled SLI-3 Command x%x\n", >+ __FUNCTION__, iocb_cmnd); >+ type = LPFC_UNKNOWN_IOCB; >+ break; > default: > type = LPFC_UNKNOWN_IOCB; > break; >@@ -306,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba > phba->work_ha 
|= HA_ERATT; > phba->work_hs = HS_FFER3; > >- /* hbalock should already be held */ >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); >+ lpfc_worker_wake_up(phba); > > return NULL; > } >@@ -473,8 +489,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *ph > if (pring->txq_cnt && > lpfc_is_link_up(phba) && > (pring->ringno != phba->sli.fcp_ring || >- phba->sli.sli_flag & LPFC_PROCESS_LA) && >- !(pring->flag & LPFC_STOP_IOCB_MBX)) { >+ phba->sli.sli_flag & LPFC_PROCESS_LA)) { > > while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && > (nextiocb = lpfc_sli_ringtx_get(phba, pring))) >@@ -489,31 +504,6 @@ lpfc_sli_resume_iocb(struct lpfc_hba *ph > return; > } > >-/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ >-static void >-lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno) >-{ >- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? >- &phba->slim2p->mbx.us.s3_pgp.port[ringno] : >- &phba->slim2p->mbx.us.s2.port[ringno]; >- unsigned long iflags; >- >- /* If the ring is active, flag it */ >- spin_lock_irqsave(&phba->hbalock, iflags); >- if (phba->sli.ring[ringno].cmdringaddr) { >- if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) { >- phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX; >- /* >- * Force update of the local copy of cmdGetInx >- */ >- phba->sli.ring[ringno].local_getidx >- = le32_to_cpu(pgp->cmdGetInx); >- lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]); >- } >- } >- spin_unlock_irqrestore(&phba->hbalock, iflags); >-} >- > struct lpfc_hbq_entry * > lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) > { >@@ -554,10 +544,13 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba > { > struct lpfc_dmabuf *dmabuf, *next_dmabuf; > struct hbq_dmabuf *hbq_buf; >+ unsigned long flags; > int i, hbq_count; >+ uint32_t hbqno; > > hbq_count = lpfc_sli_hbq_count(); > /* Return all memory used by all HBQs */ >+ spin_lock_irqsave(&phba->hbalock, flags); > for (i = 0; i < hbq_count; ++i) { > list_for_each_entry_safe(dmabuf, next_dmabuf, > 
&phba->hbqs[i].hbq_buffer_list, list) { >@@ -565,7 +558,30 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba > list_del(&hbq_buf->dbuf.list); > (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); > } >+ phba->hbqs[i].buffer_count = 0; > } >+ /* Return all HBQ buffer that are in-fly */ >+ list_for_each_entry_safe(dmabuf, next_dmabuf, >+ &phba->hbqbuf_in_list, list) { >+ hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); >+ list_del(&hbq_buf->dbuf.list); >+ if (hbq_buf->tag == -1) { >+ (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) >+ (phba, hbq_buf); >+ } else { >+ hbqno = hbq_buf->tag >> 16; >+ if (hbqno >= LPFC_MAX_HBQS) >+ (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) >+ (phba, hbq_buf); >+ else >+ (phba->hbqs[hbqno].hbq_free_buffer)(phba, >+ hbq_buf); >+ } >+ } >+ >+ /* Mark the HBQs not in use */ >+ phba->hbq_in_use = 0; >+ spin_unlock_irqrestore(&phba->hbalock, flags); > } > > static struct lpfc_hbq_entry * >@@ -627,29 +643,41 @@ static int > lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) > { > uint32_t i, start, end; >+ unsigned long flags; > struct hbq_dmabuf *hbq_buffer; > > if (!phba->hbqs[hbqno].hbq_alloc_buffer) { > return 0; > } > >- start = lpfc_hbq_defs[hbqno]->buffer_count; >- end = count + lpfc_hbq_defs[hbqno]->buffer_count; >+ start = phba->hbqs[hbqno].buffer_count; >+ end = count + start; > if (end > lpfc_hbq_defs[hbqno]->entry_count) { > end = lpfc_hbq_defs[hbqno]->entry_count; > } > >+ /* Check whether HBQ is still in use */ >+ spin_lock_irqsave(&phba->hbalock, flags); >+ if (!phba->hbq_in_use) { >+ spin_unlock_irqrestore(&phba->hbalock, flags); >+ return 0; >+ } >+ > /* Populate HBQ entries */ > for (i = start; i < end; i++) { > hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); >- if (!hbq_buffer) >+ if (!hbq_buffer) { >+ spin_unlock_irqrestore(&phba->hbalock, flags); > return 1; >+ } > hbq_buffer->tag = (i | (hbqno << 16)); > if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) >- 
lpfc_hbq_defs[hbqno]->buffer_count++; >+ phba->hbqs[hbqno].buffer_count++; > else > (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); > } >+ >+ spin_unlock_irqrestore(&phba->hbalock, flags); > return 0; > } > >@@ -686,7 +714,7 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *ph > } > lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, > "1803 Bad hbq tag. Data: x%x x%x\n", >- tag, lpfc_hbq_defs[tag >> 16]->buffer_count); >+ tag, phba->hbqs[tag >> 16].buffer_count); > return NULL; > } > >@@ -712,6 +740,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxComm > case MBX_LOAD_SM: > case MBX_READ_NV: > case MBX_WRITE_NV: >+ case MBX_WRITE_VPARMS: > case MBX_RUN_BIU_DIAG: > case MBX_INIT_LINK: > case MBX_DOWN_LINK: >@@ -739,7 +768,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxComm > case MBX_DEL_LD_ENTRY: > case MBX_RUN_PROGRAM: > case MBX_SET_MASK: >- case MBX_SET_SLIM: >+ case MBX_SET_VARIABLE: > case MBX_UNREG_D_ID: > case MBX_KILL_BOARD: > case MBX_CONFIG_FARP: >@@ -751,12 +780,16 @@ lpfc_sli_chk_mbx_command(uint8_t mbxComm > case MBX_READ_RPI64: > case MBX_REG_LOGIN64: > case MBX_READ_LA64: >- case MBX_FLASH_WR_ULA: >+ case MBX_WRITE_WWN: > case MBX_SET_DEBUG: > case MBX_LOAD_EXP_ROM: >+ case MBX_ASYNCEVT_ENABLE: > case MBX_REG_VPI: > case MBX_UNREG_VPI: > case MBX_HEARTBEAT: >+ case MBX_READ_EVENT_LOG_STATUS: >+ case MBX_READ_EVENT_LOG: >+ case MBX_WRITE_EVENT_LOG: > ret = mbxCommand; > break; > default: >@@ -932,16 +965,29 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba > uint32_t hbqno; > void *virt; /* virtual address ptr */ > dma_addr_t phys; /* mapped address */ >+ unsigned long flags; >+ >+ /* Check whether HBQ is still in use */ >+ spin_lock_irqsave(&phba->hbalock, flags); >+ if (!phba->hbq_in_use) { >+ spin_unlock_irqrestore(&phba->hbalock, flags); >+ return NULL; >+ } > > hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); >- if (hbq_entry == NULL) >+ if (hbq_entry == NULL) { >+ spin_unlock_irqrestore(&phba->hbalock, flags); > return NULL; >+ } > list_del(&hbq_entry->dbuf.list); > > 
hbqno = tag >> 16; > new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); >- if (new_hbq_entry == NULL) >+ if (new_hbq_entry == NULL) { >+ list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); >+ spin_unlock_irqrestore(&phba->hbalock, flags); > return &hbq_entry->dbuf; >+ } > new_hbq_entry->tag = -1; > phys = new_hbq_entry->dbuf.phys; > virt = new_hbq_entry->dbuf.virt; >@@ -950,9 +996,23 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba > hbq_entry->dbuf.phys = phys; > hbq_entry->dbuf.virt = virt; > lpfc_sli_free_hbq(phba, hbq_entry); >+ list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list); >+ spin_unlock_irqrestore(&phba->hbalock, flags); >+ > return &new_hbq_entry->dbuf; > } > >+static struct lpfc_dmabuf * >+lpfc_sli_get_buff(struct lpfc_hba *phba, >+ struct lpfc_sli_ring *pring, >+ uint32_t tag) >+{ >+ if (tag & QUE_BUFTAG_BIT) >+ return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); >+ else >+ return lpfc_sli_replace_hbqbuff(phba, tag); >+} >+ > static int > lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, > struct lpfc_iocbq *saveq) >@@ -961,19 +1021,136 @@ lpfc_sli_process_unsol_iocb(struct lpfc_ > WORD5 * w5p; > uint32_t Rctl, Type; > uint32_t match, i; >+ struct lpfc_iocbq *iocbq; >+ struct lpfc_dmabuf *dmzbuf; > > match = 0; > irsp = &(saveq->iocb); >- if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) >- || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) >- || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX) >- || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) { >+ >+ if (irsp->ulpStatus == IOSTAT_NEED_BUFFER) >+ return 1; >+ if (irsp->ulpCommand == CMD_ASYNC_STATUS) { >+ if (pring->lpfc_sli_rcv_async_status) >+ pring->lpfc_sli_rcv_async_status(phba, pring, saveq); >+ else >+ lpfc_printf_log(phba, >+ KERN_WARNING, >+ LOG_SLI, >+ "0316 Ring %d handler: unexpected " >+ "ASYNC_STATUS iocb received evt_code " >+ "0x%x\n", >+ pring->ringno, >+ irsp->un.asyncstat.evt_code); >+ return 1; >+ } >+ >+ if ((irsp->ulpCommand == 
CMD_IOCB_RET_XRI64_CX) && >+ (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { >+ if (irsp->ulpBdeCount > 0) { >+ dmzbuf = lpfc_sli_get_buff(phba, pring, >+ irsp->un.ulpWord[3]); >+ lpfc_in_buf_free(phba, dmzbuf); >+ } >+ >+ if (irsp->ulpBdeCount > 1) { >+ dmzbuf = lpfc_sli_get_buff(phba, pring, >+ irsp->unsli3.sli3Words[3]); >+ lpfc_in_buf_free(phba, dmzbuf); >+ } >+ >+ if (irsp->ulpBdeCount > 2) { >+ dmzbuf = lpfc_sli_get_buff(phba, pring, >+ irsp->unsli3.sli3Words[7]); >+ lpfc_in_buf_free(phba, dmzbuf); >+ } >+ >+ return 1; >+ } >+ >+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { >+ if (irsp->ulpBdeCount != 0) { >+ saveq->context2 = lpfc_sli_get_buff(phba, pring, >+ irsp->un.ulpWord[3]); >+ if (!saveq->context2) >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_SLI, >+ "0341 Ring %d Cannot find buffer for " >+ "an unsolicited iocb. tag 0x%x\n", >+ pring->ringno, >+ irsp->un.ulpWord[3]); >+ } >+ if (irsp->ulpBdeCount == 2) { >+ saveq->context3 = lpfc_sli_get_buff(phba, pring, >+ irsp->unsli3.sli3Words[7]); >+ if (!saveq->context3) >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_SLI, >+ "0342 Ring %d Cannot find buffer for an" >+ " unsolicited iocb. tag 0x%x\n", >+ pring->ringno, >+ irsp->unsli3.sli3Words[7]); >+ } >+ list_for_each_entry(iocbq, &saveq->list, list) { >+ irsp = &(iocbq->iocb); >+ if (irsp->ulpBdeCount != 0) { >+ iocbq->context2 = lpfc_sli_get_buff(phba, pring, >+ irsp->un.ulpWord[3]); >+ if (!iocbq->context2) >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_SLI, >+ "0343 Ring %d Cannot find " >+ "buffer for an unsolicited iocb" >+ ". tag 0x%x\n", pring->ringno, >+ irsp->un.ulpWord[3]); >+ } >+ if (irsp->ulpBdeCount == 2) { >+ iocbq->context3 = lpfc_sli_get_buff(phba, pring, >+ irsp->unsli3.sli3Words[7]); >+ if (!iocbq->context3) >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_SLI, >+ "0344 Ring %d Cannot find " >+ "buffer for an unsolicited " >+ "iocb. 
tag 0x%x\n", >+ pring->ringno, >+ irsp->unsli3.sli3Words[7]); >+ } >+ } >+ } >+ if (irsp->ulpBdeCount != 0 && >+ (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || >+ irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { >+ int found = 0; >+ >+ /* search continue save q for same XRI */ >+ list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { >+ if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { >+ list_add_tail(&saveq->list, &iocbq->list); >+ found = 1; >+ break; >+ } >+ } >+ if (!found) >+ list_add_tail(&saveq->clist, >+ &pring->iocb_continue_saveq); >+ if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { >+ list_del_init(&iocbq->clist); >+ saveq = iocbq; >+ irsp = &(saveq->iocb); >+ } else >+ return 0; >+ } >+ if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || >+ (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || >+ (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { > Rctl = FC_ELS_REQ; > Type = FC_ELS_DATA; > } else { >- w5p = >- (WORD5 *) & (saveq->iocb.un. >- ulpWord[5]); >+ w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); > Rctl = w5p->hcsw.Rctl; > Type = w5p->hcsw.Type; > >@@ -988,15 +1165,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_ > } > } > >- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { >- if (irsp->ulpBdeCount != 0) >- saveq->context2 = lpfc_sli_replace_hbqbuff(phba, >- irsp->un.ulpWord[3]); >- if (irsp->ulpBdeCount == 2) >- saveq->context3 = lpfc_sli_replace_hbqbuff(phba, >- irsp->unsli3.sli3Words[7]); >- } >- > /* unSolicited Responses */ > if (pring->prt[0].profile) { > if (pring->prt[0].lpfc_sli_rcv_unsol_event) >@@ -1006,12 +1174,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_ > } else { > /* We must search, based on rctl / type > for the right routine */ >- for (i = 0; i < pring->num_mask; >- i++) { >- if ((pring->prt[i].rctl == >- Rctl) >- && (pring->prt[i]. 
>- type == Type)) { >+ for (i = 0; i < pring->num_mask; i++) { >+ if ((pring->prt[i].rctl == Rctl) >+ && (pring->prt[i].type == Type)) { > if (pring->prt[i].lpfc_sli_rcv_unsol_event) > (pring->prt[i].lpfc_sli_rcv_unsol_event) > (phba, pring, saveq); >@@ -1084,6 +1249,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hb > IOSTAT_LOCAL_REJECT; > saveq->iocb.un.ulpWord[4] = > IOERR_SLI_ABORTED; >+ >+ /* Firmware could still be in progress >+ * of DMAing payload, so don't free data >+ * buffer till after a hbeat. >+ */ >+ saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; > } > } > (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); >@@ -1141,9 +1312,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_ > phba->work_ha |= HA_ERATT; > phba->work_hs = HS_FFER3; > >- /* hbalock should already be held */ >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); >+ lpfc_worker_wake_up(phba); > > return; > } >@@ -1243,8 +1412,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_ > memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); > memcpy(&adaptermsg[0], (uint8_t *) irsp, > MAX_MSG_DATA); >- dev_warn(&((phba->pcidev)->dev), >- "lpfc%d: %s\n", >+ dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s", > phba->brd_no, adaptermsg); > } else { > /* Unknown IOCB command */ >@@ -1431,8 +1599,7 @@ lpfc_sli_handle_fast_ring_event(struct l > memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); > memcpy(&adaptermsg[0], (uint8_t *) irsp, > MAX_MSG_DATA); >- dev_warn(&((phba->pcidev)->dev), >- "lpfc%d: %s\n", >+ dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s", > phba->brd_no, adaptermsg); > } else { > /* Unknown IOCB command */ >@@ -1572,12 +1739,7 @@ lpfc_sli_handle_slow_ring_event(struct l > > writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); > >- if (list_empty(&(pring->iocb_continueq))) { >- list_add(&rspiocbp->list, &(pring->iocb_continueq)); >- } else { >- list_add_tail(&rspiocbp->list, >- &(pring->iocb_continueq)); >- } >+ list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); > > pring->iocb_continueq_cnt++; > if (irsp->ulpLe) { >@@ 
-1642,17 +1804,17 @@ lpfc_sli_handle_slow_ring_event(struct l > iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; > type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); > if (type == LPFC_SOL_IOCB) { >- spin_unlock_irqrestore(&phba->hbalock, >- iflag); >+ spin_unlock_irqrestore(&phba->hbalock, iflag); > rc = lpfc_sli_process_sol_iocb(phba, pring, > saveq); > spin_lock_irqsave(&phba->hbalock, iflag); > } else if (type == LPFC_UNSOL_IOCB) { >- spin_unlock_irqrestore(&phba->hbalock, >- iflag); >+ spin_unlock_irqrestore(&phba->hbalock, iflag); > rc = lpfc_sli_process_unsol_iocb(phba, pring, > saveq); > spin_lock_irqsave(&phba->hbalock, iflag); >+ if (!rc) >+ free_saveq = 0; > } else if (type == LPFC_ABORT_IOCB) { > if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && > ((cmdiocbp = >@@ -1683,7 +1845,7 @@ lpfc_sli_handle_slow_ring_event(struct l > memcpy(&adaptermsg[0], (uint8_t *) irsp, > MAX_MSG_DATA); > dev_warn(&((phba->pcidev)->dev), >- "lpfc%d: %s\n", >+ "lpfc%d: %s", > phba->brd_no, adaptermsg); > } else { > /* Unknown IOCB command */ >@@ -2033,6 +2195,72 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) > } > > int >+lpfc_sli_set_dma_length(struct lpfc_hba * phba, uint32_t polling) >+{ >+ uint32_t dma_length; >+ LPFC_MBOXQ_t *mbox; >+ int ret = 0; >+ >+ switch (phba->cfg_pci_max_read) { >+ case 512: >+ dma_length = SLIM_VAL_MAX_DMA_512; >+ break; >+ case 1024: >+ dma_length = SLIM_VAL_MAX_DMA_1024; >+ break; >+ case 2048: >+ dma_length = SLIM_VAL_MAX_DMA_2048; >+ break; >+ case 4096: >+ dma_length = SLIM_VAL_MAX_DMA_4096; >+ break; >+ default: >+ return -EINVAL; >+ } >+ >+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >+ if (!mbox) >+ goto failed; >+ >+ lpfc_set_var(phba, mbox, SLIM_VAR_MAX_DMA_LENGTH, dma_length); >+ >+ if (polling) >+ ret = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); >+ else >+ ret = lpfc_sli_issue_mbox_wait(phba, mbox, >+ LPFC_MBOX_TMO * 2); >+ >+ if (ret != MBX_SUCCESS) { >+ if (mbox->mb.mbxStatus != MBXERR_UNKNOWN_CMD) >+ lpfc_printf_log(phba, 
KERN_ERR, LOG_INIT, >+ "%d:0443 Adapter failed to set maximum" >+ " DMA length mbxStatus x%x \n", >+ phba->brd_no, mbox->mb.mbxStatus); >+ else >+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, >+ "%d:0447 Adapter failed to set maximum" >+ " DMA length mbxStatus x%x \n", >+ phba->brd_no, mbox->mb.mbxStatus); >+ goto failed; >+ } >+ >+ mempool_free( mbox, phba->mbox_mem_pool); >+ return 0; >+ >+failed: >+ /* If mailbox command failed, reset the value to default value */ >+ phba->cfg_pci_max_read = 2048; >+ if (ret == MBX_TIMEOUT) { >+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; >+ return -EPERM; >+ } else if (mbox) { >+ mempool_free( mbox, phba->mbox_mem_pool); >+ return -EPERM; >+ } else >+ return -ENOMEM; >+} >+ >+int > lpfc_sli_brdrestart(struct lpfc_hba *phba) > { > MAILBOX_t *mb; >@@ -2113,7 +2341,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *p > <status> */ > lpfc_printf_log(phba, KERN_ERR, LOG_INIT, > "0436 Adapter failed to init, " >- "timeout, status reg x%x\n", status); >+ "timeout, status reg x%x, " >+ "FW Data: A8 x%x AC x%x\n", status, >+ readl(phba->MBslimaddr + 0xa8), >+ readl(phba->MBslimaddr + 0xac)); > phba->link_state = LPFC_HBA_ERROR; > return -ETIMEDOUT; > } >@@ -2125,7 +2356,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *p > <status> */ > lpfc_printf_log(phba, KERN_ERR, LOG_INIT, > "0437 Adapter failed to init, " >- "chipset, status reg x%x\n", status); >+ "chipset, status reg x%x, " >+ "FW Data: A8 x%x AC x%x\n", status, >+ readl(phba->MBslimaddr + 0xa8), >+ readl(phba->MBslimaddr + 0xac)); > phba->link_state = LPFC_HBA_ERROR; > return -EIO; > } >@@ -2153,7 +2387,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *p > /* Adapter failed to init, chipset, status reg <status> */ > lpfc_printf_log(phba, KERN_ERR, LOG_INIT, > "0438 Adapter failed to init, chipset, " >- "status reg x%x\n", status); >+ "status reg x%x, " >+ "FW Data: A8 x%x AC x%x\n", status, >+ readl(phba->MBslimaddr + 0xa8), >+ readl(phba->MBslimaddr + 0xac)); > phba->link_state = LPFC_HBA_ERROR; 
> return -EIO; > } >@@ -2201,18 +2438,13 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba > uint32_t hbqno; > uint32_t hbq_entry_index; > >- /* Get a Mailbox buffer to setup mailbox >- * commands for HBA initialization >- */ > pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); >- > if (!pmb) > return -ENOMEM; >- > pmbox = &pmb->mb; >- > /* Initialize the struct lpfc_sli_hbq structure for each hbq */ > phba->link_state = LPFC_INIT_MBX_CMDS; >+ phba->hbq_in_use = 1; > > hbq_entry_index = 0; > for (hbqno = 0; hbqno < hbq_count; ++hbqno) { >@@ -2222,20 +2454,16 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba > phba->hbqs[hbqno].entry_count = > lpfc_hbq_defs[hbqno]->entry_count; > lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], >- hbq_entry_index, pmb); >+ hbq_entry_index, pmb); > hbq_entry_index += phba->hbqs[hbqno].entry_count; > > if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { >- /* Adapter failed to init, mbxCmd <cmd> CFG_RING, >- mbxStatus <status>, ring <num> */ >- > lpfc_printf_log(phba, KERN_ERR, > LOG_SLI | LOG_VPORT, > "1805 Adapter failed to init. " > "Data: x%x x%x x%x\n", > pmbox->mbxCommand, > pmbox->mbxStatus, hbqno); >- > phba->link_state = LPFC_HBA_ERROR; > mempool_free(pmb, phba->mbox_mem_pool); > return ENXIO; >@@ -2324,9 +2552,7 @@ lpfc_do_config_port(struct lpfc_hba *phb > if ((pmb->mb.un.varCfgPort.sli_mode == 3) && > (!pmb->mb.un.varCfgPort.cMA)) { > rc = -ENXIO; >- goto do_prep_failed; > } >- return rc; > > do_prep_failed: > mempool_free(pmb, phba->mbox_mem_pool); >@@ -2386,6 +2612,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba > lpfc_printf_log(phba, KERN_INFO, LOG_INIT, > "0444 Firmware in SLI %x mode. 
Max_vpi %d\n", > phba->sli_rev, phba->max_vpi); >+ >+ lpfc_sli_set_dma_length(phba,1); >+ > rc = lpfc_sli_ring_map(phba); > > if (rc) >@@ -2440,12 +2669,9 @@ lpfc_mbox_timeout(unsigned long ptr) > phba->pport->work_port_events |= WORKER_MBOX_TMO; > spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); > >- if (!tmo_posted) { >- spin_lock_irqsave(&phba->hbalock, iflag); >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); >- spin_unlock_irqrestore(&phba->hbalock, iflag); >- } >+ if (!tmo_posted) >+ lpfc_worker_wake_up(phba); >+ return; > } > > void >@@ -2477,7 +2703,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hb > spin_unlock_irq(&phba->pport->work_port_lock); > spin_lock_irq(&phba->hbalock); > phba->link_state = LPFC_LINK_UNKNOWN; >- phba->pport->fc_flag |= FC_ESTABLISH_LINK; > psli->sli_flag &= ~LPFC_SLI2_ACTIVE; > spin_unlock_irq(&phba->hbalock); > >@@ -2485,16 +2710,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hb > lpfc_sli_abort_iocb_ring(phba, pring); > > lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, >- "0316 Resetting board due to mailbox timeout\n"); >+ "0345 Resetting board due to mailbox timeout\n"); > /* > * lpfc_offline calls lpfc_sli_hba_down which will clean up > * on oustanding mailbox commands. > */ >+ /* If resets are disabled then set error state and return. 
*/ >+ if (!phba->cfg_enable_hba_reset) { >+ phba->link_state = LPFC_HBA_ERROR; >+ return; >+ } > lpfc_offline_prep(phba); > lpfc_offline(phba); > lpfc_sli_brdrestart(phba); >- if (lpfc_online(phba) == 0) /* Initialize the HBA */ >- mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); >+ lpfc_online(phba); > lpfc_unblock_mgmt_io(phba); > return; > } >@@ -2507,31 +2736,45 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > uint32_t status, evtctr; > uint32_t ha_copy; > int i; >+ unsigned long timeout; > unsigned long drvr_flag = 0; > volatile uint32_t word0, ldata; > void __iomem *to_slim; >+ int processing_queue = 0; >+ >+ spin_lock_irqsave(&phba->hbalock, drvr_flag); >+ if (!pmbox) { >+ /* processing mbox queue from intr_handler */ >+ processing_queue = 1; >+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; >+ pmbox = lpfc_mbox_get(phba); >+ if (!pmbox) { >+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag); >+ return MBX_SUCCESS; >+ } >+ } > > if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && > pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { > if(!pmbox->vport) { >+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag); > lpfc_printf_log(phba, KERN_ERR, > LOG_MBOX | LOG_VPORT, > "1806 Mbox x%x failed. No vport\n", > pmbox->mb.mbxCommand); > dump_stack(); >- return MBXERR_ERROR; >+ goto out_not_finished; > } > } > >- > /* If the PCI channel is in offline state, do not post mbox. 
*/ >- if (unlikely(pci_channel_offline(phba->pcidev))) >- return MBX_NOT_FINISHED; >+ if (unlikely(pci_channel_offline(phba->pcidev))) { >+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag); >+ goto out_not_finished; >+ } > >- spin_lock_irqsave(&phba->hbalock, drvr_flag); > psli = &phba->sli; > >- > mb = &pmbox->mb; > status = MBX_SUCCESS; > >@@ -2539,15 +2782,15 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > spin_unlock_irqrestore(&phba->hbalock, drvr_flag); > > /* Mbox command <mbxCommand> cannot issue */ >- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) >- return MBX_NOT_FINISHED; >+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); >+ goto out_not_finished; > } > > if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && > !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { > spin_unlock_irqrestore(&phba->hbalock, drvr_flag); >- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) >- return MBX_NOT_FINISHED; >+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); >+ goto out_not_finished; > } > > if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { >@@ -2561,29 +2804,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > > /* Mbox command <mbxCommand> cannot issue */ > LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); >- return MBX_NOT_FINISHED; >+ goto out_not_finished; > } > > if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { > spin_unlock_irqrestore(&phba->hbalock, drvr_flag); > /* Mbox command <mbxCommand> cannot issue */ > LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); >- return MBX_NOT_FINISHED; >- } >- >- /* Handle STOP IOCB processing flag. This is only meaningful >- * if we are not polling for mbox completion. 
>- */ >- if (flag & MBX_STOP_IOCB) { >- flag &= ~MBX_STOP_IOCB; >- /* Now flag each ring */ >- for (i = 0; i < psli->num_rings; i++) { >- /* If the ring is active, flag it */ >- if (psli->ring[i].cmdringaddr) { >- psli->ring[i].flag |= >- LPFC_STOP_IOCB_MBX; >- } >- } >+ goto out_not_finished; > } > > /* Another mailbox command is still being processed, queue this >@@ -2620,23 +2848,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > return MBX_BUSY; > } > >- /* Handle STOP IOCB processing flag. This is only meaningful >- * if we are not polling for mbox completion. >- */ >- if (flag & MBX_STOP_IOCB) { >- flag &= ~MBX_STOP_IOCB; >- if (flag == MBX_NOWAIT) { >- /* Now flag each ring */ >- for (i = 0; i < psli->num_rings; i++) { >- /* If the ring is active, flag it */ >- if (psli->ring[i].cmdringaddr) { >- psli->ring[i].flag |= >- LPFC_STOP_IOCB_MBX; >- } >- } >- } >- } >- > psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; > > /* If we are not polling, we MUST be in SLI2 mode */ >@@ -2647,7 +2858,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > spin_unlock_irqrestore(&phba->hbalock, drvr_flag); > /* Mbox command <mbxCommand> cannot issue */ > LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); >- return MBX_NOT_FINISHED; >+ goto out_not_finished; > } > /* timeout active mbox command */ > mod_timer(&psli->mbox_tmo, (jiffies + >@@ -2686,9 +2897,35 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > mb->mbxOwner = OWN_CHIP; > > if (psli->sli_flag & LPFC_SLI2_ACTIVE) { >- /* First copy command data to host SLIM area */ >+ /* Populate mbox extension offset word. 
*/ >+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { >+ *(((uint32_t *)mb) + pmbox->mbox_offset_word) >+ = (uint8_t *)&(phba->slim2p->mbx_ext_words[0]) >+ - (uint8_t *)&(phba->slim2p->mbx); >+ } >+ >+ /* Copy the mailbox extension data */ >+ if (pmbox->in_ext_byte_len && pmbox->context2) { >+ lpfc_sli_pcimem_bcopy(pmbox->context2, >+ (uint8_t*)&phba->slim2p->mbx_ext_words[0], >+ pmbox->in_ext_byte_len); >+ } >+ /* Copy command data to host SLIM area */ > lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); >+ > } else { >+ /* Populate mbox extension offset word. */ >+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) >+ *(((uint32_t *)mb) + pmbox->mbox_offset_word) >+ = MAILBOX_HBA_EXT_OFFSET; >+ >+ /* Copy the mailbox extension data */ >+ if (pmbox->in_ext_byte_len && pmbox->context2) { >+ lpfc_memcpy_to_slim(phba->MBslimaddr + >+ MAILBOX_HBA_EXT_OFFSET, >+ pmbox->context2, pmbox->in_ext_byte_len); >+ >+ } > if (mb->mbxCommand == MBX_CONFIG_PORT) { > /* copy command data into host mbox for cmpl */ > lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, >@@ -2714,18 +2951,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > } > > wmb(); >- /* interrupt board to doit right away */ >- writel(CA_MBATT, phba->CAregaddr); >- readl(phba->CAregaddr); /* flush */ > > switch (flag) { > case MBX_NOWAIT: >- /* Don't wait for it to finish, just return */ >+ /* Set up reference to mailbox command */ > psli->mbox_active = pmbox; >+ /* Interrupt board to do it */ >+ writel(CA_MBATT, phba->CAregaddr); >+ readl(phba->CAregaddr); /* flush */ >+ /* Don't wait for it to finish, just return */ > break; > > case MBX_POLL: >+ /* Set up null reference to mailbox command */ > psli->mbox_active = NULL; >+ /* Interrupt board to do it */ >+ writel(CA_MBATT, phba->CAregaddr); >+ readl(phba->CAregaddr); /* flush */ >+ > if (psli->sli_flag & LPFC_SLI2_ACTIVE) { > /* First read mbox status word */ > word0 = *((volatile uint32_t *)&phba->slim2p->mbx); >@@ -2737,19 +2980,19 @@ 
lpfc_sli_issue_mbox(struct lpfc_hba *phb > > /* Read the HBA Host Attention Register */ > ha_copy = readl(phba->HAregaddr); >- >- i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); >- i *= 1000; /* Convert to ms */ >- >+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, >+ mb->mbxCommand) * >+ 1000) + jiffies; >+ i = 0; > /* Wait for command to complete */ > while (((word0 & OWN_CHIP) == OWN_CHIP) || > (!(ha_copy & HA_MBATT) && > (phba->link_state > LPFC_WARM_START))) { >- if (i-- <= 0) { >+ if (time_after(jiffies, timeout)) { > psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; > spin_unlock_irqrestore(&phba->hbalock, > drvr_flag); >- return MBX_NOT_FINISHED; >+ goto out_not_finished; > } > > /* Check if we took a mbox interrupt while we were >@@ -2758,12 +3001,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > && (evtctr != psli->slistat.mbox_event)) > break; > >- spin_unlock_irqrestore(&phba->hbalock, >- drvr_flag); >- >- msleep(1); >- >- spin_lock_irqsave(&phba->hbalock, drvr_flag); >+ if (i++ > 10) { >+ spin_unlock_irqrestore(&phba->hbalock, >+ drvr_flag); >+ msleep(1); >+ spin_lock_irqsave(&phba->hbalock, drvr_flag); >+ } > > if (psli->sli_flag & LPFC_SLI2_ACTIVE) { > /* First copy command data */ >@@ -2795,15 +3038,23 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > /* copy results back to user */ > lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, > MAILBOX_CMD_SIZE); >+ /* Copy the mailbox extension data */ >+ if (pmbox->out_ext_byte_len && pmbox->context2) { >+ lpfc_sli_pcimem_bcopy( >+ &(phba->slim2p->mbx_ext_words[0]), >+ pmbox->context2, >+ pmbox->out_ext_byte_len); >+ } > } else { > /* First copy command data */ > lpfc_memcpy_from_slim(mb, phba->MBslimaddr, > MAILBOX_CMD_SIZE); >- if ((mb->mbxCommand == MBX_DUMP_MEMORY) && >- pmbox->context2) { >- lpfc_memcpy_from_slim((void *)pmbox->context2, >- phba->MBslimaddr + DMP_RSP_OFFSET, >- mb->un.varDmp.word_cnt); >+ /* Copy the mailbox extension data */ >+ if (pmbox->out_ext_byte_len && pmbox->context2) { >+ 
lpfc_memcpy_from_slim(pmbox->context2, >+ phba->MBslimaddr + >+ MAILBOX_HBA_EXT_OFFSET, >+ pmbox->out_ext_byte_len); > } > } > >@@ -2816,6 +3067,13 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb > > spin_unlock_irqrestore(&phba->hbalock, drvr_flag); > return status; >+ >+out_not_finished: >+ if (processing_queue) { >+ pmbox->mb.mbxStatus = MBX_NOT_FINISHED; >+ lpfc_mbox_cmpl_put(phba, pmbox); >+ } >+ return MBX_NOT_FINISHED; > } > > /* >@@ -2848,7 +3106,7 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba > /* > * Lockless version of lpfc_sli_issue_iocb. > */ >-int >+static int > __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, > struct lpfc_iocbq *piocb, uint32_t flag) > { >@@ -2866,7 +3124,6 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *p > return IOCB_ERROR; > } > >- > /* If the PCI channel is in offline state, do not post iocbs. */ > if (unlikely(pci_channel_offline(phba->pcidev))) > return IOCB_ERROR; >@@ -2879,9 +3136,9 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *p > > /* > * Check to see if we are blocking IOCB processing because of a >- * outstanding mbox command. >+ * outstanding event. > */ >- if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) >+ if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) > goto iocb_busy; > > if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { >@@ -2890,6 +3147,17 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *p > * can be issued if the link is not up. 
> */ > switch (piocb->iocb.ulpCommand) { >+ case CMD_GEN_REQUEST64_CR: >+ case CMD_GEN_REQUEST64_CX: >+ if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || >+ (piocb->iocb.un.genreq64.w5.hcsw.Rctl != >+ FC_FCP_CMND) || >+ (piocb->iocb.un.genreq64.w5.hcsw.Type != >+ MENLO_TRANSPORT_TYPE)) >+ >+ goto iocb_busy; >+ break; >+ > case CMD_QUE_RING_BUF_CN: > case CMD_QUE_RING_BUF64_CN: > /* >@@ -2993,6 +3261,61 @@ lpfc_extra_ring_setup( struct lpfc_hba * > return 0; > } > >+static void >+lpfc_sli_async_event_handler(struct lpfc_hba * phba, >+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) >+{ >+ IOCB_t *icmd; >+ uint16_t evt_code; >+ uint16_t temp; >+ struct temp_event temp_event_data; >+ struct Scsi_Host *shost; >+ >+ icmd = &iocbq->iocb; >+ evt_code = icmd->un.asyncstat.evt_code; >+ temp = icmd->ulpContext; >+ >+ if ((evt_code != ASYNC_TEMP_WARN) && >+ (evt_code != ASYNC_TEMP_SAFE)) { >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_SLI, >+ "0346 Ring %d handler: unexpected ASYNC_STATUS" >+ " evt_code 0x%x\n", >+ pring->ringno, >+ icmd->un.asyncstat.evt_code); >+ return; >+ } >+ temp_event_data.data = (uint32_t)temp; >+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; >+ if (evt_code == ASYNC_TEMP_WARN) { >+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP; >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_TEMP, >+ "0347 Adapter is very hot, please take " >+ "corrective action. temperature : %d Celsius\n", >+ temp); >+ } >+ if (evt_code == ASYNC_TEMP_SAFE) { >+ temp_event_data.event_code = LPFC_NORMAL_TEMP; >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_TEMP, >+ "0340 Adapter temperature is OK now. 
" >+ "temperature : %d Celsius\n", >+ temp); >+ } >+ >+ /* Send temperature change event to applications */ >+ shost = lpfc_shost_from_vport(phba->pport); >+ fc_host_post_vendor_event(shost, fc_get_event_number(), >+ sizeof(temp_event_data), (char *) &temp_event_data, >+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); >+ >+} >+ >+ > int > lpfc_sli_setup(struct lpfc_hba *phba) > { >@@ -3059,6 +3382,8 @@ lpfc_sli_setup(struct lpfc_hba *phba) > pring->fast_iotag = 0; > pring->iotag_ctr = 0; > pring->iotag_max = 4096; >+ pring->lpfc_sli_rcv_async_status = >+ lpfc_sli_async_event_handler; > pring->num_mask = 4; > pring->prt[0].profile = 0; /* Mask 0 */ > pring->prt[0].rctl = FC_ELS_REQ; >@@ -3123,6 +3448,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *ph > INIT_LIST_HEAD(&pring->txq); > INIT_LIST_HEAD(&pring->txcmplq); > INIT_LIST_HEAD(&pring->iocb_continueq); >+ INIT_LIST_HEAD(&pring->iocb_continue_saveq); > INIT_LIST_HEAD(&pring->postbufq); > } > spin_unlock_irq(&phba->hbalock); >@@ -3147,8 +3473,12 @@ lpfc_sli_host_down(struct lpfc_vport *vp > for (i = 0; i < psli->num_rings; i++) { > pring = &psli->ring[i]; > prev_pring_flag = pring->flag; >- if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ >+ /* Only slow rings */ >+ if (pring->ringno == LPFC_ELS_RING) { > pring->flag |= LPFC_DEFERRED_RING_EVENT; >+ /* Set the lpfc data pending flag */ >+ set_bit(LPFC_DATA_READY, &phba->data_flags); >+ } > /* > * Error everything on the txq since these iocbs have not been > * given to the FW yet. 
>@@ -3193,6 +3523,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) > LIST_HEAD(completions); > struct lpfc_sli *psli = &phba->sli; > struct lpfc_sli_ring *pring; >+ struct lpfc_dmabuf *buf_ptr; > LPFC_MBOXQ_t *pmb; > struct lpfc_iocbq *iocb; > IOCB_t *cmd = NULL; >@@ -3206,8 +3537,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) > spin_lock_irqsave(&phba->hbalock, flags); > for (i = 0; i < psli->num_rings; i++) { > pring = &psli->ring[i]; >- if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ >+ /* Only slow rings */ >+ if (pring->ringno == LPFC_ELS_RING) { > pring->flag |= LPFC_DEFERRED_RING_EVENT; >+ /* Set the lpfc data pending flag */ >+ set_bit(LPFC_DATA_READY, &phba->data_flags); >+ } > > /* > * Error everything on the txq since these iocbs have not been >@@ -3232,6 +3567,19 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) > } > } > >+ spin_lock_irqsave(&phba->hbalock, flags); >+ list_splice_init(&phba->elsbuf, &completions); >+ phba->elsbuf_cnt = 0; >+ phba->elsbuf_prev_cnt = 0; >+ spin_unlock_irqrestore(&phba->hbalock, flags); >+ >+ while (!list_empty(&completions)) { >+ list_remove_head(&completions, buf_ptr, >+ struct lpfc_dmabuf, list); >+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); >+ kfree(buf_ptr); >+ } >+ > /* Return any active mbox cmds */ > del_timer_sync(&psli->mbox_tmo); > spin_lock_irqsave(&phba->hbalock, flags); >@@ -3240,26 +3588,21 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) > phba->pport->work_port_events &= ~WORKER_MBOX_TMO; > spin_unlock(&phba->pport->work_port_lock); > >+ /* Return any pending or completed mbox cmds */ >+ list_splice_init(&phba->sli.mboxq, &completions); > if (psli->mbox_active) { > list_add_tail(&psli->mbox_active->list, &completions); > psli->mbox_active = NULL; > psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; > } >- >- /* Return any pending or completed mbox cmds */ >- list_splice_init(&phba->sli.mboxq, &completions); > list_splice_init(&phba->sli.mboxq_cmpl, &completions); >- INIT_LIST_HEAD(&psli->mboxq); >- 
INIT_LIST_HEAD(&psli->mboxq_cmpl); >- > spin_unlock_irqrestore(&phba->hbalock, flags); > > while (!list_empty(&completions)) { > list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); > pmb->mb.mbxStatus = MBX_NOT_FINISHED; >- if (pmb->mbox_cmpl) { >+ if (pmb->mbox_cmpl) > pmb->mbox_cmpl(phba,pmb); >- } > } > return 1; > } >@@ -3294,6 +3637,47 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba > return 0; > } > >+uint32_t >+lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) >+{ >+ spin_lock_irq(&phba->hbalock); >+ phba->buffer_tag_count++; >+ /* >+ * Always set the QUE_BUFTAG_BIT to distiguish between >+ * a tag assigned by HBQ. >+ */ >+ phba->buffer_tag_count |= QUE_BUFTAG_BIT; >+ spin_unlock_irq(&phba->hbalock); >+ return phba->buffer_tag_count; >+} >+ >+struct lpfc_dmabuf * >+lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, >+ uint32_t tag) >+{ >+ struct lpfc_dmabuf *mp, *next_mp; >+ struct list_head *slp = &pring->postbufq; >+ >+ /* Search postbufq, from the begining, looking for a match on tag */ >+ spin_lock_irq(&phba->hbalock); >+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { >+ if (mp->buffer_tag == tag) { >+ list_del_init(&mp->list); >+ pring->postbufq_cnt--; >+ spin_unlock_irq(&phba->hbalock); >+ return mp; >+ } >+ } >+ >+ spin_unlock_irq(&phba->hbalock); >+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, >+ "0402 Cannot find virtual addr for buffer tag on " >+ "ring %d Data x%lx x%p x%p x%x\n", >+ pring->ringno, (unsigned long) tag, >+ slp->next, slp->prev, pring->postbufq_cnt); >+ >+ return NULL; >+} > > struct lpfc_dmabuf * > lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, >@@ -3349,6 +3733,15 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba > irsp->ulpStatus, irsp->un.ulpWord[4]); > > /* >+ * If the iocb is not found in Firmware queue the iocb >+ * might have completed already. Do not free it again. 
>+ */ >+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { >+ spin_unlock_irq(&phba->hbalock); >+ lpfc_sli_release_iocbq(phba, cmdiocb); >+ return; >+ } >+ /* > * make sure we have the right iocbq before taking it > * off the txcmplq and try to call completion routine. > */ >@@ -3361,6 +3754,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba > pring->txcmplq_cnt--; > spin_unlock_irq(&phba->hbalock); > >+ /* Firmware could still be in progress of DMAing >+ * payload, so don't free data buffer till after >+ * a hbeat. >+ */ >+ abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; >+ > abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; > abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; > abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; >@@ -3380,7 +3779,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *ph > > /* ELS cmd tag <ulpIoTag> completes */ > lpfc_printf_log(phba, KERN_INFO, LOG_ELS, >- "0133 Ignoring ELS cmd tag x%x completion Data: " >+ "0139 Ignoring ELS cmd tag x%x completion Data: " > "x%x x%x x%x\n", > irsp->ulpIoTag, irsp->ulpStatus, > irsp->un.ulpWord[4], irsp->ulpTimeout); >@@ -3399,7 +3798,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_h > struct lpfc_iocbq *abtsiocbp; > IOCB_t *icmd = NULL; > IOCB_t *iabt = NULL; >- int retval = IOCB_ERROR; >+ int retval; > > /* > * There are certain command types we don't want to abort. And we >@@ -3421,19 +3820,22 @@ lpfc_sli_issue_abort_iotag(struct lpfc_h > cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; > else > cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; >- goto abort_iotag_exit; >+ return 0; > } > > /* issue ABTS for this IOCB based on iotag */ > abtsiocbp = __lpfc_sli_get_iocbq(phba); > if (abtsiocbp == NULL) >- return 0; >+ return IOCB_ERROR; > > /* This signals the response to set the correct status > * before calling the completion handler. 
> */ > cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; > >+ if (vport->load_flag & FC_UNLOADING) >+ cmdiocb->vport = NULL; >+ > iabt = &abtsiocbp->iocb; > iabt->un.acxri.abortType = ABORT_TYPE_ABTS; > iabt->un.acxri.abortContextTag = icmd->ulpContext; >@@ -3455,7 +3857,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_h > iabt->un.acxri.abortIoTag, abtsiocbp->iotag); > retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); > >-abort_iotag_exit: >+ if (retval) >+ __lpfc_sli_release_iocbq(phba, abtsiocbp); >+ > /* > * Caller to this routine should check for IOCB_ERROR > * and handle it properly. This routine no longer removes >@@ -3470,7 +3874,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_i > lpfc_ctx_cmd ctx_cmd) > { > struct lpfc_scsi_buf *lpfc_cmd; >- struct scsi_cmnd *cmnd; >+ struct scsi_lun fcp_lun; > int rc = 1; > > if (!(iocbq->iocb_flag & LPFC_IO_FCP)) >@@ -3480,19 +3884,22 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_i > return rc; > > lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); >- cmnd = lpfc_cmd->pCmd; > >- if (cmnd == NULL) >+ if (lpfc_cmd->pCmd == NULL) > return rc; > > switch (ctx_cmd) { > case LPFC_CTX_LUN: >- if ((cmnd->device->id == tgt_id) && >- (cmnd->device->lun == lun_id)) >+ int_to_scsilun(lun_id, &fcp_lun); >+ if ((lpfc_cmd->rdata->pnode) && >+ (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && >+ (!memcmp(&lpfc_cmd->fcp_cmnd->fcp_lun, &fcp_lun, >+ sizeof(fcp_lun)))) > rc = 0; > break; > case LPFC_CTX_TGT: >- if (cmnd->device->id == tgt_id) >+ if ((lpfc_cmd->rdata->pnode) && >+ (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) > rc = 0; > break; > case LPFC_CTX_HOST: >@@ -3670,7 +4077,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba > } > } else { > lpfc_printf_log(phba, KERN_INFO, LOG_SLI, >- ":0332 IOCB wait issue failed, Data x%x\n", >+ "0332 IOCB wait issue failed, Data x%x\n", > retval); > retval = IOCB_ERROR; > } >@@ -3699,9 +4106,10 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba > unsigned long flag; > > /* The caller must leave context1 
empty. */ >- if (pmboxq->context1 != 0) >+ if (pmboxq->context1) > return MBX_NOT_FINISHED; > >+ pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; > /* setup wake call as IOCB callback */ > pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; > /* setup context field to pass wait_queue pointer to wake function */ >@@ -3771,7 +4179,6 @@ lpfc_intr_handler(int irq, void *dev_id) > uint32_t ha_copy; > uint32_t work_ha_copy; > unsigned long status; >- int i; > uint32_t control; > > MAILBOX_t *mbox, *pmbox; >@@ -3868,7 +4275,7 @@ lpfc_intr_handler(int irq, void *dev_id) > "pwork:x%x hawork:x%x wait:x%x", > phba->work_ha, work_ha_copy, > (uint32_t)((unsigned long) >- phba->work_wait)); >+ &phba->work_waitq)); > > control &= > ~(HC_R0INT_ENA << LPFC_ELS_RING); >@@ -3881,14 +4288,13 @@ lpfc_intr_handler(int irq, void *dev_id) > "x%x hawork:x%x wait:x%x", > phba->work_ha, work_ha_copy, > (uint32_t)((unsigned long) >- phba->work_wait)); >+ &phba->work_waitq)); > } > spin_unlock(&phba->hbalock); > } > } > > if (work_ha_copy & HA_ERATT) { >- phba->link_state = LPFC_HBA_ERROR; > /* > * There was a link/board error. Read the > * status register to retrieve the error event >@@ -3906,6 +4312,7 @@ lpfc_intr_handler(int irq, void *dev_id) > phba->pport->stopped = 1; > } > >+ spin_lock(&phba->hbalock); > if ((work_ha_copy & HA_MBATT) && > (phba->sli.mbox_active)) { > pmb = phba->sli.mbox_active; >@@ -3916,11 +4323,12 @@ lpfc_intr_handler(int irq, void *dev_id) > /* First check out the status word */ > lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); > if (pmbox->mbxOwner != OWN_HOST) { >+ spin_unlock(&phba->hbalock); > /* > * Stray Mailbox Interrupt, mbxCommand <cmd> > * mbxStatus <status> > */ >- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | >+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | > LOG_SLI, > "(%d):0304 Stray Mailbox " > "Interrupt mbxCommand x%x " >@@ -3928,81 +4336,95 @@ lpfc_intr_handler(int irq, void *dev_id) > (vport ? 
vport->vpi : 0), > pmbox->mbxCommand, > pmbox->mbxStatus); >- } >- phba->last_completion_time = jiffies; >- del_timer_sync(&phba->sli.mbox_tmo); >+ /* clear mailbox attention bit */ >+ work_ha_copy &= ~HA_MBATT; >+ } else { >+ phba->sli.mbox_active = NULL; >+ spin_unlock(&phba->hbalock); >+ phba->last_completion_time = jiffies; >+ del_timer(&phba->sli.mbox_tmo); >+ if (pmb->mbox_cmpl) { >+ lpfc_sli_pcimem_bcopy(mbox, pmbox, >+ MAILBOX_CMD_SIZE); >+ /* Copy the mailbox extension data */ >+ if (pmb->out_ext_byte_len && >+ pmb->context2) { >+ lpfc_sli_pcimem_bcopy( >+ &(phba->slim2p-> >+ mbx_ext_words[0]), >+ pmb->context2, >+ pmb->out_ext_byte_len); >+ } > >- phba->sli.mbox_active = NULL; >- if (pmb->mbox_cmpl) { >- lpfc_sli_pcimem_bcopy(mbox, pmbox, >- MAILBOX_CMD_SIZE); >- } >- if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { >- pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; >+ } >+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { >+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; > >- lpfc_debugfs_disc_trc(vport, >- LPFC_DISC_TRC_MBOX_VPORT, >- "MBOX dflt rpi: : status:x%x rpi:x%x", >- (uint32_t)pmbox->mbxStatus, >- pmbox->un.varWords[0], 0); >- >- if ( !pmbox->mbxStatus) { >- mp = (struct lpfc_dmabuf *) >- (pmb->context1); >- ndlp = (struct lpfc_nodelist *) >- pmb->context2; >- >- /* Reg_LOGIN of dflt RPI was successful. >- * new lets get rid of the RPI using the >- * same mbox buffer. 
>- */ >- lpfc_unreg_login(phba, vport->vpi, >- pmbox->un.varWords[0], pmb); >- pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; >- pmb->context1 = mp; >- pmb->context2 = ndlp; >- pmb->vport = vport; >- spin_lock(&phba->hbalock); >- phba->sli.sli_flag &= >- ~LPFC_SLI_MBOX_ACTIVE; >- spin_unlock(&phba->hbalock); >- goto send_current_mbox; >+ lpfc_debugfs_disc_trc(vport, >+ LPFC_DISC_TRC_MBOX_VPORT, >+ "MBOX dflt rpi: : " >+ "status:x%x rpi:x%x", >+ (uint32_t)pmbox->mbxStatus, >+ pmbox->un.varWords[0], 0); >+ >+ if (!pmbox->mbxStatus) { >+ mp = (struct lpfc_dmabuf *) >+ (pmb->context1); >+ ndlp = (struct lpfc_nodelist *) >+ pmb->context2; >+ >+ /* Reg_LOGIN of dflt RPI was >+ * successful. new lets get >+ * rid of the RPI using the >+ * same mbox buffer. >+ */ >+ lpfc_unreg_login(phba, >+ vport->vpi, >+ pmbox->un.varWords[0], >+ pmb); >+ pmb->mbox_cmpl = >+ lpfc_mbx_cmpl_dflt_rpi; >+ pmb->context1 = mp; >+ pmb->context2 = ndlp; >+ pmb->vport = vport; >+ rc = lpfc_sli_issue_mbox(phba, >+ pmb, >+ MBX_NOWAIT); >+ if (rc != MBX_BUSY) >+ lpfc_printf_log(phba, >+ KERN_ERR, >+ LOG_MBOX | LOG_SLI, >+ "0350 rc should have" >+ "been MBX_BUSY"); >+ goto send_current_mbox; >+ } > } >+ spin_lock(&phba->pport->work_port_lock); >+ phba->pport->work_port_events &= >+ ~WORKER_MBOX_TMO; >+ spin_unlock(&phba->pport->work_port_lock); >+ lpfc_mbox_cmpl_put(phba, pmb); > } >- spin_lock(&phba->pport->work_port_lock); >- phba->pport->work_port_events &= ~WORKER_MBOX_TMO; >- spin_unlock(&phba->pport->work_port_lock); >- lpfc_mbox_cmpl_put(phba, pmb); >- } >+ } else >+ spin_unlock(&phba->hbalock); > if ((work_ha_copy & HA_MBATT) && > (phba->sli.mbox_active == NULL)) { >-send_next_mbox: >- spin_lock(&phba->hbalock); >- phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; >- pmb = lpfc_mbox_get(phba); >- spin_unlock(&phba->hbalock); > send_current_mbox: > /* Process next mailbox command if there is one */ >- if (pmb != NULL) { >- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); >- if (rc == MBX_NOT_FINISHED) { 
>- pmb->mb.mbxStatus = MBX_NOT_FINISHED; >- lpfc_mbox_cmpl_put(phba, pmb); >- goto send_next_mbox; >- } >- } else { >- /* Turn on IOCB processing */ >- for (i = 0; i < phba->sli.num_rings; i++) >- lpfc_sli_turn_on_ring(phba, i); >- } >- >+ do { >+ rc = lpfc_sli_issue_mbox(phba, NULL, >+ MBX_NOWAIT); >+ } while (rc == MBX_NOT_FINISHED); >+ if (rc != MBX_SUCCESS) >+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | >+ LOG_SLI, "0349 rc should be " >+ "MBX_SUCCESS"); > } > > spin_lock(&phba->hbalock); > phba->work_ha |= work_ha_copy; >- if (phba->work_wait) >- lpfc_worker_wake_up(phba); > spin_unlock(&phba->hbalock); >+ lpfc_worker_wake_up(phba); > } > > ha_copy &= ~(phba->work_ha_mask); >diff -urpN a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h >--- a/drivers/scsi/lpfc/lpfc_sli.h 2008-09-05 17:47:41.742249000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_sli.h 2008-09-05 17:47:49.617877000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * * >@@ -18,6 +18,8 @@ > * included with this package. 
* > *******************************************************************/ > >+ >+ > /* forward declaration for LPFC_IOCB_t's use */ > struct lpfc_hba; > struct lpfc_vport; >@@ -33,6 +35,7 @@ typedef enum _lpfc_ctx_cmd { > struct lpfc_iocbq { > /* lpfc_iocbqs are used in double linked lists */ > struct list_head list; >+ struct list_head clist; > uint16_t iotag; /* pre-assigned IO tag */ > uint16_t rsvd1; > >@@ -44,6 +47,7 @@ struct lpfc_iocbq { > #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ > #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ > #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ >+#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ > > uint8_t abort_count; > uint8_t rsvd2; >@@ -86,14 +90,15 @@ typedef struct lpfcMboxq { > > void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); > uint8_t mbox_flag; >+ uint16_t in_ext_byte_len; >+ uint16_t out_ext_byte_len; >+ uint8_t mbox_offset_word; > > } LPFC_MBOXQ_t; > > #define MBX_POLL 1 /* poll mailbox till command done, then > return */ > #define MBX_NOWAIT 2 /* issue command then return immediately */ >-#define MBX_STOP_IOCB 4 /* Stop iocb processing till mbox cmds >- complete */ > > #define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per > ring */ >@@ -129,9 +134,7 @@ struct lpfc_sli_ring { > uint16_t flag; /* ring flags */ > #define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */ > #define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */ >-#define LPFC_STOP_IOCB_MBX 0x010 /* Stop processing IOCB cmds mbox */ > #define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */ >-#define LPFC_STOP_IOCB_MASK 0x030 /* Stop processing IOCB cmds mask */ > uint16_t abtsiotag; /* tracks next iotag to use for ABTS */ > > uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */ >@@ -163,9 +166,12 @@ struct lpfc_sli_ring { > struct list_head iocb_continueq; > uint16_t iocb_continueq_cnt; 
/* current length of queue */ > uint16_t iocb_continueq_max; /* max length */ >+ struct list_head iocb_continue_saveq; > > struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK]; > uint32_t num_mask; /* number of mask entries in prt array */ >+ void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *, >+ struct lpfc_sli_ring *, struct lpfc_iocbq *); > > struct lpfc_sli_ring_stat stats; /* SLI statistical info */ > >@@ -199,9 +205,6 @@ struct lpfc_hbq_init { > uint32_t add_count; /* number to allocate when starved */ > } ; > >-#define LPFC_MAX_HBQ 16 >- >- > /* Structure used to hold SLI statistical counters and info */ > struct lpfc_sli_stat { > uint64_t mbox_stat_err; /* Mbox cmds completed status error */ >@@ -235,6 +238,7 @@ struct lpfc_sli { > #define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ > #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ > #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ >+#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ > > struct lpfc_sli_ring ring[LPFC_MAX_RING]; > int fcp_ring; /* ring used for FCP initiator commands */ >diff -urpN a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h >--- a/drivers/scsi/lpfc/lpfc_version.h 2008-09-05 17:47:41.745247000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_version.h 2008-09-05 17:47:49.678876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2007 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. * > * www.emulex.com * > * * >@@ -18,10 +18,12 @@ > * included with this package. 
* > *******************************************************************/ > >-#define LPFC_DRIVER_VERSION "8.2.2" >+#define LPFC_DRIVER_VERSION "8.2.0.29" > > #define LPFC_DRIVER_NAME "lpfc" > > #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ > LPFC_DRIVER_VERSION >-#define LPFC_COPYRIGHT "Copyright(c) 2004-2007 Emulex. All rights reserved." >+#define LPFC_COPYRIGHT "Copyright(c) 2004-2008 Emulex. All rights reserved." >+ >+#define DFC_API_VERSION "0.0.0" >diff -urpN a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c >--- a/drivers/scsi/lpfc/lpfc_vport.c 2008-09-05 17:47:41.748248000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_vport.c 2008-09-05 17:47:49.764876000 -0400 >@@ -1,7 +1,7 @@ > /******************************************************************* > * This file is part of the Emulex Linux Device Driver for * > * Fibre Channel Host Bus Adapters. * >- * Copyright (C) 2004-2006 Emulex. All rights reserved. * >+ * Copyright (C) 2004-2008 Emulex. All rights reserved. * > * EMULEX and SLI are trademarks of Emulex. 
* > * www.emulex.com * > * Portions Copyright (C) 2004-2005 Christoph Hellwig * >@@ -41,6 +41,7 @@ > #include "lpfc_crtn.h" > #include "lpfc_version.h" > #include "lpfc_vport.h" >+#include "lpfc_auth_access.h" > > inline void lpfc_vport_set_state(struct lpfc_vport *vport, > enum fc_vport_state new_state) >@@ -125,15 +126,26 @@ lpfc_vport_sparm(struct lpfc_hba *phba, > pmb->vport = vport; > rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); > if (rc != MBX_SUCCESS) { >- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, >- "1818 VPort failed init, mbxCmd x%x " >- "READ_SPARM mbxStatus x%x, rc = x%x\n", >- mb->mbxCommand, mb->mbxStatus, rc); >- lpfc_mbuf_free(phba, mp->virt, mp->phys); >- kfree(mp); >- if (rc != MBX_TIMEOUT) >- mempool_free(pmb, phba->mbox_mem_pool); >- return -EIO; >+ if (signal_pending(current)) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, >+ "1830 Signal aborted mbxCmd x%x\n", >+ mb->mbxCommand); >+ lpfc_mbuf_free(phba, mp->virt, mp->phys); >+ kfree(mp); >+ if (rc != MBX_TIMEOUT) >+ mempool_free(pmb, phba->mbox_mem_pool); >+ return -EINTR; >+ } else { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, >+ "1818 VPort failed init, mbxCmd x%x " >+ "READ_SPARM mbxStatus x%x, rc = x%x\n", >+ mb->mbxCommand, mb->mbxStatus, rc); >+ lpfc_mbuf_free(phba, mp->virt, mp->phys); >+ kfree(mp); >+ if (rc != MBX_TIMEOUT) >+ mempool_free(pmb, phba->mbox_mem_pool); >+ return -EIO; >+ } > } > > memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); >@@ -193,10 +205,82 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, > return 1; > } > >+/** >+ * lpfc_discovery_wait: Wait for driver discovery to quiesce. >+ * @vport: The virtual port for which this call is being executed. >+ * >+ * This driver calls this routine specifically from lpfc_vport_delete >+ * to enforce a synchronous execution of vport >+ * delete relative to discovery activities. 
The >+ * lpfc_vport_delete routine should not return until it >+ * can reasonably guarantee that discovery has quiesced. >+ * Post FDISC LOGO, the driver must wait until its SAN teardown is >+ * complete and all resources recovered before allowing >+ * cleanup. >+ * >+ * This routine does not require any locks held. >+ **/ >+static void lpfc_discovery_wait(struct lpfc_vport *vport) >+{ >+ struct lpfc_hba *phba = vport->phba; >+ uint32_t wait_flags = 0; >+ unsigned long wait_time_max; >+ unsigned long start_time; >+ >+ wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE | >+ FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO; >+ >+ /* >+ * The time constraint on this loop is a balance between the >+ * fabric RA_TOV value and dev_loss tmo. The driver's >+ * devloss_tmo is 10 giving this loop a 3x multiplier minimally. >+ */ >+ wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000); >+ wait_time_max += jiffies; >+ start_time = jiffies; >+ while (time_before(jiffies, wait_time_max)) { >+ if ((vport->num_disc_nodes > 0) || >+ (vport->fc_flag & wait_flags) || >+ ((vport->port_state > LPFC_VPORT_FAILED) && >+ (vport->port_state < LPFC_VPORT_READY))) { >+ lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, >+ "1833 Vport discovery quiesce Wait:" >+ " vpi x%x state x%x fc_flags x%x" >+ " num_nodes x%x, waiting 1000 msecs" >+ " total wait msecs x%x\n", >+ vport->vpi, vport->port_state, >+ vport->fc_flag, vport->num_disc_nodes, >+ jiffies_to_msecs(jiffies - start_time)); >+ msleep(1000); >+ } else { >+ /* Base case. Wait variants satisfied. 
Break out */ >+ lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, >+ "1834 Vport discovery quiesced:" >+ " vpi x%x state x%x fc_flags x%x" >+ " wait msecs x%x\n", >+ vport->vpi, vport->port_state, >+ vport->fc_flag, >+ jiffies_to_msecs(jiffies >+ - start_time)); >+ break; >+ } >+ } >+ >+ if (time_after(jiffies, wait_time_max)) >+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, >+ "1835 Vport discovery quiesce failed:" >+ " vpi x%x state x%x fc_flags x%x" >+ " wait msecs x%x\n", >+ vport->vpi, vport->port_state, >+ vport->fc_flag, >+ jiffies_to_msecs(jiffies - start_time)); >+} >+ > int > lpfc_vport_create(struct fc_vport *fc_vport, bool disable) > { > struct lpfc_nodelist *ndlp; >+ char *vname = fc_vport->symbolic_name; > struct Scsi_Host *shost = fc_vport->shost; > struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata; > struct lpfc_hba *phba = pport->phba; >@@ -204,9 +288,10 @@ lpfc_vport_create(struct fc_vport *fc_vp > int instance; > int vpi; > int rc = VPORT_ERROR; >+ int status; >+ int size; > >- if ((phba->sli_rev < 3) || >- !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { >+ if ((phba->sli_rev < 3) || !phba->cfg_enable_npiv) { > lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, > "1808 Create VPORT failed: " > "NPIV is not enabled: SLImode:%d\n", >@@ -248,19 +333,38 @@ lpfc_vport_create(struct fc_vport *fc_vp > vport->vpi = vpi; > lpfc_debugfs_initialize(vport); > >- if (lpfc_vport_sparm(phba, vport)) { >- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, >- "1813 Create VPORT failed. " >- "Cannot get sparam\n"); >+ if ((status = lpfc_vport_sparm(phba, vport))) { >+ if (status == -EINTR) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, >+ "1831 Create VPORT Interrupted.\n"); >+ rc = VPORT_ERROR; >+ } else { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, >+ "1813 Create VPORT failed. 
" >+ "Cannot get sparam\n"); >+ rc = VPORT_NORESOURCES; >+ } > lpfc_free_vpi(phba, vpi); > destroy_port(vport); >- rc = VPORT_NORESOURCES; > goto error_out; > } > > memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); > memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); >- >+ size = strnlen(vname, LPFC_VNAME_LEN); >+ if (size) { >+ vport->vname = kzalloc(size+1, GFP_KERNEL); >+ if (!vport->vname) { >+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, >+ "1814 Create VPORT failed. " >+ "vname allocation failed.\n"); >+ rc = VPORT_ERROR; >+ lpfc_free_vpi(phba, vpi); >+ destroy_port(vport); >+ goto error_out; >+ } >+ memcpy(vport->vname, vname, size+1); >+ } > if (fc_vport->node_name != 0) > u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); > if (fc_vport->port_name != 0) >@@ -290,6 +394,21 @@ lpfc_vport_create(struct fc_vport *fc_vp > goto error_out; > } > >+ shost = lpfc_shost_from_vport(vport); >+ >+ if ((lpfc_get_security_enabled)(shost)){ >+ spin_lock_irq(&fc_security_user_lock); >+ >+ list_add_tail(&vport->sc_users, &fc_security_user_list); >+ >+ spin_unlock_irq(&fc_security_user_lock); >+ >+ if (fc_service_state == FC_SC_SERVICESTATE_ONLINE) { >+ lpfc_fc_queue_security_work(vport, >+ &vport->sc_online_work); >+ } >+ } >+ > *(struct lpfc_vport **)fc_vport->dd_data = vport; > vport->fc_vport = fc_vport; > >@@ -309,7 +428,8 @@ lpfc_vport_create(struct fc_vport *fc_vp > * up and ready to FDISC. 
> */ > ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); >- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && >+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { > if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { > lpfc_set_disctmo(vport); > lpfc_initial_fdisc(vport); >@@ -340,7 +460,8 @@ disable_vport(struct fc_vport *fc_vport) > long timeout; > > ndlp = lpfc_findnode_did(vport, Fabric_DID); >- if (ndlp && phba->link_state >= LPFC_LINK_UP) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) >+ && phba->link_state >= LPFC_LINK_UP) { > vport->unreg_vpi_cmpl = VPORT_INVAL; > timeout = msecs_to_jiffies(phba->fc_ratov * 2000); > if (!lpfc_issue_els_npiv_logo(vport, ndlp)) >@@ -354,6 +475,8 @@ disable_vport(struct fc_vport *fc_vport) > * calling lpfc_cleanup_rpis(vport, 1) > */ > list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ continue; > if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) > continue; > lpfc_disc_state_machine(vport, ndlp, NULL, >@@ -396,7 +519,8 @@ enable_vport(struct fc_vport *fc_vport) > * up and ready to FDISC. 
> */ > ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); >- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) >+ && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { > if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { > lpfc_set_disctmo(vport); > lpfc_initial_fdisc(vport); >@@ -426,8 +550,7 @@ lpfc_vport_disable(struct fc_vport *fc_v > int > lpfc_vport_delete(struct fc_vport *fc_vport) > { >- struct lpfc_nodelist *ndlp = NULL; >- struct lpfc_nodelist *next_ndlp; >+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; > struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost; > struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; > struct lpfc_hba *phba = vport->phba; >@@ -470,8 +593,14 @@ lpfc_vport_delete(struct fc_vport *fc_vp > * initiated after we've disposed of all other resources associated > * with the port. > */ >- if (!scsi_host_get(shost) || !scsi_host_get(shost)) >+ if (!scsi_host_get(shost)) >+ return VPORT_INVAL; >+ if (!scsi_host_get(shost)) { >+ scsi_host_put(shost); > return VPORT_INVAL; >+ } >+ >+ > spin_lock_irq(&phba->hbalock); > vport->load_flag |= FC_UNLOADING; > spin_unlock_irq(&phba->hbalock); >@@ -481,9 +610,54 @@ lpfc_vport_delete(struct fc_vport *fc_vp > scsi_remove_host(lpfc_shost_from_vport(vport)); > > ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); >- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && >- phba->link_state >= LPFC_LINK_UP) { > >+ /* In case of driver unload, we shall not perform fabric logo as the >+ * worker thread already stopped at this stage and, in this case, we >+ * can safely skip the fabric logo. 
>+ */ >+ if (phba->pport->load_flag & FC_UNLOADING) { >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && >+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && >+ phba->link_state >= LPFC_LINK_UP) { >+ /* First look for the Fabric ndlp */ >+ ndlp = lpfc_findnode_did(vport, Fabric_DID); >+ if (!ndlp) >+ goto skip_logo; >+ else if (!NLP_CHK_NODE_ACT(ndlp)) { >+ ndlp = lpfc_enable_node(vport, ndlp, >+ NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ goto skip_logo; >+ } >+ /* Remove ndlp from vport npld list */ >+ lpfc_dequeue_node(vport, ndlp); >+ >+ /* Indicate free memory when release */ >+ spin_lock_irq(&phba->ndlp_lock); >+ NLP_SET_FREE_REQ(ndlp); >+ spin_unlock_irq(&phba->ndlp_lock); >+ /* Kick off release ndlp when it can be safely done */ >+ lpfc_nlp_put(ndlp); >+ } >+ goto skip_logo; >+ } >+ >+ /* Otherwise, we will perform fabric logo as needed */ >+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && >+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && >+ phba->link_state >= LPFC_LINK_UP && >+ phba->fc_topology != TOPOLOGY_LOOP) { >+ if (vport->cfg_enable_da_id) { >+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000); >+ if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) >+ while (vport->ct_flags && timeout) >+ timeout = schedule_timeout(timeout); >+ else >+ lpfc_printf_log(vport->phba, KERN_WARNING, >+ LOG_VPORT, >+ "1829 CT command failed to " >+ "delete objects on fabric. 
\n"); >+ } > /* First look for the Fabric ndlp */ > ndlp = lpfc_findnode_did(vport, Fabric_DID); > if (!ndlp) { >@@ -492,8 +666,29 @@ lpfc_vport_delete(struct fc_vport *fc_vp > if (!ndlp) > goto skip_logo; > lpfc_nlp_init(vport, ndlp, Fabric_DID); >+ /* Indicate free memory when release */ >+ spin_lock_irq(&phba->ndlp_lock); >+ NLP_SET_FREE_REQ(ndlp); >+ spin_unlock_irq(&phba->ndlp_lock); > } else { >+ if (!NLP_CHK_NODE_ACT(ndlp)) >+ ndlp = lpfc_enable_node(vport, ndlp, >+ NLP_STE_UNUSED_NODE); >+ if (!ndlp) >+ goto skip_logo; >+ >+ /* Remove ndlp from vport npld list */ > lpfc_dequeue_node(vport, ndlp); >+ spin_lock_irq(&phba->ndlp_lock); >+ if (!NLP_CHK_FREE_REQ(ndlp)) >+ /* Indicate free memory when release */ >+ NLP_SET_FREE_REQ(ndlp); >+ else { >+ /* Skip this if ndlp is already in free mode */ >+ spin_unlock_irq(&phba->ndlp_lock); >+ goto skip_logo; >+ } >+ spin_unlock_irq(&phba->ndlp_lock); > } > vport->unreg_vpi_cmpl = VPORT_INVAL; > timeout = msecs_to_jiffies(phba->fc_ratov * 2000); >@@ -502,24 +697,35 @@ lpfc_vport_delete(struct fc_vport *fc_vp > timeout = schedule_timeout(timeout); > } > >+ if (!(phba->pport->load_flag & FC_UNLOADING)) >+ lpfc_discovery_wait(vport); >+ > skip_logo: >+ lpfc_cleanup(vport); > lpfc_sli_host_down(vport); > >- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { >- lpfc_disc_state_machine(vport, ndlp, NULL, >- NLP_EVT_DEVICE_RECOVERY); >- lpfc_disc_state_machine(vport, ndlp, NULL, >- NLP_EVT_DEVICE_RM); >- } >- > lpfc_stop_vport_timers(vport); >- lpfc_unreg_all_rpis(vport); >- lpfc_unreg_default_rpis(vport); >- /* >- * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the >- * scsi_host_put() to release the vport. >- */ >- lpfc_mbx_unreg_vpi(vport); >+ >+ if (!(phba->pport->load_flag & FC_UNLOADING)) { >+ lpfc_unreg_all_rpis(vport); >+ lpfc_unreg_default_rpis(vport); >+ /* >+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) >+ * does the scsi_host_put() to release the vport. 
>+ */ >+ if (lpfc_mbx_unreg_vpi(vport)) { >+ /* >+ * Need to release the reference count to shost >+ */ >+ scsi_host_put(shost); >+ } >+ } >+ else { >+ /* >+ * Need to release the reference count to shost >+ */ >+ scsi_host_put(shost); >+ } > > lpfc_free_vpi(phba, vport->vpi); > vport->work_port_events = 0; >@@ -541,7 +747,7 @@ lpfc_create_vport_work_array(struct lpfc > struct lpfc_vport *port_iterator; > struct lpfc_vport **vports; > int index = 0; >- vports = kzalloc(LPFC_MAX_VPORTS * sizeof(struct lpfc_vport *), >+ vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), > GFP_KERNEL); > if (vports == NULL) > return NULL; >@@ -560,12 +766,12 @@ lpfc_create_vport_work_array(struct lpfc > } > > void >-lpfc_destroy_vport_work_array(struct lpfc_vport **vports) >+lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) > { > int i; > if (vports == NULL) > return; >- for (i=0; vports[i] != NULL && i < LPFC_MAX_VPORTS; i++) >+ for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) > scsi_host_put(lpfc_shost_from_vport(vports[i])); > kfree(vports); > } >diff -urpN a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h >--- a/drivers/scsi/lpfc/lpfc_vport.h 2008-09-05 17:47:41.751247000 -0400 >+++ b/drivers/scsi/lpfc/lpfc_vport.h 2008-09-05 17:47:49.771879000 -0400 >@@ -89,7 +89,7 @@ int lpfc_vport_delete(struct fc_vport *) > int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); > int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); > struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); >-void lpfc_destroy_vport_work_array(struct lpfc_vport **); >+void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); > > /* > * queuecommand VPORT-specific return codes. Specified in the host byte code. 
>diff -urpN a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile >--- a/drivers/scsi/lpfc/Makefile 2008-09-05 17:47:41.607248000 -0400 >+++ b/drivers/scsi/lpfc/Makefile 2008-09-05 17:47:49.673876000 -0400 >@@ -1,7 +1,7 @@ > #/******************************************************************* > # * This file is part of the Emulex Linux Device Driver for * > # * Fibre Channel Host Bus Adapters. * >-# * Copyright (C) 2004-2006 Emulex. All rights reserved. * >+# * Copyright (C) 2004-2007 Emulex. All rights reserved. * > # * EMULEX and SLI are trademarks of Emulex. * > # * www.emulex.com * > # * * >@@ -24,8 +24,11 @@ ifneq ($(GCOV),) > EXTRA_CFLAGS += -O0 > endif > >+EXTRA_CFLAGS += -DNETLINK_FCTRANSPORT=25 >+ > obj-$(CONFIG_SCSI_LPFC) := lpfc.o > > lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ > lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \ >- lpfc_vport.o lpfc_debugfs.o >+ lpfc_vport.o lpfc_debugfs.o lpfc_security.o lpfc_auth_access.o \ >+ lpfc_auth.o lpfc_ioctl.o lpfc_menlo.o
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 461330
: 315938