1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * QLogic iSCSI HBA Driver 4 * Copyright (c) 2003-2013 QLogic Corporation 5 */ 6#include <linux/moduleparam.h> 7#include <linux/slab.h> 8#include <linux/blkdev.h> 9#include <linux/iscsi_boot_sysfs.h> 10#include <linux/inet.h> 11 12#include <scsi/scsi_tcq.h> 13#include <scsi/scsicam.h> 14 15#include "ql4_def.h" 16#include "ql4_version.h" 17#include "ql4_glbl.h" 18#include "ql4_dbg.h" 19#include "ql4_inline.h" 20#include "ql4_83xx.h" 21 22/* 23 * Driver version 24 */ 25static char qla4xxx_version_str[40]; 26 27/* 28 * SRB allocation cache 29 */ 30static struct kmem_cache *srb_cachep; 31 32/* 33 * Module parameter information and variables 34 */ 35static int ql4xdisablesysfsboot = 1; 36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); 37MODULE_PARM_DESC(ql4xdisablesysfsboot, 38 " Set to disable exporting boot targets to sysfs.\n" 39 "\t\t 0 - Export boot targets\n" 40 "\t\t 1 - Do not export boot targets (Default)"); 41 42int ql4xdontresethba; 43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 44MODULE_PARM_DESC(ql4xdontresethba, 45 " Don't reset the HBA for driver recovery.\n" 46 "\t\t 0 - It will reset HBA (Default)\n" 47 "\t\t 1 - It will NOT reset HBA"); 48 49int ql4xextended_error_logging; 50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); 51MODULE_PARM_DESC(ql4xextended_error_logging, 52 " Option to enable extended error logging.\n" 53 "\t\t 0 - no logging (Default)\n" 54 "\t\t 2 - debug logging"); 55 56int ql4xenablemsix = 1; 57module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); 58MODULE_PARM_DESC(ql4xenablemsix, 59 " Set to enable MSI or MSI-X interrupt mechanism.\n" 60 "\t\t 0 = enable INTx interrupt mechanism.\n" 61 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n" 62 "\t\t 2 = enable MSI interrupt mechanism."); 63 64#define QL4_DEF_QDEPTH 32 65static int ql4xmaxqdepth = QL4_DEF_QDEPTH; 66module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); 67MODULE_PARM_DESC(ql4xmaxqdepth, 68 " Maximum queue depth to report for target devices.\n" 69 "\t\t Default: 32."); 70 71static int ql4xqfulltracking = 1; 72module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR); 73MODULE_PARM_DESC(ql4xqfulltracking, 74 " Enable or disable dynamic tracking and adjustment of\n" 75 "\t\t scsi device queue depth.\n" 76 "\t\t 0 - Disable.\n" 77 "\t\t 1 - Enable. 
(Default)"); 78 79static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 80module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 81MODULE_PARM_DESC(ql4xsess_recovery_tmo, 82 " Target Session Recovery Timeout.\n" 83 "\t\t Default: 120 sec."); 84 85int ql4xmdcapmask = 0; 86module_param(ql4xmdcapmask, int, S_IRUGO); 87MODULE_PARM_DESC(ql4xmdcapmask, 88 " Set the Minidump driver capture mask level.\n" 89 "\t\t Default is 0 (firmware default capture mask)\n" 90 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); 91 92int ql4xenablemd = 1; 93module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); 94MODULE_PARM_DESC(ql4xenablemd, 95 " Set to enable minidump.\n" 96 "\t\t 0 - disable minidump\n" 97 "\t\t 1 - enable minidump (Default)"); 98 99static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 100/* 101 * SCSI host template entry points 102 */ 103static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); 104 105/* 106 * iSCSI template entry points 107 */ 108static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 109 enum iscsi_param param, char *buf); 110static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 111 enum iscsi_param param, char *buf); 112static int qla4xxx_host_get_param(struct Scsi_Host *shost, 113 enum iscsi_host_param param, char *buf); 114static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, 115 uint32_t len); 116static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 117 enum iscsi_param_type param_type, 118 int param, char *buf); 119static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); 120static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, 121 struct sockaddr *dst_addr, 122 int non_blocking); 123static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); 124static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); 125static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 126 enum iscsi_param param, char *buf); 127static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 128static struct iscsi_cls_conn * 129qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); 130static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 131 struct iscsi_cls_conn *cls_conn, 132 uint64_t transport_fd, int is_leading); 133static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); 134static struct iscsi_cls_session * 135qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 136 uint16_t qdepth, uint32_t initial_cmdsn); 137static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); 138static void qla4xxx_task_work(struct work_struct *wdata); 139static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); 140static int qla4xxx_task_xmit(struct iscsi_task *); 141static void qla4xxx_task_cleanup(struct iscsi_task *); 142static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); 143static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 144 struct iscsi_stats *stats); 145static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 146 uint32_t iface_type, uint32_t payload_size, 147 uint32_t pid, struct sockaddr *dst_addr); 148static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 149 uint32_t *num_entries, char *buf); 150static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); 151static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, 152 int len); 153static int qla4xxx_get_host_stats(struct Scsi_Host 
*shost, char *buf, int len); 154 155/* 156 * SCSI host template entry points 157 */ 158static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 159static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); 160static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); 161static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); 162static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); 163static int qla4xxx_slave_alloc(struct scsi_device *device); 164static umode_t qla4_attr_is_visible(int param_type, int param); 165static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 166 167/* 168 * iSCSI Flash DDB sysfs entry points 169 */ 170static int 171qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 172 struct iscsi_bus_flash_conn *fnode_conn, 173 void *data, int len); 174static int 175qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 176 int param, char *buf); 177static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 178 int len); 179static int 180qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); 181static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 182 struct iscsi_bus_flash_conn *fnode_conn); 183static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 184 struct iscsi_bus_flash_conn *fnode_conn); 185static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); 186 187static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 188 QLA82XX_LEGACY_INTR_CONFIG; 189 190static const uint32_t qla4_82xx_reg_tbl[] = { 191 QLA82XX_PEG_HALT_STATUS1, 192 QLA82XX_PEG_HALT_STATUS2, 193 QLA82XX_PEG_ALIVE_COUNTER, 194 QLA82XX_CRB_DRV_ACTIVE, 195 QLA82XX_CRB_DEV_STATE, 196 QLA82XX_CRB_DRV_STATE, 197 QLA82XX_CRB_DRV_SCRATCH, 198 QLA82XX_CRB_DEV_PART_INFO, 199 QLA82XX_CRB_DRV_IDC_VERSION, 200 QLA82XX_FW_VERSION_MAJOR, 201 QLA82XX_FW_VERSION_MINOR, 202 QLA82XX_FW_VERSION_SUB, 203 CRB_CMDPEG_STATE, 204 CRB_TEMP_STATE, 205}; 206 207static const uint32_t qla4_83xx_reg_tbl[] = { 208 QLA83XX_PEG_HALT_STATUS1, 209 QLA83XX_PEG_HALT_STATUS2, 210 QLA83XX_PEG_ALIVE_COUNTER, 211 QLA83XX_CRB_DRV_ACTIVE, 212 QLA83XX_CRB_DEV_STATE, 213 QLA83XX_CRB_DRV_STATE, 214 QLA83XX_CRB_DRV_SCRATCH, 215 QLA83XX_CRB_DEV_PART_INFO1, 216 QLA83XX_CRB_IDC_VER_MAJOR, 217 QLA83XX_FW_VER_MAJOR, 218 QLA83XX_FW_VER_MINOR, 219 QLA83XX_FW_VER_SUB, 220 QLA83XX_CMDPEG_STATE, 221 QLA83XX_ASIC_TEMP, 222}; 223 224static struct scsi_host_template qla4xxx_driver_template = { 225 .module = THIS_MODULE, 226 .name = DRIVER_NAME, 227 .proc_name = DRIVER_NAME, 228 .queuecommand = qla4xxx_queuecommand, 229 230 .eh_abort_handler = qla4xxx_eh_abort, 231 .eh_device_reset_handler = qla4xxx_eh_device_reset, 232 .eh_target_reset_handler = qla4xxx_eh_target_reset, 233 .eh_host_reset_handler = qla4xxx_eh_host_reset, 234 .eh_timed_out = qla4xxx_eh_cmd_timed_out, 235 236 .slave_alloc = qla4xxx_slave_alloc, 237 .change_queue_depth = scsi_change_queue_depth, 238 239 .this_id = -1, 240 .cmd_per_lun = 3, 241 .sg_tablesize = SG_ALL, 242 243 .max_sectors = 0xFFFF, 244 .shost_attrs = qla4xxx_host_attrs, 245 .host_reset = qla4xxx_host_reset, 246 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, 247}; 248 249static struct iscsi_transport qla4xxx_iscsi_transport = { 250 .owner = THIS_MODULE, 251 .name = DRIVER_NAME, 252 .caps = CAP_TEXT_NEGO | 253 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | 254 CAP_DATADGST | CAP_LOGIN_OFFLOAD | 255 CAP_MULTI_R2T, 256 .attr_is_visible = qla4_attr_is_visible, 257 
.create_session = qla4xxx_session_create, 258 .destroy_session = qla4xxx_session_destroy, 259 .start_conn = qla4xxx_conn_start, 260 .create_conn = qla4xxx_conn_create, 261 .bind_conn = qla4xxx_conn_bind, 262 .unbind_conn = iscsi_conn_unbind, 263 .stop_conn = iscsi_conn_stop, 264 .destroy_conn = qla4xxx_conn_destroy, 265 .set_param = iscsi_set_param, 266 .get_conn_param = qla4xxx_conn_get_param, 267 .get_session_param = qla4xxx_session_get_param, 268 .get_ep_param = qla4xxx_get_ep_param, 269 .ep_connect = qla4xxx_ep_connect, 270 .ep_poll = qla4xxx_ep_poll, 271 .ep_disconnect = qla4xxx_ep_disconnect, 272 .get_stats = qla4xxx_conn_get_stats, 273 .send_pdu = iscsi_conn_send_pdu, 274 .xmit_task = qla4xxx_task_xmit, 275 .cleanup_task = qla4xxx_task_cleanup, 276 .alloc_pdu = qla4xxx_alloc_pdu, 277 278 .get_host_param = qla4xxx_host_get_param, 279 .set_iface_param = qla4xxx_iface_set_param, 280 .get_iface_param = qla4xxx_get_iface_param, 281 .bsg_request = qla4xxx_bsg_request, 282 .send_ping = qla4xxx_send_ping, 283 .get_chap = qla4xxx_get_chap_list, 284 .delete_chap = qla4xxx_delete_chap, 285 .set_chap = qla4xxx_set_chap_entry, 286 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, 287 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, 288 .new_flashnode = qla4xxx_sysfs_ddb_add, 289 .del_flashnode = qla4xxx_sysfs_ddb_delete, 290 .login_flashnode = qla4xxx_sysfs_ddb_login, 291 .logout_flashnode = qla4xxx_sysfs_ddb_logout, 292 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid, 293 .get_host_stats = qla4xxx_get_host_stats, 294}; 295 296static struct scsi_transport_template *qla4xxx_scsi_transport; 297 298static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) 299{ 300 u32 reg_val = 0; 301 int rval = QLA_SUCCESS; 302 303 if (is_qla8022(ha)) 304 reg_val = readl(&ha->qla4_82xx_reg->host_status); 305 else if (is_qla8032(ha) || is_qla8042(ha)) 306 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); 307 else 308 reg_val = readw(&ha->reg->ctrl_status); 309 310 if (reg_val == QL4_ISP_REG_DISCONNECT) 311 rval = QLA_ERROR; 312 313 return rval; 314} 315 316static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 317 uint32_t iface_type, uint32_t payload_size, 318 uint32_t pid, struct sockaddr *dst_addr) 319{ 320 struct scsi_qla_host *ha = to_qla_host(shost); 321 struct sockaddr_in *addr; 322 struct sockaddr_in6 *addr6; 323 uint32_t options = 0; 324 uint8_t ipaddr[IPv6_ADDR_LEN]; 325 int rval; 326 327 memset(ipaddr, 0, IPv6_ADDR_LEN); 328 /* IPv4 to IPv4 */ 329 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) && 330 (dst_addr->sa_family == AF_INET)) { 331 addr = (struct sockaddr_in *)dst_addr; 332 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN); 333 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " 334 "dest: %pI4\n", __func__, 335 &ha->ip_config.ip_address, ipaddr)); 336 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, 337 ipaddr); 338 if (rval) 339 rval = -EINVAL; 340 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) && 341 (dst_addr->sa_family == AF_INET6)) { 342 /* IPv6 to IPv6 */ 343 addr6 = (struct sockaddr_in6 *)dst_addr; 344 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN); 345 346 options |= PING_IPV6_PROTOCOL_ENABLE; 347 348 /* Ping using LinkLocal address */ 349 if ((iface_num == 0) || (iface_num == 1)) { 350 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping " 351 "src: %pI6 dest: %pI6\n", __func__, 352 &ha->ip_config.ipv6_link_local_addr, 353 ipaddr)); 354 options |= PING_IPV6_LINKLOCAL_ADDR; 355 rval = qla4xxx_ping_iocb(ha, options, 
payload_size, 356 pid, ipaddr); 357 } else { 358 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d " 359 "not supported\n", __func__, iface_num); 360 rval = -ENOSYS; 361 goto exit_send_ping; 362 } 363 364 /* 365 * If ping using LinkLocal address fails, try ping using 366 * IPv6 address 367 */ 368 if (rval != QLA_SUCCESS) { 369 options &= ~PING_IPV6_LINKLOCAL_ADDR; 370 if (iface_num == 0) { 371 options |= PING_IPV6_ADDR0; 372 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 373 "Ping src: %pI6 " 374 "dest: %pI6\n", __func__, 375 &ha->ip_config.ipv6_addr0, 376 ipaddr)); 377 } else if (iface_num == 1) { 378 options |= PING_IPV6_ADDR1; 379 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 380 "Ping src: %pI6 " 381 "dest: %pI6\n", __func__, 382 &ha->ip_config.ipv6_addr1, 383 ipaddr)); 384 } 385 rval = qla4xxx_ping_iocb(ha, options, payload_size, 386 pid, ipaddr); 387 if (rval) 388 rval = -EINVAL; 389 } 390 } else 391 rval = -ENOSYS; 392exit_send_ping: 393 return rval; 394} 395 396static umode_t qla4_attr_is_visible(int param_type, int param) 397{ 398 switch (param_type) { 399 case ISCSI_HOST_PARAM: 400 switch (param) { 401 case ISCSI_HOST_PARAM_HWADDRESS: 402 case ISCSI_HOST_PARAM_IPADDRESS: 403 case ISCSI_HOST_PARAM_INITIATOR_NAME: 404 case ISCSI_HOST_PARAM_PORT_STATE: 405 case ISCSI_HOST_PARAM_PORT_SPEED: 406 return S_IRUGO; 407 default: 408 return 0; 409 } 410 case ISCSI_PARAM: 411 switch (param) { 412 case ISCSI_PARAM_PERSISTENT_ADDRESS: 413 case ISCSI_PARAM_PERSISTENT_PORT: 414 case ISCSI_PARAM_CONN_ADDRESS: 415 case ISCSI_PARAM_CONN_PORT: 416 case ISCSI_PARAM_TARGET_NAME: 417 case ISCSI_PARAM_TPGT: 418 case ISCSI_PARAM_TARGET_ALIAS: 419 case ISCSI_PARAM_MAX_BURST: 420 case ISCSI_PARAM_MAX_R2T: 421 case ISCSI_PARAM_FIRST_BURST: 422 case ISCSI_PARAM_MAX_RECV_DLENGTH: 423 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 424 case ISCSI_PARAM_IFACE_NAME: 425 case ISCSI_PARAM_CHAP_OUT_IDX: 426 case ISCSI_PARAM_CHAP_IN_IDX: 427 case ISCSI_PARAM_USERNAME: 428 case ISCSI_PARAM_PASSWORD: 429 case ISCSI_PARAM_USERNAME_IN: 430 case ISCSI_PARAM_PASSWORD_IN: 431 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: 432 case ISCSI_PARAM_DISCOVERY_SESS: 433 case ISCSI_PARAM_PORTAL_TYPE: 434 case ISCSI_PARAM_CHAP_AUTH_EN: 435 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: 436 case ISCSI_PARAM_BIDI_CHAP_EN: 437 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: 438 case ISCSI_PARAM_DEF_TIME2WAIT: 439 case ISCSI_PARAM_DEF_TIME2RETAIN: 440 case ISCSI_PARAM_HDRDGST_EN: 441 case ISCSI_PARAM_DATADGST_EN: 442 case ISCSI_PARAM_INITIAL_R2T_EN: 443 case ISCSI_PARAM_IMM_DATA_EN: 444 case ISCSI_PARAM_PDU_INORDER_EN: 445 case ISCSI_PARAM_DATASEQ_INORDER_EN: 446 case ISCSI_PARAM_MAX_SEGMENT_SIZE: 447 case ISCSI_PARAM_TCP_TIMESTAMP_STAT: 448 case ISCSI_PARAM_TCP_WSF_DISABLE: 449 case ISCSI_PARAM_TCP_NAGLE_DISABLE: 450 case ISCSI_PARAM_TCP_TIMER_SCALE: 451 case ISCSI_PARAM_TCP_TIMESTAMP_EN: 452 case ISCSI_PARAM_TCP_XMIT_WSF: 453 case ISCSI_PARAM_TCP_RECV_WSF: 454 case ISCSI_PARAM_IP_FRAGMENT_DISABLE: 455 case ISCSI_PARAM_IPV4_TOS: 456 case ISCSI_PARAM_IPV6_TC: 457 case ISCSI_PARAM_IPV6_FLOW_LABEL: 458 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: 459 case ISCSI_PARAM_KEEPALIVE_TMO: 460 case ISCSI_PARAM_LOCAL_PORT: 461 case ISCSI_PARAM_ISID: 462 case ISCSI_PARAM_TSID: 463 case ISCSI_PARAM_DEF_TASKMGMT_TMO: 464 case ISCSI_PARAM_ERL: 465 case ISCSI_PARAM_STATSN: 466 case ISCSI_PARAM_EXP_STATSN: 467 case ISCSI_PARAM_DISCOVERY_PARENT_IDX: 468 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: 469 case ISCSI_PARAM_LOCAL_IPADDR: 470 return S_IRUGO; 471 default: 472 return 0; 473 } 474 case 
ISCSI_NET_PARAM: 475 switch (param) { 476 case ISCSI_NET_PARAM_IPV4_ADDR: 477 case ISCSI_NET_PARAM_IPV4_SUBNET: 478 case ISCSI_NET_PARAM_IPV4_GW: 479 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 480 case ISCSI_NET_PARAM_IFACE_ENABLE: 481 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 482 case ISCSI_NET_PARAM_IPV6_ADDR: 483 case ISCSI_NET_PARAM_IPV6_ROUTER: 484 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 485 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 486 case ISCSI_NET_PARAM_VLAN_ID: 487 case ISCSI_NET_PARAM_VLAN_PRIORITY: 488 case ISCSI_NET_PARAM_VLAN_ENABLED: 489 case ISCSI_NET_PARAM_MTU: 490 case ISCSI_NET_PARAM_PORT: 491 case ISCSI_NET_PARAM_IPADDR_STATE: 492 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 493 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 494 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 495 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 496 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 497 case ISCSI_NET_PARAM_TCP_WSF: 498 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 499 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 500 case ISCSI_NET_PARAM_CACHE_ID: 501 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 502 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 503 case ISCSI_NET_PARAM_IPV4_TOS_EN: 504 case ISCSI_NET_PARAM_IPV4_TOS: 505 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 506 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 507 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 508 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 509 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 510 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 511 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 512 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 513 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 514 case ISCSI_NET_PARAM_REDIRECT_EN: 515 case ISCSI_NET_PARAM_IPV4_TTL: 516 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 517 case ISCSI_NET_PARAM_IPV6_MLD_EN: 518 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 519 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 520 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 521 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 522 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 523 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 524 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 525 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 526 return S_IRUGO; 527 default: 528 return 0; 529 } 530 case ISCSI_IFACE_PARAM: 531 switch (param) { 532 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 533 case ISCSI_IFACE_PARAM_HDRDGST_EN: 534 case ISCSI_IFACE_PARAM_DATADGST_EN: 535 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 536 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 537 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 538 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 539 case ISCSI_IFACE_PARAM_ERL: 540 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 541 case ISCSI_IFACE_PARAM_FIRST_BURST: 542 case ISCSI_IFACE_PARAM_MAX_R2T: 543 case ISCSI_IFACE_PARAM_MAX_BURST: 544 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 545 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 546 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 547 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 548 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 549 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 550 return S_IRUGO; 551 default: 552 return 0; 553 } 554 case ISCSI_FLASHNODE_PARAM: 555 switch (param) { 556 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 557 case ISCSI_FLASHNODE_PORTAL_TYPE: 558 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 559 case ISCSI_FLASHNODE_DISCOVERY_SESS: 560 case ISCSI_FLASHNODE_ENTRY_EN: 561 case ISCSI_FLASHNODE_HDR_DGST_EN: 562 case ISCSI_FLASHNODE_DATA_DGST_EN: 563 case ISCSI_FLASHNODE_IMM_DATA_EN: 564 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 565 case ISCSI_FLASHNODE_DATASEQ_INORDER: 566 case 
ISCSI_FLASHNODE_PDU_INORDER: 567 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 568 case ISCSI_FLASHNODE_SNACK_REQ_EN: 569 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 570 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 571 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 572 case ISCSI_FLASHNODE_ERL: 573 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 574 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 575 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 576 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 577 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 578 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 579 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 580 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 581 case ISCSI_FLASHNODE_FIRST_BURST: 582 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 583 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 584 case ISCSI_FLASHNODE_MAX_R2T: 585 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 586 case ISCSI_FLASHNODE_ISID: 587 case ISCSI_FLASHNODE_TSID: 588 case ISCSI_FLASHNODE_PORT: 589 case ISCSI_FLASHNODE_MAX_BURST: 590 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 591 case ISCSI_FLASHNODE_IPADDR: 592 case ISCSI_FLASHNODE_ALIAS: 593 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 594 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 595 case ISCSI_FLASHNODE_LOCAL_PORT: 596 case ISCSI_FLASHNODE_IPV4_TOS: 597 case ISCSI_FLASHNODE_IPV6_TC: 598 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 599 case ISCSI_FLASHNODE_NAME: 600 case ISCSI_FLASHNODE_TPGT: 601 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 602 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 603 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 604 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 605 case ISCSI_FLASHNODE_TCP_RECV_WSF: 606 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 607 case ISCSI_FLASHNODE_USERNAME: 608 case ISCSI_FLASHNODE_PASSWORD: 609 case ISCSI_FLASHNODE_STATSN: 610 case ISCSI_FLASHNODE_EXP_STATSN: 611 case ISCSI_FLASHNODE_IS_BOOT_TGT: 612 return S_IRUGO; 613 default: 614 return 0; 615 } 616 } 617 618 return 0; 619} 620 621/** 622 * qla4xxx_create chap_list - Create CHAP list from FLASH 623 * @ha: pointer to adapter structure 624 * 625 * Read flash and make a list of CHAP entries, during login when a CHAP entry 626 * is received, it will be checked in this list. If entry exist then the CHAP 627 * entry index is set in the DDB. If CHAP entry does not exist in this list 628 * then a new entry is added in FLASH in CHAP table and the index obtained is 629 * used in the DDB. 630 **/ 631static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) 632{ 633 int rval = 0; 634 uint8_t *chap_flash_data = NULL; 635 uint32_t offset; 636 dma_addr_t chap_dma; 637 uint32_t chap_size = 0; 638 639 if (is_qla40XX(ha)) 640 chap_size = MAX_CHAP_ENTRIES_40XX * 641 sizeof(struct ql4_chap_table); 642 else /* Single region contains CHAP info for both 643 * ports which is divided into half for each port. 
644 */ 645 chap_size = ha->hw.flt_chap_size / 2; 646 647 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, 648 &chap_dma, GFP_KERNEL); 649 if (!chap_flash_data) { 650 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); 651 return; 652 } 653 654 if (is_qla40XX(ha)) { 655 offset = FLASH_CHAP_OFFSET; 656 } else { 657 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 658 if (ha->port_num == 1) 659 offset += chap_size; 660 } 661 662 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); 663 if (rval != QLA_SUCCESS) 664 goto exit_chap_list; 665 666 if (ha->chap_list == NULL) 667 ha->chap_list = vmalloc(chap_size); 668 if (ha->chap_list == NULL) { 669 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); 670 goto exit_chap_list; 671 } 672 673 memset(ha->chap_list, 0, chap_size); 674 memcpy(ha->chap_list, chap_flash_data, chap_size); 675 676exit_chap_list: 677 dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma); 678} 679 680static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, 681 int16_t chap_index, 682 struct ql4_chap_table **chap_entry) 683{ 684 int rval = QLA_ERROR; 685 int max_chap_entries; 686 687 if (!ha->chap_list) { 688 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 689 rval = QLA_ERROR; 690 goto exit_get_chap; 691 } 692 693 if (is_qla80XX(ha)) 694 max_chap_entries = (ha->hw.flt_chap_size / 2) / 695 sizeof(struct ql4_chap_table); 696 else 697 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 698 699 if (chap_index > max_chap_entries) { 700 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); 701 rval = QLA_ERROR; 702 goto exit_get_chap; 703 } 704 705 *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; 706 if ((*chap_entry)->cookie != 707 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 708 rval = QLA_ERROR; 709 *chap_entry = NULL; 710 } else { 711 rval = QLA_SUCCESS; 712 } 713 714exit_get_chap: 715 return rval; 716} 717 718/** 719 * qla4xxx_find_free_chap_index - Find the first free chap index 720 * @ha: pointer to adapter structure 721 * @chap_index: CHAP index to be returned 722 * 723 * Find the first free chap index available in the chap table 724 * 725 * Note: Caller should acquire the chap lock before getting here. 
726 **/ 727static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, 728 uint16_t *chap_index) 729{ 730 int i, rval; 731 int free_index = -1; 732 int max_chap_entries = 0; 733 struct ql4_chap_table *chap_table; 734 735 if (is_qla80XX(ha)) 736 max_chap_entries = (ha->hw.flt_chap_size / 2) / 737 sizeof(struct ql4_chap_table); 738 else 739 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 740 741 if (!ha->chap_list) { 742 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 743 rval = QLA_ERROR; 744 goto exit_find_chap; 745 } 746 747 for (i = 0; i < max_chap_entries; i++) { 748 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 749 750 if ((chap_table->cookie != 751 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && 752 (i > MAX_RESRV_CHAP_IDX)) { 753 free_index = i; 754 break; 755 } 756 } 757 758 if (free_index != -1) { 759 *chap_index = free_index; 760 rval = QLA_SUCCESS; 761 } else { 762 rval = QLA_ERROR; 763 } 764 765exit_find_chap: 766 return rval; 767} 768 769static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 770 uint32_t *num_entries, char *buf) 771{ 772 struct scsi_qla_host *ha = to_qla_host(shost); 773 struct ql4_chap_table *chap_table; 774 struct iscsi_chap_rec *chap_rec; 775 int max_chap_entries = 0; 776 int valid_chap_entries = 0; 777 int ret = 0, i; 778 779 if (is_qla80XX(ha)) 780 max_chap_entries = (ha->hw.flt_chap_size / 2) / 781 sizeof(struct ql4_chap_table); 782 else 783 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 784 785 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n", 786 __func__, *num_entries, chap_tbl_idx); 787 788 if (!buf) { 789 ret = -ENOMEM; 790 goto exit_get_chap_list; 791 } 792 793 qla4xxx_create_chap_list(ha); 794 795 chap_rec = (struct iscsi_chap_rec *) buf; 796 mutex_lock(&ha->chap_sem); 797 for (i = chap_tbl_idx; i < max_chap_entries; i++) { 798 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 799 if (chap_table->cookie != 800 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) 801 continue; 802 803 chap_rec->chap_tbl_idx = i; 804 strlcpy(chap_rec->username, chap_table->name, 805 ISCSI_CHAP_AUTH_NAME_MAX_LEN); 806 strlcpy(chap_rec->password, chap_table->secret, 807 QL4_CHAP_MAX_SECRET_LEN); 808 chap_rec->password_length = chap_table->secret_len; 809 810 if (chap_table->flags & BIT_7) /* local */ 811 chap_rec->chap_type = CHAP_TYPE_OUT; 812 813 if (chap_table->flags & BIT_6) /* peer */ 814 chap_rec->chap_type = CHAP_TYPE_IN; 815 816 chap_rec++; 817 818 valid_chap_entries++; 819 if (valid_chap_entries == *num_entries) 820 break; 821 else 822 continue; 823 } 824 mutex_unlock(&ha->chap_sem); 825 826exit_get_chap_list: 827 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n", 828 __func__, valid_chap_entries); 829 *num_entries = valid_chap_entries; 830 return ret; 831} 832 833static int __qla4xxx_is_chap_active(struct device *dev, void *data) 834{ 835 int ret = 0; 836 uint16_t *chap_tbl_idx = (uint16_t *) data; 837 struct iscsi_cls_session *cls_session; 838 struct iscsi_session *sess; 839 struct ddb_entry *ddb_entry; 840 841 if (!iscsi_is_session_dev(dev)) 842 goto exit_is_chap_active; 843 844 cls_session = iscsi_dev_to_session(dev); 845 sess = cls_session->dd_data; 846 ddb_entry = sess->dd_data; 847 848 if (iscsi_session_chkready(cls_session)) 849 goto exit_is_chap_active; 850 851 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx) 852 ret = 1; 853 854exit_is_chap_active: 855 return ret; 856} 857 858static int qla4xxx_is_chap_active(struct Scsi_Host *shost, 859 uint16_t chap_tbl_idx) 860{ 861 int ret = 0; 862 863 
ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx, 864 __qla4xxx_is_chap_active); 865 866 return ret; 867} 868 869static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) 870{ 871 struct scsi_qla_host *ha = to_qla_host(shost); 872 struct ql4_chap_table *chap_table; 873 dma_addr_t chap_dma; 874 int max_chap_entries = 0; 875 uint32_t offset = 0; 876 uint32_t chap_size; 877 int ret = 0; 878 879 chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); 880 if (chap_table == NULL) 881 return -ENOMEM; 882 883 if (is_qla80XX(ha)) 884 max_chap_entries = (ha->hw.flt_chap_size / 2) / 885 sizeof(struct ql4_chap_table); 886 else 887 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 888 889 if (chap_tbl_idx > max_chap_entries) { 890 ret = -EINVAL; 891 goto exit_delete_chap; 892 } 893 894 /* Check if chap index is in use. 895 * If chap is in use don't delet chap entry */ 896 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx); 897 if (ret) { 898 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot " 899 "delete from flash\n", chap_tbl_idx); 900 ret = -EBUSY; 901 goto exit_delete_chap; 902 } 903 904 chap_size = sizeof(struct ql4_chap_table); 905 if (is_qla40XX(ha)) 906 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size); 907 else { 908 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 909 /* flt_chap_size is CHAP table size for both ports 910 * so divide it by 2 to calculate the offset for second port 911 */ 912 if (ha->port_num == 1) 913 offset += (ha->hw.flt_chap_size / 2); 914 offset += (chap_tbl_idx * chap_size); 915 } 916 917 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); 918 if (ret != QLA_SUCCESS) { 919 ret = -EINVAL; 920 goto exit_delete_chap; 921 } 922 923 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", 924 __le16_to_cpu(chap_table->cookie))); 925 926 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { 927 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); 928 goto exit_delete_chap; 929 } 930 931 chap_table->cookie = __constant_cpu_to_le16(0xFFFF); 932 933 offset = FLASH_CHAP_OFFSET | 934 (chap_tbl_idx * sizeof(struct ql4_chap_table)); 935 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size, 936 FLASH_OPT_RMW_COMMIT); 937 if (ret == QLA_SUCCESS && ha->chap_list) { 938 mutex_lock(&ha->chap_sem); 939 /* Update ha chap_list cache */ 940 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx, 941 chap_table, sizeof(struct ql4_chap_table)); 942 mutex_unlock(&ha->chap_sem); 943 } 944 if (ret != QLA_SUCCESS) 945 ret = -EINVAL; 946 947exit_delete_chap: 948 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); 949 return ret; 950} 951 952/** 953 * qla4xxx_set_chap_entry - Make chap entry with given information 954 * @shost: pointer to host 955 * @data: chap info - credentials, index and type to make chap entry 956 * @len: length of data 957 * 958 * Add or update chap entry with the given information 959 **/ 960static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) 961{ 962 struct scsi_qla_host *ha = to_qla_host(shost); 963 struct iscsi_chap_rec chap_rec; 964 struct ql4_chap_table *chap_entry = NULL; 965 struct iscsi_param_info *param_info; 966 struct nlattr *attr; 967 int max_chap_entries = 0; 968 int type; 969 int rem = len; 970 int rc = 0; 971 int size; 972 973 memset(&chap_rec, 0, sizeof(chap_rec)); 974 975 nla_for_each_attr(attr, data, len, rem) { 976 if (nla_len(attr) < sizeof(*param_info)) { 977 rc = -EINVAL; 978 goto exit_set_chap; 979 } 980 981 param_info = 
nla_data(attr); 982 983 switch (param_info->param) { 984 case ISCSI_CHAP_PARAM_INDEX: 985 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; 986 break; 987 case ISCSI_CHAP_PARAM_CHAP_TYPE: 988 chap_rec.chap_type = param_info->value[0]; 989 break; 990 case ISCSI_CHAP_PARAM_USERNAME: 991 size = min_t(size_t, sizeof(chap_rec.username), 992 param_info->len); 993 memcpy(chap_rec.username, param_info->value, size); 994 break; 995 case ISCSI_CHAP_PARAM_PASSWORD: 996 size = min_t(size_t, sizeof(chap_rec.password), 997 param_info->len); 998 memcpy(chap_rec.password, param_info->value, size); 999 break; 1000 case ISCSI_CHAP_PARAM_PASSWORD_LEN: 1001 chap_rec.password_length = param_info->value[0]; 1002 break; 1003 default: 1004 ql4_printk(KERN_ERR, ha, 1005 "%s: No such sysfs attribute\n", __func__); 1006 rc = -ENOSYS; 1007 goto exit_set_chap; 1008 } 1009 } 1010 1011 if (chap_rec.chap_type == CHAP_TYPE_IN) 1012 type = BIDI_CHAP; 1013 else 1014 type = LOCAL_CHAP; 1015 1016 if (is_qla80XX(ha)) 1017 max_chap_entries = (ha->hw.flt_chap_size / 2) / 1018 sizeof(struct ql4_chap_table); 1019 else 1020 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 1021 1022 mutex_lock(&ha->chap_sem); 1023 if (chap_rec.chap_tbl_idx < max_chap_entries) { 1024 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, 1025 &chap_entry); 1026 if (!rc) { 1027 if (!(type == qla4xxx_get_chap_type(chap_entry))) { 1028 ql4_printk(KERN_INFO, ha, 1029 "Type mismatch for CHAP entry %d\n", 1030 chap_rec.chap_tbl_idx); 1031 rc = -EINVAL; 1032 goto exit_unlock_chap; 1033 } 1034 1035 /* If chap index is in use then don't modify it */ 1036 rc = qla4xxx_is_chap_active(shost, 1037 chap_rec.chap_tbl_idx); 1038 if (rc) { 1039 ql4_printk(KERN_INFO, ha, 1040 "CHAP entry %d is in use\n", 1041 chap_rec.chap_tbl_idx); 1042 rc = -EBUSY; 1043 goto exit_unlock_chap; 1044 } 1045 } 1046 } else { 1047 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); 1048 if (rc) { 1049 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); 1050 rc = -EBUSY; 1051 goto exit_unlock_chap; 1052 } 1053 } 1054 1055 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, 1056 chap_rec.chap_tbl_idx, type); 1057 1058exit_unlock_chap: 1059 mutex_unlock(&ha->chap_sem); 1060 1061exit_set_chap: 1062 return rc; 1063} 1064 1065 1066static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) 1067{ 1068 struct scsi_qla_host *ha = to_qla_host(shost); 1069 struct iscsi_offload_host_stats *host_stats = NULL; 1070 int host_stats_size; 1071 int ret = 0; 1072 int ddb_idx = 0; 1073 struct ql_iscsi_stats *ql_iscsi_stats = NULL; 1074 int stats_size; 1075 dma_addr_t iscsi_stats_dma; 1076 1077 DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); 1078 1079 host_stats_size = sizeof(struct iscsi_offload_host_stats); 1080 1081 if (host_stats_size != len) { 1082 ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n", 1083 __func__, len, host_stats_size); 1084 ret = -EINVAL; 1085 goto exit_host_stats; 1086 } 1087 host_stats = (struct iscsi_offload_host_stats *)buf; 1088 1089 if (!buf) { 1090 ret = -ENOMEM; 1091 goto exit_host_stats; 1092 } 1093 1094 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1095 1096 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1097 &iscsi_stats_dma, GFP_KERNEL); 1098 if (!ql_iscsi_stats) { 1099 ql4_printk(KERN_ERR, ha, 1100 "Unable to allocate memory for iscsi stats\n"); 1101 ret = -ENOMEM; 1102 goto exit_host_stats; 1103 } 1104 1105 ret = qla4xxx_get_mgmt_data(ha, ddb_idx, 
stats_size, 1106 iscsi_stats_dma); 1107 if (ret != QLA_SUCCESS) { 1108 ql4_printk(KERN_ERR, ha, 1109 "Unable to retrieve iscsi stats\n"); 1110 ret = -EIO; 1111 goto exit_host_stats; 1112 } 1113 host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); 1114 host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); 1115 host_stats->mactx_multicast_frames = 1116 le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); 1117 host_stats->mactx_broadcast_frames = 1118 le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); 1119 host_stats->mactx_pause_frames = 1120 le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); 1121 host_stats->mactx_control_frames = 1122 le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); 1123 host_stats->mactx_deferral = 1124 le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); 1125 host_stats->mactx_excess_deferral = 1126 le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); 1127 host_stats->mactx_late_collision = 1128 le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); 1129 host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort); 1130 host_stats->mactx_single_collision = 1131 le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); 1132 host_stats->mactx_multiple_collision = 1133 le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); 1134 host_stats->mactx_collision = 1135 le64_to_cpu(ql_iscsi_stats->mac_tx_collision); 1136 host_stats->mactx_frames_dropped = 1137 le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); 1138 host_stats->mactx_jumbo_frames = 1139 le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); 1140 host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); 1141 host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); 1142 host_stats->macrx_unknown_control_frames = 1143 le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); 1144 host_stats->macrx_pause_frames = 1145 le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); 1146 host_stats->macrx_control_frames = 1147 le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); 1148 host_stats->macrx_dribble = 1149 le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); 1150 host_stats->macrx_frame_length_error = 1151 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); 1152 host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); 1153 host_stats->macrx_carrier_sense_error = 1154 le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); 1155 host_stats->macrx_frame_discarded = 1156 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); 1157 host_stats->macrx_frames_dropped = 1158 le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); 1159 host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); 1160 host_stats->mac_encoding_error = 1161 le64_to_cpu(ql_iscsi_stats->mac_encoding_error); 1162 host_stats->macrx_length_error_large = 1163 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); 1164 host_stats->macrx_length_error_small = 1165 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); 1166 host_stats->macrx_multicast_frames = 1167 le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); 1168 host_stats->macrx_broadcast_frames = 1169 le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); 1170 host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); 1171 host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); 1172 host_stats->iptx_fragments = 1173 le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); 1174 host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); 1175 host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); 
1176 host_stats->iprx_fragments = 1177 le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); 1178 host_stats->ip_datagram_reassembly = 1179 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); 1180 host_stats->ip_invalid_address_error = 1181 le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); 1182 host_stats->ip_error_packets = 1183 le64_to_cpu(ql_iscsi_stats->ip_error_packets); 1184 host_stats->ip_fragrx_overlap = 1185 le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); 1186 host_stats->ip_fragrx_outoforder = 1187 le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); 1188 host_stats->ip_datagram_reassembly_timeout = 1189 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); 1190 host_stats->ipv6tx_packets = 1191 le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); 1192 host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); 1193 host_stats->ipv6tx_fragments = 1194 le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); 1195 host_stats->ipv6rx_packets = 1196 le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); 1197 host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); 1198 host_stats->ipv6rx_fragments = 1199 le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); 1200 host_stats->ipv6_datagram_reassembly = 1201 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); 1202 host_stats->ipv6_invalid_address_error = 1203 le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); 1204 host_stats->ipv6_error_packets = 1205 le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); 1206 host_stats->ipv6_fragrx_overlap = 1207 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); 1208 host_stats->ipv6_fragrx_outoforder = 1209 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); 1210 host_stats->ipv6_datagram_reassembly_timeout = 1211 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); 1212 host_stats->tcptx_segments = 1213 le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); 1214 host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); 1215 host_stats->tcprx_segments = 1216 le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); 1217 host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); 1218 host_stats->tcp_duplicate_ack_retx = 1219 le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); 1220 host_stats->tcp_retx_timer_expired = 1221 le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); 1222 host_stats->tcprx_duplicate_ack = 1223 le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); 1224 host_stats->tcprx_pure_ackr = 1225 le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); 1226 host_stats->tcptx_delayed_ack = 1227 le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); 1228 host_stats->tcptx_pure_ack = 1229 le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); 1230 host_stats->tcprx_segment_error = 1231 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); 1232 host_stats->tcprx_segment_outoforder = 1233 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); 1234 host_stats->tcprx_window_probe = 1235 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); 1236 host_stats->tcprx_window_update = 1237 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); 1238 host_stats->tcptx_window_probe_persist = 1239 le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); 1240 host_stats->ecc_error_correction = 1241 le64_to_cpu(ql_iscsi_stats->ecc_error_correction); 1242 host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); 1243 host_stats->iscsi_data_bytes_tx = 1244 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); 1245 host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); 1246 host_stats->iscsi_data_bytes_rx = 1247 
le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); 1248 host_stats->iscsi_io_completed = 1249 le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); 1250 host_stats->iscsi_unexpected_io_rx = 1251 le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); 1252 host_stats->iscsi_format_error = 1253 le64_to_cpu(ql_iscsi_stats->iscsi_format_error); 1254 host_stats->iscsi_hdr_digest_error = 1255 le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); 1256 host_stats->iscsi_data_digest_error = 1257 le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); 1258 host_stats->iscsi_sequence_error = 1259 le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); 1260exit_host_stats: 1261 if (ql_iscsi_stats) 1262 dma_free_coherent(&ha->pdev->dev, stats_size, 1263 ql_iscsi_stats, iscsi_stats_dma); 1264 1265 ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", 1266 __func__); 1267 return ret; 1268} 1269 1270static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 1271 enum iscsi_param_type param_type, 1272 int param, char *buf) 1273{ 1274 struct Scsi_Host *shost = iscsi_iface_to_shost(iface); 1275 struct scsi_qla_host *ha = to_qla_host(shost); 1276 int ival; 1277 char *pval = NULL; 1278 int len = -ENOSYS; 1279 1280 if (param_type == ISCSI_NET_PARAM) { 1281 switch (param) { 1282 case ISCSI_NET_PARAM_IPV4_ADDR: 1283 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1284 break; 1285 case ISCSI_NET_PARAM_IPV4_SUBNET: 1286 len = sprintf(buf, "%pI4\n", 1287 &ha->ip_config.subnet_mask); 1288 break; 1289 case ISCSI_NET_PARAM_IPV4_GW: 1290 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); 1291 break; 1292 case ISCSI_NET_PARAM_IFACE_ENABLE: 1293 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1294 OP_STATE(ha->ip_config.ipv4_options, 1295 IPOPT_IPV4_PROTOCOL_ENABLE, pval); 1296 } else { 1297 OP_STATE(ha->ip_config.ipv6_options, 1298 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); 1299 } 1300 1301 len = sprintf(buf, "%s\n", pval); 1302 break; 1303 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 1304 len = sprintf(buf, "%s\n", 1305 (ha->ip_config.tcp_options & 1306 TCPOPT_DHCP_ENABLE) ? 1307 "dhcp" : "static"); 1308 break; 1309 case ISCSI_NET_PARAM_IPV6_ADDR: 1310 if (iface->iface_num == 0) 1311 len = sprintf(buf, "%pI6\n", 1312 &ha->ip_config.ipv6_addr0); 1313 if (iface->iface_num == 1) 1314 len = sprintf(buf, "%pI6\n", 1315 &ha->ip_config.ipv6_addr1); 1316 break; 1317 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 1318 len = sprintf(buf, "%pI6\n", 1319 &ha->ip_config.ipv6_link_local_addr); 1320 break; 1321 case ISCSI_NET_PARAM_IPV6_ROUTER: 1322 len = sprintf(buf, "%pI6\n", 1323 &ha->ip_config.ipv6_default_router_addr); 1324 break; 1325 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 1326 pval = (ha->ip_config.ipv6_addl_options & 1327 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? 1328 "nd" : "static"; 1329 1330 len = sprintf(buf, "%s\n", pval); 1331 break; 1332 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 1333 pval = (ha->ip_config.ipv6_addl_options & 1334 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 
1335 "auto" : "static"; 1336 1337 len = sprintf(buf, "%s\n", pval); 1338 break; 1339 case ISCSI_NET_PARAM_VLAN_ID: 1340 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1341 ival = ha->ip_config.ipv4_vlan_tag & 1342 ISCSI_MAX_VLAN_ID; 1343 else 1344 ival = ha->ip_config.ipv6_vlan_tag & 1345 ISCSI_MAX_VLAN_ID; 1346 1347 len = sprintf(buf, "%d\n", ival); 1348 break; 1349 case ISCSI_NET_PARAM_VLAN_PRIORITY: 1350 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1351 ival = (ha->ip_config.ipv4_vlan_tag >> 13) & 1352 ISCSI_MAX_VLAN_PRIORITY; 1353 else 1354 ival = (ha->ip_config.ipv6_vlan_tag >> 13) & 1355 ISCSI_MAX_VLAN_PRIORITY; 1356 1357 len = sprintf(buf, "%d\n", ival); 1358 break; 1359 case ISCSI_NET_PARAM_VLAN_ENABLED: 1360 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1361 OP_STATE(ha->ip_config.ipv4_options, 1362 IPOPT_VLAN_TAGGING_ENABLE, pval); 1363 } else { 1364 OP_STATE(ha->ip_config.ipv6_options, 1365 IPV6_OPT_VLAN_TAGGING_ENABLE, pval); 1366 } 1367 len = sprintf(buf, "%s\n", pval); 1368 break; 1369 case ISCSI_NET_PARAM_MTU: 1370 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); 1371 break; 1372 case ISCSI_NET_PARAM_PORT: 1373 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1374 len = sprintf(buf, "%d\n", 1375 ha->ip_config.ipv4_port); 1376 else 1377 len = sprintf(buf, "%d\n", 1378 ha->ip_config.ipv6_port); 1379 break; 1380 case ISCSI_NET_PARAM_IPADDR_STATE: 1381 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1382 pval = iscsi_get_ipaddress_state_name( 1383 ha->ip_config.ipv4_addr_state); 1384 } else { 1385 if (iface->iface_num == 0) 1386 pval = iscsi_get_ipaddress_state_name( 1387 ha->ip_config.ipv6_addr0_state); 1388 else if (iface->iface_num == 1) 1389 pval = iscsi_get_ipaddress_state_name( 1390 ha->ip_config.ipv6_addr1_state); 1391 } 1392 1393 len = sprintf(buf, "%s\n", pval); 1394 break; 1395 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 1396 pval = iscsi_get_ipaddress_state_name( 1397 ha->ip_config.ipv6_link_local_state); 1398 len = sprintf(buf, "%s\n", pval); 1399 break; 1400 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 1401 pval = iscsi_get_router_state_name( 1402 ha->ip_config.ipv6_default_router_state); 1403 len = sprintf(buf, "%s\n", pval); 1404 break; 1405 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 1406 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1407 OP_STATE(~ha->ip_config.tcp_options, 1408 TCPOPT_DELAYED_ACK_DISABLE, pval); 1409 } else { 1410 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1411 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); 1412 } 1413 len = sprintf(buf, "%s\n", pval); 1414 break; 1415 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 1416 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1417 OP_STATE(~ha->ip_config.tcp_options, 1418 TCPOPT_NAGLE_ALGO_DISABLE, pval); 1419 } else { 1420 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1421 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); 1422 } 1423 len = sprintf(buf, "%s\n", pval); 1424 break; 1425 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 1426 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1427 OP_STATE(~ha->ip_config.tcp_options, 1428 TCPOPT_WINDOW_SCALE_DISABLE, pval); 1429 } else { 1430 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1431 IPV6_TCPOPT_WINDOW_SCALE_DISABLE, 1432 pval); 1433 } 1434 len = sprintf(buf, "%s\n", pval); 1435 break; 1436 case ISCSI_NET_PARAM_TCP_WSF: 1437 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1438 len = sprintf(buf, "%d\n", 1439 ha->ip_config.tcp_wsf); 1440 else 1441 len = sprintf(buf, "%d\n", 1442 ha->ip_config.ipv6_tcp_wsf); 1443 break; 1444 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 1445 if 
(iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1446 ival = (ha->ip_config.tcp_options & 1447 TCPOPT_TIMER_SCALE) >> 1; 1448 else 1449 ival = (ha->ip_config.ipv6_tcp_options & 1450 IPV6_TCPOPT_TIMER_SCALE) >> 1; 1451 1452 len = sprintf(buf, "%d\n", ival); 1453 break; 1454 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 1455 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1456 OP_STATE(ha->ip_config.tcp_options, 1457 TCPOPT_TIMESTAMP_ENABLE, pval); 1458 } else { 1459 OP_STATE(ha->ip_config.ipv6_tcp_options, 1460 IPV6_TCPOPT_TIMESTAMP_EN, pval); 1461 } 1462 len = sprintf(buf, "%s\n", pval); 1463 break; 1464 case ISCSI_NET_PARAM_CACHE_ID: 1465 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1466 len = sprintf(buf, "%d\n", 1467 ha->ip_config.ipv4_cache_id); 1468 else 1469 len = sprintf(buf, "%d\n", 1470 ha->ip_config.ipv6_cache_id); 1471 break; 1472 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 1473 OP_STATE(ha->ip_config.tcp_options, 1474 TCPOPT_DNS_SERVER_IP_EN, pval); 1475 1476 len = sprintf(buf, "%s\n", pval); 1477 break; 1478 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 1479 OP_STATE(ha->ip_config.tcp_options, 1480 TCPOPT_SLP_DA_INFO_EN, pval); 1481 1482 len = sprintf(buf, "%s\n", pval); 1483 break; 1484 case ISCSI_NET_PARAM_IPV4_TOS_EN: 1485 OP_STATE(ha->ip_config.ipv4_options, 1486 IPOPT_IPV4_TOS_EN, pval); 1487 1488 len = sprintf(buf, "%s\n", pval); 1489 break; 1490 case ISCSI_NET_PARAM_IPV4_TOS: 1491 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); 1492 break; 1493 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 1494 OP_STATE(ha->ip_config.ipv4_options, 1495 IPOPT_GRAT_ARP_EN, pval); 1496 1497 len = sprintf(buf, "%s\n", pval); 1498 break; 1499 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 1500 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, 1501 pval); 1502 1503 len = sprintf(buf, "%s\n", pval); 1504 break; 1505 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 1506 pval = (ha->ip_config.ipv4_alt_cid_len) ? 1507 (char *)ha->ip_config.ipv4_alt_cid : ""; 1508 1509 len = sprintf(buf, "%s\n", pval); 1510 break; 1511 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 1512 OP_STATE(ha->ip_config.ipv4_options, 1513 IPOPT_REQ_VID_EN, pval); 1514 1515 len = sprintf(buf, "%s\n", pval); 1516 break; 1517 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 1518 OP_STATE(ha->ip_config.ipv4_options, 1519 IPOPT_USE_VID_EN, pval); 1520 1521 len = sprintf(buf, "%s\n", pval); 1522 break; 1523 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 1524 pval = (ha->ip_config.ipv4_vid_len) ? 
1525 (char *)ha->ip_config.ipv4_vid : ""; 1526 1527 len = sprintf(buf, "%s\n", pval); 1528 break; 1529 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 1530 OP_STATE(ha->ip_config.ipv4_options, 1531 IPOPT_LEARN_IQN_EN, pval); 1532 1533 len = sprintf(buf, "%s\n", pval); 1534 break; 1535 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 1536 OP_STATE(~ha->ip_config.ipv4_options, 1537 IPOPT_FRAGMENTATION_DISABLE, pval); 1538 1539 len = sprintf(buf, "%s\n", pval); 1540 break; 1541 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 1542 OP_STATE(ha->ip_config.ipv4_options, 1543 IPOPT_IN_FORWARD_EN, pval); 1544 1545 len = sprintf(buf, "%s\n", pval); 1546 break; 1547 case ISCSI_NET_PARAM_REDIRECT_EN: 1548 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1549 OP_STATE(ha->ip_config.ipv4_options, 1550 IPOPT_ARP_REDIRECT_EN, pval); 1551 } else { 1552 OP_STATE(ha->ip_config.ipv6_options, 1553 IPV6_OPT_REDIRECT_EN, pval); 1554 } 1555 len = sprintf(buf, "%s\n", pval); 1556 break; 1557 case ISCSI_NET_PARAM_IPV4_TTL: 1558 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); 1559 break; 1560 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 1561 OP_STATE(ha->ip_config.ipv6_options, 1562 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); 1563 1564 len = sprintf(buf, "%s\n", pval); 1565 break; 1566 case ISCSI_NET_PARAM_IPV6_MLD_EN: 1567 OP_STATE(ha->ip_config.ipv6_addl_options, 1568 IPV6_ADDOPT_MLD_EN, pval); 1569 1570 len = sprintf(buf, "%s\n", pval); 1571 break; 1572 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 1573 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); 1574 break; 1575 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 1576 len = sprintf(buf, "%d\n", 1577 ha->ip_config.ipv6_traffic_class); 1578 break; 1579 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 1580 len = sprintf(buf, "%d\n", 1581 ha->ip_config.ipv6_hop_limit); 1582 break; 1583 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 1584 len = sprintf(buf, "%d\n", 1585 ha->ip_config.ipv6_nd_reach_time); 1586 break; 1587 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 1588 len = sprintf(buf, "%d\n", 1589 ha->ip_config.ipv6_nd_rexmit_timer); 1590 break; 1591 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 1592 len = sprintf(buf, "%d\n", 1593 ha->ip_config.ipv6_nd_stale_timeout); 1594 break; 1595 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 1596 len = sprintf(buf, "%d\n", 1597 ha->ip_config.ipv6_dup_addr_detect_count); 1598 break; 1599 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 1600 len = sprintf(buf, "%d\n", 1601 ha->ip_config.ipv6_gw_advrt_mtu); 1602 break; 1603 default: 1604 len = -ENOSYS; 1605 } 1606 } else if (param_type == ISCSI_IFACE_PARAM) { 1607 switch (param) { 1608 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 1609 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); 1610 break; 1611 case ISCSI_IFACE_PARAM_HDRDGST_EN: 1612 OP_STATE(ha->ip_config.iscsi_options, 1613 ISCSIOPTS_HEADER_DIGEST_EN, pval); 1614 1615 len = sprintf(buf, "%s\n", pval); 1616 break; 1617 case ISCSI_IFACE_PARAM_DATADGST_EN: 1618 OP_STATE(ha->ip_config.iscsi_options, 1619 ISCSIOPTS_DATA_DIGEST_EN, pval); 1620 1621 len = sprintf(buf, "%s\n", pval); 1622 break; 1623 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 1624 OP_STATE(ha->ip_config.iscsi_options, 1625 ISCSIOPTS_IMMEDIATE_DATA_EN, pval); 1626 1627 len = sprintf(buf, "%s\n", pval); 1628 break; 1629 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 1630 OP_STATE(ha->ip_config.iscsi_options, 1631 ISCSIOPTS_INITIAL_R2T_EN, pval); 1632 1633 len = sprintf(buf, "%s\n", pval); 1634 break; 1635 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 1636 OP_STATE(ha->ip_config.iscsi_options, 1637 
ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); 1638 1639 len = sprintf(buf, "%s\n", pval); 1640 break; 1641 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 1642 OP_STATE(ha->ip_config.iscsi_options, 1643 ISCSIOPTS_DATA_PDU_INORDER_EN, pval); 1644 1645 len = sprintf(buf, "%s\n", pval); 1646 break; 1647 case ISCSI_IFACE_PARAM_ERL: 1648 len = sprintf(buf, "%d\n", 1649 (ha->ip_config.iscsi_options & 1650 ISCSIOPTS_ERL)); 1651 break; 1652 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 1653 len = sprintf(buf, "%u\n", 1654 ha->ip_config.iscsi_max_pdu_size * 1655 BYTE_UNITS); 1656 break; 1657 case ISCSI_IFACE_PARAM_FIRST_BURST: 1658 len = sprintf(buf, "%u\n", 1659 ha->ip_config.iscsi_first_burst_len * 1660 BYTE_UNITS); 1661 break; 1662 case ISCSI_IFACE_PARAM_MAX_R2T: 1663 len = sprintf(buf, "%d\n", 1664 ha->ip_config.iscsi_max_outstnd_r2t); 1665 break; 1666 case ISCSI_IFACE_PARAM_MAX_BURST: 1667 len = sprintf(buf, "%u\n", 1668 ha->ip_config.iscsi_max_burst_len * 1669 BYTE_UNITS); 1670 break; 1671 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 1672 OP_STATE(ha->ip_config.iscsi_options, 1673 ISCSIOPTS_CHAP_AUTH_EN, pval); 1674 1675 len = sprintf(buf, "%s\n", pval); 1676 break; 1677 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 1678 OP_STATE(ha->ip_config.iscsi_options, 1679 ISCSIOPTS_BIDI_CHAP_EN, pval); 1680 1681 len = sprintf(buf, "%s\n", pval); 1682 break; 1683 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 1684 OP_STATE(ha->ip_config.iscsi_options, 1685 ISCSIOPTS_DISCOVERY_AUTH_EN, pval); 1686 1687 len = sprintf(buf, "%s\n", pval); 1688 break; 1689 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 1690 OP_STATE(ha->ip_config.iscsi_options, 1691 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); 1692 1693 len = sprintf(buf, "%s\n", pval); 1694 break; 1695 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 1696 OP_STATE(ha->ip_config.iscsi_options, 1697 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); 1698 1699 len = sprintf(buf, "%s\n", pval); 1700 break; 1701 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 1702 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); 1703 break; 1704 default: 1705 len = -ENOSYS; 1706 } 1707 } 1708 1709 return len; 1710} 1711 1712static struct iscsi_endpoint * 1713qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, 1714 int non_blocking) 1715{ 1716 int ret; 1717 struct iscsi_endpoint *ep; 1718 struct qla_endpoint *qla_ep; 1719 struct scsi_qla_host *ha; 1720 struct sockaddr_in *addr; 1721 struct sockaddr_in6 *addr6; 1722 1723 if (!shost) { 1724 ret = -ENXIO; 1725 pr_err("%s: shost is NULL\n", __func__); 1726 return ERR_PTR(ret); 1727 } 1728 1729 ha = iscsi_host_priv(shost); 1730 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); 1731 if (!ep) { 1732 ret = -ENOMEM; 1733 return ERR_PTR(ret); 1734 } 1735 1736 qla_ep = ep->dd_data; 1737 memset(qla_ep, 0, sizeof(struct qla_endpoint)); 1738 if (dst_addr->sa_family == AF_INET) { 1739 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); 1740 addr = (struct sockaddr_in *)&qla_ep->dst_addr; 1741 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, 1742 (char *)&addr->sin_addr)); 1743 } else if (dst_addr->sa_family == AF_INET6) { 1744 memcpy(&qla_ep->dst_addr, dst_addr, 1745 sizeof(struct sockaddr_in6)); 1746 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; 1747 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, 1748 (char *)&addr6->sin6_addr)); 1749 } else { 1750 ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", 1751 __func__); 1752 } 1753 1754 qla_ep->host = shost; 1755 1756 return ep; 1757} 1758 1759static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int 
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;
	int ret = 0;

	qla_ep = ep->dd_data;
	ha = to_qla_host(qla_ep->host);
	DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));

	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
		ret = 1;

	return ret;
}

static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;

	qla_ep = ep->dd_data;
	ha = to_qla_host(qla_ep->host);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));
	iscsi_destroy_endpoint(ep);
}

static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
				enum iscsi_param param,
				char *buf)
{
	struct qla_endpoint *qla_ep = ep->dd_data;
	struct sockaddr *dst_addr;
	struct scsi_qla_host *ha;

	if (!qla_ep)
		return -ENOTCONN;

	ha = to_qla_host(qla_ep->host);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
		if (!dst_addr)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &qla_ep->dst_addr, param, buf);
	default:
		return -ENOSYS;
	}
}

static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				   struct iscsi_stats *stats)
{
	struct iscsi_session *sess;
	struct iscsi_cls_session *cls_sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct ql_iscsi_stats *ql_iscsi_stats;
	int stats_size;
	int ret;
	dma_addr_t iscsi_stats_dma;

	cls_sess = iscsi_conn_to_session(cls_conn);
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));
	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
	/* Allocate memory */
	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		goto exit_get_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		goto free_stats;
	}

	/* octets */
	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
	/* xmit pdus */
	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
	/* recv pdus */
	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
	stats->logoutrsp_pdus =
			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);

free_stats:
	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
			  iscsi_stats_dma);
exit_get_stats:
	return;
}

static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *session;
	unsigned long flags;
	enum blk_eh_timer_return ret = BLK_EH_DONE;

	session = starget_to_session(scsi_target(sc->device));

	spin_lock_irqsave(&session->lock, flags);
	if (session->state == ISCSI_SESSION_FAILED)
		ret = BLK_EH_RESET_TIMER;
	spin_unlock_irqrestore(&session->lock, flags);

	return ret;
}

static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;

	qla4xxx_get_firmware_state(ha);

	switch (ha->addl_fw_state & 0x0F00) {
	case FW_ADDSTATE_LINK_SPEED_10MBPS:
		speed = ISCSI_PORT_SPEED_10MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_100MBPS:
		speed = ISCSI_PORT_SPEED_100MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_1GBPS:
		speed = ISCSI_PORT_SPEED_1GBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_10GBPS:
		speed = ISCSI_PORT_SPEED_10GBPS;
		break;
	}
	ihost->port_speed = speed;
}

static void qla4xxx_set_port_state(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t state = ISCSI_PORT_STATE_DOWN;

	if (test_bit(AF_LINK_UP, &ha->flags))
		state = ISCSI_PORT_STATE_UP;

	ihost->port_state = state;
}

static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
		break;
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		len = sprintf(buf, "%s\n", ha->name_string);
		break;
	case ISCSI_HOST_PARAM_PORT_STATE:
		qla4xxx_set_port_state(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
		break;
	case ISCSI_HOST_PARAM_PORT_SPEED:
		qla4xxx_set_port_speed(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
		break;
	default:
		return -ENOSYS;
	}

	return len;
}

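/*
 * The iface helpers below create and destroy the iscsi_iface objects
 * exported through the iSCSI transport class: one IPv4 interface and up
 * to two IPv6 interfaces per host.  qla4xxx_create_ifaces() only creates
 * an interface when the corresponding protocol is enabled in the
 * firmware IP configuration.
 */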
static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv4)
		return;

	/* IPv4 */
	ha->iface_ipv4 = iscsi_create_iface(ha->host,
					    &qla4xxx_iscsi_transport,
					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
	if (!ha->iface_ipv4)
		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
			   "iface0.\n");
}

static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
{
	if (!ha->iface_ipv6_0)
		/* IPv6 iface-0 */
		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
						      &qla4xxx_iscsi_transport,
						      ISCSI_IFACE_TYPE_IPV6, 0,
						      0);
	if (!ha->iface_ipv6_0)
		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
			   "iface0.\n");

	if (!ha->iface_ipv6_1)
		/* IPv6 iface-1 */
		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
						      &qla4xxx_iscsi_transport,
						      ISCSI_IFACE_TYPE_IPV6, 1,
						      0);
	if (!ha->iface_ipv6_1)
		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
			   "iface1.\n");
}

static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
{
	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
		qla4xxx_create_ipv4_iface(ha);

	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
		qla4xxx_create_ipv6_iface(ha);
}

static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv4) {
		iscsi_destroy_iface(ha->iface_ipv4);
		ha->iface_ipv4 = NULL;
	}
}

static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv6_0) {
		iscsi_destroy_iface(ha->iface_ipv6_0);
		ha->iface_ipv6_0 = NULL;
	}
	if (ha->iface_ipv6_1) {
		iscsi_destroy_iface(ha->iface_ipv6_1);
		ha->iface_ipv6_1 = NULL;
	}
}

static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
	qla4xxx_destroy_ipv4_iface(ha);
	qla4xxx_destroy_ipv6_iface(ha);
}

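/*
 * qla4xxx_set_ipv6(), qla4xxx_set_ipv4() and qla4xxx_set_iscsi_param()
 * translate iscsi_iface_param_info values received from user space into
 * the little-endian fields of the firmware address control block
 * (struct addr_ctrl_blk).  Multi-byte values are read directly from
 * iface_param->value and converted with cpu_to_le16()/cpu_to_le32();
 * most parameters are honoured only for the even-numbered interface.
 */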
static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
			     struct iscsi_iface_param_info *iface_param,
			     struct addr_ctrl_blk *init_fw_cb)
{
	/*
	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
	 * iface_num 1 is valid only for IPv6 Addr.
	 */
	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV6_ADDR:
		if (iface_param->iface_num & 0x1)
			/* IPv6 Addr 1 */
			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
			       sizeof(init_fw_cb->ipv6_addr1));
		else
			/* IPv6 Addr 0 */
			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
			       sizeof(init_fw_cb->ipv6_addr0));
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		if (iface_param->iface_num & 0x1)
			break;
		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
		       sizeof(init_fw_cb->ipv6_if_id));
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER:
		if (iface_param->iface_num & 0x1)
			break;
		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		/* Autocfg applies to the even interface only */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
			init_fw_cb->ipv6_addtl_opts &=
				cpu_to_le16(
				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
			init_fw_cb->ipv6_addtl_opts |=
				cpu_to_le16(
				   IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
		else
			ql4_printk(KERN_ERR, ha,
				   "Invalid autocfg setting for IPv6 addr\n");
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		/* Autocfg applies to the even interface only */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] ==
		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
		else if (iface_param->value[0] ==
			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
		else
			ql4_printk(KERN_ERR, ha,
				   "Invalid autocfg setting for IPv6 linklocal addr\n");
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
		/* Autocfg applies to the even interface only */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
			qla4xxx_create_ipv6_iface(ha);
		} else {
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
					    0xFFFF);
			qla4xxx_destroy_ipv6_iface(ha);
		}
		break;
	case ISCSI_NET_PARAM_VLAN_TAG:
		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
			break;
		init_fw_cb->ipv6_vlan_tag =
				cpu_to_be16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
		else
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
		break;
	case ISCSI_NET_PARAM_MTU:
		init_fw_cb->eth_mtu_size =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_PORT:
		/* Port applies to the even interface only */
		if (iface_param->iface_num & 0x1)
			break;

		init_fw_cb->ipv6_port =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_DELAYED_ACK_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
			init_fw_cb->ipv6_tcp_opts |=
				cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
		else
			init_fw_cb->ipv6_tcp_opts &=
				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
					    0xFFFF);
		break;
	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
			init_fw_cb->ipv6_tcp_opts |=
				cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
		else
			init_fw_cb->ipv6_tcp_opts &=
				cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
		break;
	case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
			init_fw_cb->ipv6_tcp_opts |=
				cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
		else
			init_fw_cb->ipv6_tcp_opts &=
				cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
		break;
	case ISCSI_NET_PARAM_TCP_WSF:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
		break;
	case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_tcp_opts &=
					cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
		init_fw_cb->ipv6_tcp_opts |=
				cpu_to_le16((iface_param->value[0] << 1) &
					    IPV6_TCPOPT_TIMER_SCALE);
		break;
	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv6_tcp_opts |=
				cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
		else
			init_fw_cb->ipv6_tcp_opts &=
				cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
		break;
	case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
		else
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
		break;
	case ISCSI_NET_PARAM_REDIRECT_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_REDIRECT_EN);
		else
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
		break;
	case ISCSI_NET_PARAM_IPV6_MLD_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv6_addtl_opts |=
				cpu_to_le16(IPV6_ADDOPT_MLD_EN);
		else
			init_fw_cb->ipv6_addtl_opts &=
				cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
		break;
	case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_flow_lbl =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_traffic_class = iface_param->value[0];
		break;
	case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_hop_limit = iface_param->value[0];
		break;
	case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_nd_reach_time =
				cpu_to_le32(*(uint32_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_nd_rexmit_timer =
				cpu_to_le32(*(uint32_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_nd_stale_timeout =
				cpu_to_le32(*(uint32_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
		break;
	case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv6_gw_advrt_mtu =
				cpu_to_le32(*(uint32_t *)iface_param->value);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
			   iface_param->param);
		break;
	}
}

static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
			     struct iscsi_iface_param_info *iface_param,
			     struct addr_ctrl_blk *init_fw_cb)
{
	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV4_ADDR:
		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv4_addr));
		break;
	case ISCSI_NET_PARAM_IPV4_SUBNET:
		memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
		       sizeof(init_fw_cb->ipv4_subnet));
		break;
	case ISCSI_NET_PARAM_IPV4_GW:
		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv4_gw_addr));
		break;
	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
			init_fw_cb->ipv4_tcp_opts |=
					cpu_to_le16(TCPOPT_DHCP_ENABLE);
		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
			init_fw_cb->ipv4_tcp_opts &=
					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
		else
			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
			qla4xxx_create_ipv4_iface(ha);
		} else {
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
					    0xFFFF);
			qla4xxx_destroy_ipv4_iface(ha);
		}
		break;
	case ISCSI_NET_PARAM_VLAN_TAG:
		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
			break;
		init_fw_cb->ipv4_vlan_tag =
				cpu_to_be16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
		break;
	case ISCSI_NET_PARAM_MTU:
		init_fw_cb->eth_mtu_size =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_PORT:
		init_fw_cb->ipv4_port =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_DELAYED_ACK_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
			init_fw_cb->ipv4_tcp_opts |=
				cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
		else
			init_fw_cb->ipv4_tcp_opts &=
				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
					    0xFFFF);
		break;
	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
			init_fw_cb->ipv4_tcp_opts |=
				cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
		else
			init_fw_cb->ipv4_tcp_opts &=
				cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
		break;
	case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
			init_fw_cb->ipv4_tcp_opts |=
				cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
		else
			init_fw_cb->ipv4_tcp_opts &=
				cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
		break;
	case ISCSI_NET_PARAM_TCP_WSF:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
		break;
	case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
		init_fw_cb->ipv4_tcp_opts |=
				cpu_to_le16((iface_param->value[0] << 1) &
					    TCPOPT_TIMER_SCALE);
		break;
	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_tcp_opts |=
				cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
		else
			init_fw_cb->ipv4_tcp_opts &=
				cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_tcp_opts |=
				cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
		else
			init_fw_cb->ipv4_tcp_opts &=
				cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_tcp_opts |=
				cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
		else
			init_fw_cb->ipv4_tcp_opts &=
				cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_TOS_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_IPV4_TOS_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_IPV4_TOS_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_TOS:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv4_tos = iface_param->value[0];
		break;
	case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_GRAT_ARP_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_GRAT_ARP_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_ALT_CID_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_ALT_CID_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
		if (iface_param->iface_num & 0x1)
			break;
		memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
		       (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
		init_fw_cb->ipv4_dhcp_alt_cid_len =
				strlen(init_fw_cb->ipv4_dhcp_alt_cid);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_REQ_VID_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_REQ_VID_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_USE_VID_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_USE_VID_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
		if (iface_param->iface_num & 0x1)
			break;
		memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
		       (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
		init_fw_cb->ipv4_dhcp_vid_len =
				strlen(init_fw_cb->ipv4_dhcp_vid);
		break;
	case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_LEARN_IQN_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_LEARN_IQN_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
		break;
	case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_IN_FORWARD_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_IN_FORWARD_EN);
		break;
	case ISCSI_NET_PARAM_REDIRECT_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
		else
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
		break;
	case ISCSI_NET_PARAM_IPV4_TTL:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->ipv4_ttl = iface_param->value[0];
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
			   iface_param->param);
		break;
	}
}

static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
				    struct iscsi_iface_param_info *iface_param,
				    struct addr_ctrl_blk *init_fw_cb)
{
	switch (iface_param->param) {
	case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->def_timeout =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_IFACE_PARAM_HDRDGST_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
		break;
	case ISCSI_IFACE_PARAM_DATADGST_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
		break;
	case ISCSI_IFACE_PARAM_IMM_DATA_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
		break;
	case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
		break;
	case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
		break;
	case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
		break;
	case ISCSI_IFACE_PARAM_ERL:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
		init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
						      ISCSIOPTS_ERL);
		break;
	case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->iscsi_max_pdu_size =
				cpu_to_le32(*(uint32_t *)iface_param->value) /
				BYTE_UNITS;
		break;
	case ISCSI_IFACE_PARAM_FIRST_BURST:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->iscsi_fburst_len =
				cpu_to_le32(*(uint32_t *)iface_param->value) /
				BYTE_UNITS;
		break;
	case ISCSI_IFACE_PARAM_MAX_R2T:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->iscsi_max_outstnd_r2t =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_IFACE_PARAM_MAX_BURST:
		if (iface_param->iface_num & 0x1)
			break;
		init_fw_cb->iscsi_max_burst_len =
				cpu_to_le32(*(uint32_t *)iface_param->value) /
				BYTE_UNITS;
		break;
	case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
		break;
	case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
		break;
	case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
		break;
	case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
		break;
	case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
		if (iface_param->iface_num & 0x1)
			break;
		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
			init_fw_cb->iscsi_opts |=
				cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
		else
			init_fw_cb->iscsi_opts &=
				cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
			   iface_param->param);
		break;
	}
}

static void
qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
{
	struct addr_ctrl_blk_def *acb;

	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
	memset(acb->reserved1, 0, sizeof(acb->reserved1));
	memset(acb->reserved2, 0, sizeof(acb->reserved2));
	memset(acb->reserved3, 0, sizeof(acb->reserved3));
	memset(acb->reserved4, 0, sizeof(acb->reserved4));
	memset(acb->reserved5, 0, sizeof(acb->reserved5));
	memset(acb->reserved6, 0, sizeof(acb->reserved6));
	memset(acb->reserved7, 0, sizeof(acb->reserved7));
	memset(acb->reserved8, 0, sizeof(acb->reserved8));
	memset(acb->reserved9, 0, sizeof(acb->reserved9));
	memset(acb->reserved10, 0, sizeof(acb->reserved10));
	memset(acb->reserved11, 0, sizeof(acb->reserved11));
	memset(acb->reserved12, 0, sizeof(acb->reserved12));
	memset(acb->reserved13, 0, sizeof(acb->reserved13));
	memset(acb->reserved14, 0, sizeof(acb->reserved14));
	memset(acb->reserved15, 0, sizeof(acb->reserved15));
}

/*
 * Apply a batch of interface parameters received over netlink: read the
 * current init firmware control block, update it from the attributes,
 * commit the result to flash, then disable and re-enable the ACB so the
 * firmware picks up the new settings, and finally refresh the driver's
 * local copy of the IFCB.
 */
static int
qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int rval = 0;
	struct iscsi_iface_param_info *iface_param = NULL;
	struct addr_ctrl_blk *init_fw_cb = NULL;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	uint32_t rem = len;
	struct nlattr *attr;

	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct addr_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (!init_fw_cb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
			   __func__);
		return -ENOMEM;
	}

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	nla_for_each_attr(attr, data, len, rem) {
		if (nla_len(attr) < sizeof(*iface_param)) {
			rval = -EINVAL;
			goto exit_init_fw_cb;
		}

		iface_param = nla_data(attr);

		if (iface_param->param_type == ISCSI_NET_PARAM) {
			switch (iface_param->iface_type) {
			case ISCSI_IFACE_TYPE_IPV4:
				switch (iface_param->iface_num) {
				case 0:
					qla4xxx_set_ipv4(ha, iface_param,
							 init_fw_cb);
					break;
				default:
					/* Cannot have more than one IPv4 interface */
					ql4_printk(KERN_ERR, ha,
						   "Invalid IPv4 iface number = %d\n",
						   iface_param->iface_num);
					break;
				}
				break;
			case ISCSI_IFACE_TYPE_IPV6:
				switch (iface_param->iface_num) {
				case 0:
				case 1:
					qla4xxx_set_ipv6(ha, iface_param,
							 init_fw_cb);
					break;
				default:
					/* Cannot have more than two IPv6 interfaces */
					ql4_printk(KERN_ERR, ha,
						   "Invalid IPv6 iface number = %d\n",
						   iface_param->iface_num);
					break;
				}
				break;
			default:
				ql4_printk(KERN_ERR, ha,
					   "Invalid iface type\n");
				break;
			}
		} else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
			qla4xxx_set_iscsi_param(ha, iface_param,
						init_fw_cb);
		} else {
			continue;
		}
	}

	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);

	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
				 sizeof(struct addr_ctrl_blk),
				 FLASH_OPT_RMW_COMMIT);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	rval = qla4xxx_disable_acb(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	wait_for_completion_timeout(&ha->disable_acb_comp,
				    DISABLE_ACB_TOV * HZ);

	qla4xxx_initcb_to_acb(init_fw_cb);

	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
				  init_fw_cb_dma);

exit_init_fw_cb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);

	return rval;
}

static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
				     enum iscsi_param param, char *buf)
{
	struct iscsi_session *sess = cls_sess->dd_data;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
	struct ql4_chap_table chap_tbl;
	int rval, len;
	uint16_t idx;

	memset(&chap_tbl, 0, sizeof(chap_tbl));
	switch (param) {
	case ISCSI_PARAM_CHAP_IN_IDX:
		rval = qla4xxx_get_chap_index(ha, sess->username_in,
					      sess->password_in, BIDI_CHAP,
					      &idx);
		if (rval)
			len = sprintf(buf, "\n");
		else
			len = sprintf(buf, "%hu\n", idx);
		break;
	case ISCSI_PARAM_CHAP_OUT_IDX:
		if (ddb_entry->ddb_type == FLASH_DDB) {
			if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
				idx = ddb_entry->chap_tbl_idx;
				rval = QLA_SUCCESS;
			} else {
				rval = QLA_ERROR;
			}
		} else {
			rval = qla4xxx_get_chap_index(ha, sess->username,
						      sess->password,
						      LOCAL_CHAP, &idx);
		}
		if (rval)
			len = sprintf(buf, "\n");
		else
			len = sprintf(buf, "%hu\n", idx);
		break;
	case ISCSI_PARAM_USERNAME:
	case ISCSI_PARAM_PASSWORD:
		/* First, populate session username and password for FLASH DDB,
		 * if not already done. This happens when session login fails
		 * for a FLASH DDB.
		 */
		if (ddb_entry->ddb_type == FLASH_DDB &&
		    ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
		    !sess->username && !sess->password) {
			idx = ddb_entry->chap_tbl_idx;
			rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
							     chap_tbl.secret,
							     idx);
			if (!rval) {
				iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
						(char *)chap_tbl.name,
						strlen((char *)chap_tbl.name));
				iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
						(char *)chap_tbl.secret,
						chap_tbl.secret_len);
			}
		}
		fallthrough;
	default:
		return iscsi_session_get_param(cls_sess, param, buf);
	}

	return len;
}

static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
				  enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn;
	struct qla_conn *qla_conn;
	struct sockaddr *dst_addr;

	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 dst_addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}
}

int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
{
	uint32_t mbx_sts = 0;
	uint16_t tmp_ddb_index;
	int ret;

get_ddb_index:
	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);

	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free DDB index not available\n"));
		ret = QLA_ERROR;
		goto exit_get_ddb_index;
	}

	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
		goto get_ddb_index;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Found a free DDB index at %d\n", tmp_ddb_index));
	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
	if (ret == QLA_ERROR) {
		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
			ql4_printk(KERN_INFO, ha,
				   "DDB index = %d not available, trying next\n",
				   tmp_ddb_index);
			goto get_ddb_index;
		}
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free FW DDB not available\n"));
	}

	*ddb_index = tmp_ddb_index;

exit_get_ddb_index:
	return ret;
}

static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
				   struct ddb_entry *ddb_entry,
				   char *existing_ipaddr,
				   char *user_ipaddr)
{
	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
	char formatted_ipaddr[DDB_IPADDR_LEN];
	int status = QLA_SUCCESS, ret = 0;

	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
			       '\0', NULL);
		if (ret == 0) {
			status = QLA_ERROR;
			goto out_match;
		}
		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
	} else {
		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
			       '\0', NULL);
		if (ret == 0) {
			status = QLA_ERROR;
			goto out_match;
		}
		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
	}

	if (strcmp(existing_ipaddr, formatted_ipaddr))
		status = QLA_ERROR;

out_match:
	return status;
}

static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
				      struct iscsi_cls_conn *cls_conn)
{
	int idx = 0, max_ddbs, rval;
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess, *existing_sess;
	struct iscsi_conn *conn, *existing_conn;
	struct ddb_entry *ddb_entry;

	sess = cls_sess->dd_data;
	conn = cls_conn->dd_data;

	if (sess->targetname == NULL ||
	    conn->persistent_address == NULL ||
	    conn->persistent_port == 0)
		return QLA_ERROR;

	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;

	for (idx = 0; idx < max_ddbs; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if (ddb_entry == NULL)
			continue;

		if (ddb_entry->ddb_type != FLASH_DDB)
			continue;

		existing_sess = ddb_entry->sess->dd_data;
		existing_conn = ddb_entry->conn->dd_data;

		if (existing_sess->targetname == NULL ||
		    existing_conn->persistent_address == NULL ||
		    existing_conn->persistent_port == 0)
			continue;

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IQN = %s User IQN = %s\n",
				  existing_sess->targetname,
				  sess->targetname));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IP = %s User IP = %s\n",
				  existing_conn->persistent_address,
				  conn->persistent_address));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Port = %d User Port = %d\n",
				  existing_conn->persistent_port,
				  conn->persistent_port));

		if (strcmp(existing_sess->targetname, sess->targetname))
			continue;
		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
					existing_conn->persistent_address,
					conn->persistent_address);
		if (rval == QLA_ERROR)
			continue;
		if (existing_conn->persistent_port != conn->persistent_port)
			continue;
		break;
	}

	if (idx == max_ddbs)
		return QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Match found in fwdb sessions\n"));
	return QLA_SUCCESS;
}

static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
		       uint16_t cmds_max, uint16_t qdepth,
		       uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_sess;
	struct scsi_qla_host *ha;
	struct qla_endpoint *qla_ep;
	struct ddb_entry *ddb_entry;
	uint16_t ddb_index;
	struct iscsi_session *sess;
	int ret;

	if (!ep) {
		printk(KERN_ERR "qla4xxx: missing ep.\n");
		return NULL;
	}

	qla_ep = ep->dd_data;
	ha = to_qla_host(qla_ep->host);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));

	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
	if (ret == QLA_ERROR)
		return NULL;

	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
				       cmds_max, sizeof(struct ddb_entry),
				       sizeof(struct ql4_task_data),
				       initial_cmdsn, ddb_index);
	if (!cls_sess)
		return NULL;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->fw_ddb_index = ddb_index;
	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
	ddb_entry->ha = ha;
	ddb_entry->sess = cls_sess;
	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
	ddb_entry->ddb_change = qla4xxx_ddb_change;
	clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
	ha->tot_ddbs++;

	return cls_sess;
}

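/*
 * Tear down a session created by qla4xxx_session_create().  The firmware
 * DDB state is polled for up to LOGOUT_TOV seconds until the logout
 * completes (NO_CONNECTION_ACTIVE or SESSION_FAILED) before the DDB
 * entry is cleared and freed and the class session is torn down.
 */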
static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	unsigned long flags, wtime;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t ddb_state;
	int ret;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto destroy_session;
	}

	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
					      fw_ddb_entry, fw_ddb_entry_dma,
					      NULL, NULL, &ddb_state, NULL,
					      NULL, NULL);
		if (ret == QLA_ERROR)
			goto destroy_session;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto destroy_session;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

destroy_session:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	iscsi_session_teardown(cls_sess);

	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
				    conn_idx);
	if (!cls_conn) {
		pr_info("%s: Cannot create connection for conn_idx = %u\n",
			__func__, conn_idx);
		return NULL;
	}

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->conn = cls_conn;

	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
			  conn_idx));
	return cls_conn;
}

static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn;
	struct qla_conn *qla_conn;
	struct iscsi_endpoint *ep;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct iscsi_session *sess;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
			  cls_session->sid, cls_conn->cid));

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;
	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;
	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	qla_conn->qla_ep = ep->dd_data;
	iscsi_put_endpoint(ep);
	return 0;
}

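/*
 * Start a connection: bail out if a matching session already exists in
 * the firmware DDB table, otherwise program the DDB from the connection
 * parameters and ask the firmware to open the connection and perform
 * the login.
 */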
static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t mbx_sts = 0;
	int ret = 0;
	int status = QLA_SUCCESS;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
			  cls_sess->sid, cls_conn->cid));

	/* Check if we have a matching FW DDB; if so, do not login to this
	 * target, since that could cause the target to log out the previous
	 * connection.
	 */
	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
	if (ret == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha,
			   "Session already exists in FW.\n");
		ret = -EEXIST;
		goto exit_conn_start;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_conn_start;
	}

	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
	if (ret) {
		/* If iscsid is stopped and started, there is no need to do
		 * set param again since the DDB state will already be
		 * active and FW does not allow set ddb on an active session.
		 */
		if (mbx_sts)
			if (ddb_entry->fw_ddb_device_state ==
						DDB_DS_SESSION_ACTIVE) {
				ddb_entry->unblock_sess(ddb_entry->sess);
				goto exit_set_param;
			}

		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
			   __func__, ddb_entry->fw_ddb_index);
		goto exit_conn_start;
	}

	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
	if (status == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
			   sess->targetname);
		ret = -EINVAL;
		goto exit_conn_start;
	}

	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
		      ddb_entry->fw_ddb_device_state));

exit_set_param:
	ret = 0;

exit_conn_start:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}

static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct scsi_qla_host *ha;
	struct ddb_entry *ddb_entry;
	int options;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
			  cls_conn->cid));

	options = LOGOUT_OPTION_CLOSE_SESSION;
	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
}

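/*
 * Deferred completion handler for passthrough iSCSI PDUs.  On
 * PASSTHRU_STATUS_COMPLETE the original itt is restored in the response
 * header and the PDU is handed back to libiscsi via iscsi_complete_pdu();
 * any other completion status is logged as an error.
 */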
static void qla4xxx_task_work(struct work_struct *wdata)
{
	struct ql4_task_data *task_data;
	struct scsi_qla_host *ha;
	struct passthru_status *sts;
	struct iscsi_task *task;
	struct iscsi_hdr *hdr;
	uint8_t *data;
	uint32_t data_len;
	struct iscsi_conn *conn;
	int hdr_len;
	itt_t itt;

	task_data = container_of(wdata, struct ql4_task_data, task_work);
	ha = task_data->ha;
	task = task_data->task;
	sts = &task_data->sts;
	hdr_len = sizeof(struct iscsi_hdr);

	DEBUG3(printk(KERN_INFO "Status returned\n"));
	DEBUG3(qla4xxx_dump_buffer(sts, 64));
	DEBUG3(printk(KERN_INFO "Response buffer\n"));
	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));

	conn = task->conn;

	switch (sts->completionStatus) {
	case PASSTHRU_STATUS_COMPLETE:
		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
		itt = sts->handle;
		hdr->itt = itt;
		data = task_data->resp_buffer + hdr_len;
		data_len = task_data->resp_len - hdr_len;
		iscsi_complete_pdu(conn, hdr, data, data_len);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
			   sts->completionStatus);
		break;
	}
	return;
}

static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct ql4_task_data *task_data;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	int hdr_len;

	sess = task->conn->session;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	task_data = task->dd_data;
	memset(task_data, 0, sizeof(struct ql4_task_data));

	if (task->sc) {
		ql4_printk(KERN_INFO, ha,
			   "%s: SCSI Commands not implemented\n", __func__);
		return -EINVAL;
	}

	hdr_len = sizeof(struct iscsi_hdr);
	task_data->ha = ha;
	task_data->task = task;

	if (task->data_count) {
		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
						     task->data_count,
						     DMA_TO_DEVICE);
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
			  __func__, task->conn->max_recv_dlength, hdr_len));

	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
						    task_data->resp_len,
						    &task_data->resp_dma,
						    GFP_ATOMIC);
	if (!task_data->resp_buffer)
		goto exit_alloc_pdu;

	task_data->req_len = task->data_count + hdr_len;
	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
						   task_data->req_len,
						   &task_data->req_dma,
						   GFP_ATOMIC);
	if (!task_data->req_buffer)
		goto exit_alloc_pdu;

	task->hdr = task_data->req_buffer;

	INIT_WORK(&task_data->task_work, qla4xxx_task_work);

	return 0;

exit_alloc_pdu:
	if (task_data->resp_buffer)
		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
				  task_data->resp_buffer, task_data->resp_dma);

	if (task_data->req_buffer)
		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
				  task_data->req_buffer, task_data->req_dma);
	return -ENOMEM;
}

static void qla4xxx_task_cleanup(struct iscsi_task *task)
{
	struct ql4_task_data *task_data;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	int hdr_len;

	hdr_len = sizeof(struct iscsi_hdr);
	sess = task->conn->session;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	task_data = task->dd_data;

	if (task->data_count) {
		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
				 task->data_count, DMA_TO_DEVICE);
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
			  __func__, task->conn->max_recv_dlength, hdr_len));

	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
			  task_data->resp_buffer, task_data->resp_dma);
	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
			  task_data->req_buffer, task_data->req_dma);
	return;
}

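/*
 * Transmit an iSCSI PDU.  Only passthrough (non-SCSI) tasks are handled
 * here; tasks carrying a SCSI command are rejected with -ENOSYS.
 */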
static int qla4xxx_task_xmit(struct iscsi_task *task)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;

	if (!sc)
		return qla4xxx_send_passthru0(task);

	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
		   __func__);
	return -ENOSYS;
}

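/*
 * The two helpers below convert between the firmware DDB format
 * (struct dev_db_entry, little-endian) and the iSCSI transport's flash
 * session/connection objects: qla4xxx_copy_from_fwddb_param() imports a
 * firmware entry, qla4xxx_copy_to_fwddb_param() exports one.  Burst and
 * segment lengths are stored in the firmware entry in units of
 * BYTE_UNITS bytes.
 */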
le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3582 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3583 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3584 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3585 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3586 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3587 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3588 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3589 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3590 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3591 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3592 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3593 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3594 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3595 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3596 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3597 3598 sess->default_taskmgmt_timeout = 3599 le16_to_cpu(fw_ddb_entry->def_timeout); 3600 conn->port = le16_to_cpu(fw_ddb_entry->port); 3601 3602 options = le16_to_cpu(fw_ddb_entry->options); 3603 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3604 if (!conn->ipaddress) { 3605 rc = -ENOMEM; 3606 goto exit_copy; 3607 } 3608 3609 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3610 if (!conn->redirect_ipaddr) { 3611 rc = -ENOMEM; 3612 goto exit_copy; 3613 } 3614 3615 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3616 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3617 3618 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3619 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3620 3621 conn->link_local_ipv6_addr = kmemdup( 3622 fw_ddb_entry->link_local_ipv6_addr, 3623 IPv6_ADDR_LEN, GFP_KERNEL); 3624 if (!conn->link_local_ipv6_addr) { 3625 rc = -ENOMEM; 3626 goto exit_copy; 3627 } 3628 } else { 3629 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3630 } 3631 3632 if (fw_ddb_entry->iscsi_name[0]) { 3633 rc = iscsi_switch_str_param(&sess->targetname, 3634 (char *)fw_ddb_entry->iscsi_name); 3635 if (rc) 3636 goto exit_copy; 3637 } 3638 3639 if (fw_ddb_entry->iscsi_alias[0]) { 3640 rc = iscsi_switch_str_param(&sess->targetalias, 3641 (char *)fw_ddb_entry->iscsi_alias); 3642 if (rc) 3643 goto exit_copy; 3644 } 3645 3646 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3647 3648exit_copy: 3649 return rc; 3650} 3651 3652static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3653 struct iscsi_bus_flash_conn *conn, 3654 struct dev_db_entry *fw_ddb_entry) 3655{ 3656 uint16_t options; 3657 int rc = 0; 3658 3659 options = le16_to_cpu(fw_ddb_entry->options); 3660 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3661 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3662 options |= BIT_8; 3663 else 3664 options &= ~BIT_8; 3665 3666 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3667 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3668 SET_BITVAL(sess->entry_state, options, BIT_3); 3669 fw_ddb_entry->options = cpu_to_le16(options); 3670 3671 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3672 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3673 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3674 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3675 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3676 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); 3677 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3678 
SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3679 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3680 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3681 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3682 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3683 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3684 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3685 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3686 3687 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3688 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3689 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3690 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3691 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3692 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3693 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3694 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3695 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3696 3697 options = le16_to_cpu(fw_ddb_entry->ip_options); 3698 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3699 fw_ddb_entry->ip_options = cpu_to_le16(options); 3700 3701 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3702 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3703 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3704 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3705 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3706 fw_ddb_entry->iscsi_first_burst_len = 3707 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3708 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3709 BYTE_UNITS); 3710 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3711 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3712 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3713 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3714 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3715 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3716 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3717 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3718 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3719 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3720 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3721 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3722 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3723 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3724 fw_ddb_entry->port = cpu_to_le16(conn->port); 3725 fw_ddb_entry->def_timeout = 3726 cpu_to_le16(sess->default_taskmgmt_timeout); 3727 3728 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3729 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3730 else 3731 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3732 3733 if (conn->ipaddress) 3734 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3735 sizeof(fw_ddb_entry->ip_addr)); 3736 3737 if (conn->redirect_ipaddr) 3738 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3739 sizeof(fw_ddb_entry->tgt_addr)); 3740 3741 if (conn->link_local_ipv6_addr) 3742 memcpy(fw_ddb_entry->link_local_ipv6_addr, 3743 conn->link_local_ipv6_addr, 3744 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3745 3746 if (sess->targetname) 3747 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3748 sizeof(fw_ddb_entry->iscsi_name)); 3749 3750 if (sess->targetalias) 3751 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias, 3752 sizeof(fw_ddb_entry->iscsi_alias)); 3753 3754 
COPY_ISID(fw_ddb_entry->isid, sess->isid); 3755 3756 return rc; 3757} 3758 3759static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3760 struct iscsi_session *sess, 3761 struct dev_db_entry *fw_ddb_entry) 3762{ 3763 unsigned long options = 0; 3764 uint16_t ddb_link; 3765 uint16_t disc_parent; 3766 char ip_addr[DDB_IPADDR_LEN]; 3767 3768 options = le16_to_cpu(fw_ddb_entry->options); 3769 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3770 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3771 &options); 3772 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3773 3774 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3775 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3776 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3777 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3778 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3779 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3780 &options); 3781 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3782 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3783 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3784 &options); 3785 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3786 sess->discovery_auth_optional = 3787 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3788 if (test_bit(ISCSIOPT_ERL1, &options)) 3789 sess->erl |= BIT_1; 3790 if (test_bit(ISCSIOPT_ERL0, &options)) 3791 sess->erl |= BIT_0; 3792 3793 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3794 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3795 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3796 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3797 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3798 conn->tcp_timer_scale |= BIT_3; 3799 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3800 conn->tcp_timer_scale |= BIT_2; 3801 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3802 conn->tcp_timer_scale |= BIT_1; 3803 3804 conn->tcp_timer_scale >>= 1; 3805 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3806 3807 options = le16_to_cpu(fw_ddb_entry->ip_options); 3808 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3809 3810 conn->max_recv_dlength = BYTE_UNITS * 3811 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3812 conn->max_xmit_dlength = BYTE_UNITS * 3813 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3814 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3815 sess->first_burst = BYTE_UNITS * 3816 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3817 sess->max_burst = BYTE_UNITS * 3818 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3819 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3820 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3821 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3822 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3823 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3824 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3825 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3826 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3827 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3828 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3829 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3830 sess->tsid = 
le16_to_cpu(fw_ddb_entry->tsid); 3831 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3832 3833 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3834 if (ddb_link == DDB_ISNS) 3835 disc_parent = ISCSI_DISC_PARENT_ISNS; 3836 else if (ddb_link == DDB_NO_LINK) 3837 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3838 else if (ddb_link < MAX_DDB_ENTRIES) 3839 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3840 else 3841 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3842 3843 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3844 iscsi_get_discovery_parent_name(disc_parent), 0); 3845 3846 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3847 (char *)fw_ddb_entry->iscsi_alias, 0); 3848 3849 options = le16_to_cpu(fw_ddb_entry->options); 3850 if (options & DDB_OPT_IPV6_DEVICE) { 3851 memset(ip_addr, 0, sizeof(ip_addr)); 3852 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3853 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3854 (char *)ip_addr, 0); 3855 } 3856} 3857 3858static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3859 struct dev_db_entry *fw_ddb_entry, 3860 struct iscsi_cls_session *cls_sess, 3861 struct iscsi_cls_conn *cls_conn) 3862{ 3863 int buflen = 0; 3864 struct iscsi_session *sess; 3865 struct ddb_entry *ddb_entry; 3866 struct ql4_chap_table chap_tbl; 3867 struct iscsi_conn *conn; 3868 char ip_addr[DDB_IPADDR_LEN]; 3869 uint16_t options = 0; 3870 3871 sess = cls_sess->dd_data; 3872 ddb_entry = sess->dd_data; 3873 conn = cls_conn->dd_data; 3874 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3875 3876 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3877 3878 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3879 3880 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3881 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3882 3883 memset(ip_addr, 0, sizeof(ip_addr)); 3884 options = le16_to_cpu(fw_ddb_entry->options); 3885 if (options & DDB_OPT_IPV6_DEVICE) { 3886 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3887 3888 memset(ip_addr, 0, sizeof(ip_addr)); 3889 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3890 } else { 3891 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3892 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3893 } 3894 3895 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3896 (char *)ip_addr, buflen); 3897 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3898 (char *)fw_ddb_entry->iscsi_name, buflen); 3899 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3900 (char *)ha->name_string, buflen); 3901 3902 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3903 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3904 chap_tbl.secret, 3905 ddb_entry->chap_tbl_idx)) { 3906 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3907 (char *)chap_tbl.name, 3908 strlen((char *)chap_tbl.name)); 3909 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3910 (char *)chap_tbl.secret, 3911 chap_tbl.secret_len); 3912 } 3913 } 3914} 3915 3916void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3917 struct ddb_entry *ddb_entry) 3918{ 3919 struct iscsi_cls_session *cls_sess; 3920 struct iscsi_cls_conn *cls_conn; 3921 uint32_t ddb_state; 3922 dma_addr_t fw_ddb_entry_dma; 3923 struct dev_db_entry *fw_ddb_entry; 3924 3925 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3926 &fw_ddb_entry_dma, GFP_KERNEL); 3927 if (!fw_ddb_entry) { 3928 ql4_printk(KERN_ERR, ha, 3929 "%s: Unable to allocate dma buffer\n", __func__); 3930 goto 
exit_session_conn_fwddb_param; 3931 } 3932 3933 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3934 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3935 NULL, NULL, NULL) == QLA_ERROR) { 3936 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3937 "get_ddb_entry for fw_ddb_index %d\n", 3938 ha->host_no, __func__, 3939 ddb_entry->fw_ddb_index)); 3940 goto exit_session_conn_fwddb_param; 3941 } 3942 3943 cls_sess = ddb_entry->sess; 3944 3945 cls_conn = ddb_entry->conn; 3946 3947 /* Update params */ 3948 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3949 3950exit_session_conn_fwddb_param: 3951 if (fw_ddb_entry) 3952 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3953 fw_ddb_entry, fw_ddb_entry_dma); 3954} 3955 3956void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3957 struct ddb_entry *ddb_entry) 3958{ 3959 struct iscsi_cls_session *cls_sess; 3960 struct iscsi_cls_conn *cls_conn; 3961 struct iscsi_session *sess; 3962 struct iscsi_conn *conn; 3963 uint32_t ddb_state; 3964 dma_addr_t fw_ddb_entry_dma; 3965 struct dev_db_entry *fw_ddb_entry; 3966 3967 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3968 &fw_ddb_entry_dma, GFP_KERNEL); 3969 if (!fw_ddb_entry) { 3970 ql4_printk(KERN_ERR, ha, 3971 "%s: Unable to allocate dma buffer\n", __func__); 3972 goto exit_session_conn_param; 3973 } 3974 3975 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3976 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3977 NULL, NULL, NULL) == QLA_ERROR) { 3978 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3979 "get_ddb_entry for fw_ddb_index %d\n", 3980 ha->host_no, __func__, 3981 ddb_entry->fw_ddb_index)); 3982 goto exit_session_conn_param; 3983 } 3984 3985 cls_sess = ddb_entry->sess; 3986 sess = cls_sess->dd_data; 3987 3988 cls_conn = ddb_entry->conn; 3989 conn = cls_conn->dd_data; 3990 3991 /* Update timers after login */ 3992 ddb_entry->default_relogin_timeout = 3993 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3994 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
3995 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; 3996 ddb_entry->default_time2wait = 3997 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3998 3999 /* Update params */ 4000 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 4001 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 4002 4003 memcpy(sess->initiatorname, ha->name_string, 4004 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 4005 4006exit_session_conn_param: 4007 if (fw_ddb_entry) 4008 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 4009 fw_ddb_entry, fw_ddb_entry_dma); 4010} 4011 4012/* 4013 * Timer routines 4014 */ 4015static void qla4xxx_timer(struct timer_list *t); 4016 4017static void qla4xxx_start_timer(struct scsi_qla_host *ha, 4018 unsigned long interval) 4019{ 4020 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", 4021 __func__, ha->host->host_no)); 4022 timer_setup(&ha->timer, qla4xxx_timer, 0); 4023 ha->timer.expires = jiffies + interval * HZ; 4024 add_timer(&ha->timer); 4025 ha->timer_active = 1; 4026} 4027 4028static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 4029{ 4030 del_timer_sync(&ha->timer); 4031 ha->timer_active = 0; 4032} 4033 4034/** 4035 * qla4xxx_mark_device_missing - blocks the session 4036 * @cls_session: Pointer to the session to be blocked 4038 * 4039 * This routine marks the device missing and closes its connection. 4040 **/ 4041void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) 4042{ 4043 iscsi_block_session(cls_session); 4044} 4045 4046/** 4047 * qla4xxx_mark_all_devices_missing - mark all devices as missing. 4048 * @ha: Pointer to host adapter structure. 4049 * 4050 * This routine marks every device missing by blocking its session. 4051 **/ 4052void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) 4053{ 4054 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); 4055} 4056 4057static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 4058 struct ddb_entry *ddb_entry, 4059 struct scsi_cmnd *cmd) 4060{ 4061 struct srb *srb; 4062 4063 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 4064 if (!srb) 4065 return srb; 4066 4067 kref_init(&srb->srb_ref); 4068 srb->ha = ha; 4069 srb->ddb = ddb_entry; 4070 srb->cmd = cmd; 4071 srb->flags = 0; 4072 CMD_SP(cmd) = (void *)srb; 4073 4074 return srb; 4075} 4076 4077static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) 4078{ 4079 struct scsi_cmnd *cmd = srb->cmd; 4080 4081 if (srb->flags & SRB_DMA_VALID) { 4082 scsi_dma_unmap(cmd); 4083 srb->flags &= ~SRB_DMA_VALID; 4084 } 4085 CMD_SP(cmd) = NULL; 4086} 4087 4088void qla4xxx_srb_compl(struct kref *ref) 4089{ 4090 struct srb *srb = container_of(ref, struct srb, srb_ref); 4091 struct scsi_cmnd *cmd = srb->cmd; 4092 struct scsi_qla_host *ha = srb->ha; 4093 4094 qla4xxx_srb_free_dma(ha, srb); 4095 4096 mempool_free(srb, ha->srb_mempool); 4097 4098 cmd->scsi_done(cmd); 4099} 4100 4101/** 4102 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 4103 * @host: scsi host 4104 * @cmd: Pointer to Linux's SCSI command structure 4105 * 4106 * Remarks: 4107 * This routine is invoked by Linux to send a SCSI command to the driver. 4108 * The mid-level driver tries to ensure that queuecommand never gets 4109 * invoked concurrently with itself or the interrupt handler (although 4110 * the interrupt handler may call this routine as part of request- 4111 * completion handling).
Unfortunately, it sometimes calls the scheduler 4112 * in interrupt context which is a big NO! NO!. 4113 **/ 4114static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4115{ 4116 struct scsi_qla_host *ha = to_qla_host(host); 4117 struct ddb_entry *ddb_entry = cmd->device->hostdata; 4118 struct iscsi_cls_session *sess = ddb_entry->sess; 4119 struct srb *srb; 4120 int rval; 4121 4122 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4123 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) 4124 cmd->result = DID_NO_CONNECT << 16; 4125 else 4126 cmd->result = DID_REQUEUE << 16; 4127 goto qc_fail_command; 4128 } 4129 4130 if (!sess) { 4131 cmd->result = DID_IMM_RETRY << 16; 4132 goto qc_fail_command; 4133 } 4134 4135 rval = iscsi_session_chkready(sess); 4136 if (rval) { 4137 cmd->result = rval; 4138 goto qc_fail_command; 4139 } 4140 4141 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4142 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4143 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4144 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4145 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4146 !test_bit(AF_ONLINE, &ha->flags) || 4147 !test_bit(AF_LINK_UP, &ha->flags) || 4148 test_bit(AF_LOOPBACK, &ha->flags) || 4149 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || 4150 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || 4151 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 4152 goto qc_host_busy; 4153 4154 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); 4155 if (!srb) 4156 goto qc_host_busy; 4157 4158 rval = qla4xxx_send_command_to_isp(ha, srb); 4159 if (rval != QLA_SUCCESS) 4160 goto qc_host_busy_free_sp; 4161 4162 return 0; 4163 4164qc_host_busy_free_sp: 4165 qla4xxx_srb_free_dma(ha, srb); 4166 mempool_free(srb, ha->srb_mempool); 4167 4168qc_host_busy: 4169 return SCSI_MLQUEUE_HOST_BUSY; 4170 4171qc_fail_command: 4172 cmd->scsi_done(cmd); 4173 4174 return 0; 4175} 4176 4177/** 4178 * qla4xxx_mem_free - frees memory allocated to adapter 4179 * @ha: Pointer to host adapter structure. 4180 * 4181 * Frees memory previously allocated by qla4xxx_mem_alloc 4182 **/ 4183static void qla4xxx_mem_free(struct scsi_qla_host *ha) 4184{ 4185 if (ha->queues) 4186 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 4187 ha->queues_dma); 4188 4189 if (ha->fw_dump) 4190 vfree(ha->fw_dump); 4191 4192 ha->queues_len = 0; 4193 ha->queues = NULL; 4194 ha->queues_dma = 0; 4195 ha->request_ring = NULL; 4196 ha->request_dma = 0; 4197 ha->response_ring = NULL; 4198 ha->response_dma = 0; 4199 ha->shadow_regs = NULL; 4200 ha->shadow_regs_dma = 0; 4201 ha->fw_dump = NULL; 4202 ha->fw_dump_size = 0; 4203 4204 /* Free srb pool. */ 4205 mempool_destroy(ha->srb_mempool); 4206 ha->srb_mempool = NULL; 4207 4208 dma_pool_destroy(ha->chap_dma_pool); 4209 4210 if (ha->chap_list) 4211 vfree(ha->chap_list); 4212 ha->chap_list = NULL; 4213 4214 dma_pool_destroy(ha->fw_ddb_dma_pool); 4215 4216 /* release io space registers */ 4217 if (is_qla8022(ha)) { 4218 if (ha->nx_pcibase) 4219 iounmap( 4220 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4221 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4222 if (ha->nx_pcibase) 4223 iounmap( 4224 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4225 } else if (ha->reg) { 4226 iounmap(ha->reg); 4227 } 4228 4229 if (ha->reset_tmplt.buff) 4230 vfree(ha->reset_tmplt.buff); 4231 4232 pci_release_regions(ha->pdev); 4233} 4234 4235/** 4236 * qla4xxx_mem_alloc - allocates memory for use by adapter.
4237 * @ha: Pointer to host adapter structure 4238 * 4239 * Allocates DMA memory for request and response queues. Also allocates memory 4240 * for srbs. 4241 **/ 4242static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4243{ 4244 unsigned long align; 4245 4246 /* Allocate contiguous block of DMA memory for queues. */ 4247 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4248 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4249 sizeof(struct shadow_regs) + 4250 MEM_ALIGN_VALUE + 4251 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4252 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4253 &ha->queues_dma, GFP_KERNEL); 4254 if (ha->queues == NULL) { 4255 ql4_printk(KERN_WARNING, ha, 4256 "Memory Allocation failed - queues.\n"); 4257 4258 goto mem_alloc_error_exit; 4259 } 4260 4261 /* 4262 * As per RISC alignment requirements -- the bus-address must be a 4263 * multiple of the request-ring size (in bytes). 4264 */ 4265 align = 0; 4266 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4267 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4268 (MEM_ALIGN_VALUE - 1)); 4269 4270 /* Update request and response queue pointers. */ 4271 ha->request_dma = ha->queues_dma + align; 4272 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4273 ha->response_dma = ha->queues_dma + align + 4274 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4275 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4276 (REQUEST_QUEUE_DEPTH * 4277 QUEUE_SIZE)); 4278 ha->shadow_regs_dma = ha->queues_dma + align + 4279 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4280 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4281 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4282 (REQUEST_QUEUE_DEPTH * 4283 QUEUE_SIZE) + 4284 (RESPONSE_QUEUE_DEPTH * 4285 QUEUE_SIZE)); 4286 4287 /* Allocate memory for srb pool. */ 4288 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4289 mempool_free_slab, srb_cachep); 4290 if (ha->srb_mempool == NULL) { 4291 ql4_printk(KERN_WARNING, ha, 4292 "Memory Allocation failed - SRB Pool.\n"); 4293 4294 goto mem_alloc_error_exit; 4295 } 4296 4297 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4298 CHAP_DMA_BLOCK_SIZE, 8, 0); 4299 4300 if (ha->chap_dma_pool == NULL) { 4301 ql4_printk(KERN_WARNING, ha, 4302 "%s: chap_dma_pool allocation failed..\n", __func__); 4303 goto mem_alloc_error_exit; 4304 } 4305 4306 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4307 DDB_DMA_BLOCK_SIZE, 8, 0); 4308 4309 if (ha->fw_ddb_dma_pool == NULL) { 4310 ql4_printk(KERN_WARNING, ha, 4311 "%s: fw_ddb_dma_pool allocation failed..\n", 4312 __func__); 4313 goto mem_alloc_error_exit; 4314 } 4315 4316 return QLA_SUCCESS; 4317 4318mem_alloc_error_exit: 4319 return QLA_ERROR; 4320} 4321 4322/** 4323 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4324 * @ha: adapter block pointer. 4325 * 4326 * Note: The caller should not hold the idc lock. 4327 **/ 4328static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4329{ 4330 uint32_t temp, temp_state, temp_val; 4331 int status = QLA_SUCCESS; 4332 4333 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4334 4335 temp_state = qla82xx_get_temp_state(temp); 4336 temp_val = qla82xx_get_temp_val(temp); 4337 4338 if (temp_state == QLA82XX_TEMP_PANIC) { 4339 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4340 " exceeds maximum allowed. 
Hardware has been shut" 4341 " down.\n", temp_val); 4342 status = QLA_ERROR; 4343 } else if (temp_state == QLA82XX_TEMP_WARN) { 4344 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4345 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4346 " degrees C exceeds operating range." 4347 " Immediate action needed.\n", temp_val); 4348 } else { 4349 if (ha->temperature == QLA82XX_TEMP_WARN) 4350 ql4_printk(KERN_INFO, ha, "Device temperature is" 4351 " now %d degrees C in normal range.\n", 4352 temp_val); 4353 } 4354 ha->temperature = temp_state; 4355 return status; 4356} 4357 4358/** 4359 * qla4_8xxx_check_fw_alive - Check firmware health 4360 * @ha: Pointer to host adapter structure. 4361 * 4362 * Context: Interrupt 4363 **/ 4364static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4365{ 4366 uint32_t fw_heartbeat_counter; 4367 int status = QLA_SUCCESS; 4368 4369 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4370 QLA8XXX_PEG_ALIVE_COUNTER); 4371 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4372 if (fw_heartbeat_counter == 0xffffffff) { 4373 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4374 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4375 ha->host_no, __func__)); 4376 return status; 4377 } 4378 4379 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4380 ha->seconds_since_last_heartbeat++; 4381 /* FW not alive after 2 seconds */ 4382 if (ha->seconds_since_last_heartbeat == 2) { 4383 ha->seconds_since_last_heartbeat = 0; 4384 qla4_8xxx_dump_peg_reg(ha); 4385 status = QLA_ERROR; 4386 } 4387 } else 4388 ha->seconds_since_last_heartbeat = 0; 4389 4390 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4391 return status; 4392} 4393 4394static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4395{ 4396 uint32_t halt_status; 4397 int halt_status_unrecoverable = 0; 4398 4399 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4400 4401 if (is_qla8022(ha)) { 4402 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4403 __func__); 4404 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4405 CRB_NIU_XG_PAUSE_CTL_P0 | 4406 CRB_NIU_XG_PAUSE_CTL_P1); 4407 4408 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4409 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4410 __func__); 4411 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4412 halt_status_unrecoverable = 1; 4413 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4414 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4415 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4416 __func__); 4417 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4418 halt_status_unrecoverable = 1; 4419 } 4420 4421 /* 4422 * Since we cannot change dev_state in interrupt context, 4423 * set appropriate DPC flag then wakeup DPC 4424 */ 4425 if (halt_status_unrecoverable) { 4426 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4427 } else { 4428 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4429 __func__); 4430 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4431 } 4432 qla4xxx_mailbox_premature_completion(ha); 4433 qla4xxx_wake_dpc(ha); 4434} 4435 4436/** 4437 * qla4_8xxx_watchdog - Poll dev state 4438 * @ha: Pointer to host adapter structure. 
4439 * 4440 * Context: Interrupt 4441 **/ 4442void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4443{ 4444 uint32_t dev_state; 4445 uint32_t idc_ctrl; 4446 4447 if (is_qla8032(ha) && 4448 (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) 4449 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", 4450 __func__, ha->func_num); 4451 4452 /* don't poll if reset is going on */ 4453 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4454 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4455 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4456 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4457 4458 if (qla4_8xxx_check_temp(ha)) { 4459 if (is_qla8022(ha)) { 4460 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4461 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4462 CRB_NIU_XG_PAUSE_CTL_P0 | 4463 CRB_NIU_XG_PAUSE_CTL_P1); 4464 } 4465 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4466 qla4xxx_wake_dpc(ha); 4467 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4468 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4469 4470 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4471 __func__); 4472 4473 if (is_qla8032(ha) || is_qla8042(ha)) { 4474 idc_ctrl = qla4_83xx_rd_reg(ha, 4475 QLA83XX_IDC_DRV_CTRL); 4476 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4477 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4478 __func__); 4479 qla4xxx_mailbox_premature_completion( 4480 ha); 4481 } 4482 } 4483 4484 if ((is_qla8032(ha) || is_qla8042(ha)) || 4485 (is_qla8022(ha) && !ql4xdontresethba)) { 4486 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4487 qla4xxx_wake_dpc(ha); 4488 } 4489 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4490 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4491 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4492 __func__); 4493 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4494 qla4xxx_wake_dpc(ha); 4495 } else { 4496 /* Check firmware health */ 4497 if (qla4_8xxx_check_fw_alive(ha)) 4498 qla4_8xxx_process_fw_error(ha); 4499 } 4500 } 4501} 4502 4503static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4504{ 4505 struct iscsi_session *sess; 4506 struct ddb_entry *ddb_entry; 4507 struct scsi_qla_host *ha; 4508 4509 sess = cls_sess->dd_data; 4510 ddb_entry = sess->dd_data; 4511 ha = ddb_entry->ha; 4512 4513 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4514 return; 4515 4516 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4517 !iscsi_is_session_online(cls_sess)) { 4518 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4519 INVALID_ENTRY) { 4520 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4521 0) { 4522 atomic_set(&ddb_entry->retry_relogin_timer, 4523 INVALID_ENTRY); 4524 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4525 set_bit(DF_RELOGIN, &ddb_entry->flags); 4526 DEBUG2(ql4_printk(KERN_INFO, ha, 4527 "%s: index [%d] login device\n", 4528 __func__, ddb_entry->fw_ddb_index)); 4529 } else 4530 atomic_dec(&ddb_entry->retry_relogin_timer); 4531 } 4532 } 4533 4534 /* Wait for relogin to timeout */ 4535 if (atomic_read(&ddb_entry->relogin_timer) && 4536 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4537 /* 4538 * If the relogin times out and the device is 4539 * still NOT ONLINE then try and relogin again. 
4540 */ 4541 if (!iscsi_is_session_online(cls_sess)) { 4542 /* Reset retry relogin timer */ 4543 atomic_inc(&ddb_entry->relogin_retry_count); 4544 DEBUG2(ql4_printk(KERN_INFO, ha, 4545 "%s: index[%d] relogin timed out-retrying" 4546 " relogin (%d), retry (%d)\n", __func__, 4547 ddb_entry->fw_ddb_index, 4548 atomic_read(&ddb_entry->relogin_retry_count), 4549 ddb_entry->default_time2wait + 4)); 4550 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4551 atomic_set(&ddb_entry->retry_relogin_timer, 4552 ddb_entry->default_time2wait + 4); 4553 } 4554 } 4555} 4556 4557/** 4558 * qla4xxx_timer - checks every second for work to do. 4559 * @t: Context to obtain pointer to host adapter structure. 4560 **/ 4561static void qla4xxx_timer(struct timer_list *t) 4562{ 4563 struct scsi_qla_host *ha = from_timer(ha, t, timer); 4564 int start_dpc = 0; 4565 uint16_t w; 4566 4567 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4568 4569 /* If we are in the middle of AER/EEH processing 4570 * skip any processing and reschedule the timer 4571 */ 4572 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4573 mod_timer(&ha->timer, jiffies + HZ); 4574 return; 4575 } 4576 4577 /* Hardware read to trigger an EEH error during mailbox waits. */ 4578 if (!pci_channel_offline(ha->pdev)) 4579 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4580 4581 if (is_qla80XX(ha)) 4582 qla4_8xxx_watchdog(ha); 4583 4584 if (is_qla40XX(ha)) { 4585 /* Check for heartbeat interval. */ 4586 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4587 ha->heartbeat_interval != 0) { 4588 ha->seconds_since_last_heartbeat++; 4589 if (ha->seconds_since_last_heartbeat > 4590 ha->heartbeat_interval + 2) 4591 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4592 } 4593 } 4594 4595 /* Process any deferred work. */ 4596 if (!list_empty(&ha->work_list)) 4597 start_dpc++; 4598 4599 /* Wakeup the dpc routine for this adapter, if needed. */ 4600 if (start_dpc || 4601 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4602 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4603 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4604 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4605 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4606 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4607 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4608 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4609 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4610 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || 4611 test_bit(DPC_AEN, &ha->dpc_flags)) { 4612 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4613 " - dpc flags = 0x%lx\n", 4614 ha->host_no, __func__, ha->dpc_flags)); 4615 qla4xxx_wake_dpc(ha); 4616 } 4617 4618 /* Reschedule timer thread to call us back in one second */ 4619 mod_timer(&ha->timer, jiffies + HZ); 4620 4621 DEBUG2(ha->seconds_since_last_intr++); 4622} 4623 4624/** 4625 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4626 * @ha: Pointer to host adapter structure. 4627 * 4628 * This routine stalls the driver until all outstanding commands are returned. 4629 * Caller must release the Hardware Lock prior to calling this routine. 
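 * Returns QLA_SUCCESS once no outstanding commands remain, or QLA_ERROR if
 * commands are still pending when the wait time expires.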
4630 **/ 4631static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 4632{ 4633 uint32_t index = 0; 4634 unsigned long flags; 4635 struct scsi_cmnd *cmd; 4636 unsigned long wtime; 4637 uint32_t wtmo; 4638 4639 if (is_qla40XX(ha)) 4640 wtmo = WAIT_CMD_TOV; 4641 else 4642 wtmo = ha->nx_reset_timeout / 2; 4643 4644 wtime = jiffies + (wtmo * HZ); 4645 4646 DEBUG2(ql4_printk(KERN_INFO, ha, 4647 "Wait up to %u seconds for cmds to complete\n", 4648 wtmo)); 4649 4650 while (!time_after_eq(jiffies, wtime)) { 4651 spin_lock_irqsave(&ha->hardware_lock, flags); 4652 /* Find a command that hasn't completed. */ 4653 for (index = 0; index < ha->host->can_queue; index++) { 4654 cmd = scsi_host_find_tag(ha->host, index); 4655 /* 4656 * We cannot just check if the index is valid, 4657 * because if we are run from the scsi eh, then 4658 * the scsi/block layer is going to prevent 4659 * the tag from being released. 4660 */ 4661 if (cmd != NULL && CMD_SP(cmd)) 4662 break; 4663 } 4664 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4665 4666 /* If no commands are pending, wait is complete */ 4667 if (index == ha->host->can_queue) 4668 return QLA_SUCCESS; 4669 4670 msleep(1000); 4671 } 4672 /* If we timed out waiting for commands to come back, 4673 * return ERROR. */ 4674 return QLA_ERROR; 4675} 4676 4677int qla4xxx_hw_reset(struct scsi_qla_host *ha) 4678{ 4679 uint32_t ctrl_status; 4680 unsigned long flags = 0; 4681 4682 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 4683 4684 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 4685 return QLA_ERROR; 4686 4687 spin_lock_irqsave(&ha->hardware_lock, flags); 4688 4689 /* 4690 * If the SCSI Reset Interrupt bit is set, clear it. 4691 * Otherwise, the Soft Reset won't work. 4692 */ 4693 ctrl_status = readw(&ha->reg->ctrl_status); 4694 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) 4695 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4696 4697 /* Issue Soft Reset */ 4698 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); 4699 readl(&ha->reg->ctrl_status); 4700 4701 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4702 return QLA_SUCCESS; 4703} 4704 4705/** 4706 * qla4xxx_soft_reset - performs soft reset. 4707 * @ha: Pointer to host adapter structure.
4708 **/ 4709int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4710{ 4711 uint32_t max_wait_time; 4712 unsigned long flags = 0; 4713 int status; 4714 uint32_t ctrl_status; 4715 4716 status = qla4xxx_hw_reset(ha); 4717 if (status != QLA_SUCCESS) 4718 return status; 4719 4720 status = QLA_ERROR; 4721 /* Wait until the Network Reset Intr bit is cleared */ 4722 max_wait_time = RESET_INTR_TOV; 4723 do { 4724 spin_lock_irqsave(&ha->hardware_lock, flags); 4725 ctrl_status = readw(&ha->reg->ctrl_status); 4726 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4727 4728 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4729 break; 4730 4731 msleep(1000); 4732 } while ((--max_wait_time)); 4733 4734 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4735 DEBUG2(printk(KERN_WARNING 4736 "scsi%ld: Network Reset Intr not cleared by " 4737 "Network function, clearing it now!\n", 4738 ha->host_no)); 4739 spin_lock_irqsave(&ha->hardware_lock, flags); 4740 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4741 readl(&ha->reg->ctrl_status); 4742 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4743 } 4744 4745 /* Wait until the firmware tells us the Soft Reset is done */ 4746 max_wait_time = SOFT_RESET_TOV; 4747 do { 4748 spin_lock_irqsave(&ha->hardware_lock, flags); 4749 ctrl_status = readw(&ha->reg->ctrl_status); 4750 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4751 4752 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4753 status = QLA_SUCCESS; 4754 break; 4755 } 4756 4757 msleep(1000); 4758 } while ((--max_wait_time)); 4759 4760 /* 4761 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4762 * after the soft reset has taken place. 4763 */ 4764 spin_lock_irqsave(&ha->hardware_lock, flags); 4765 ctrl_status = readw(&ha->reg->ctrl_status); 4766 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4767 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4768 readl(&ha->reg->ctrl_status); 4769 } 4770 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4771 4772 /* If soft reset fails then most probably the bios on other 4773 * function is also enabled. 4774 * Since the initialization is sequential the other fn 4775 * wont be able to acknowledge the soft reset. 4776 * Issue a force soft reset to workaround this scenario. 4777 */ 4778 if (max_wait_time == 0) { 4779 /* Issue Force Soft Reset */ 4780 spin_lock_irqsave(&ha->hardware_lock, flags); 4781 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4782 readl(&ha->reg->ctrl_status); 4783 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4784 /* Wait until the firmware tells us the Soft Reset is done */ 4785 max_wait_time = SOFT_RESET_TOV; 4786 do { 4787 spin_lock_irqsave(&ha->hardware_lock, flags); 4788 ctrl_status = readw(&ha->reg->ctrl_status); 4789 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4790 4791 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4792 status = QLA_SUCCESS; 4793 break; 4794 } 4795 4796 msleep(1000); 4797 } while ((--max_wait_time)); 4798 } 4799 4800 return status; 4801} 4802 4803/** 4804 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4805 * @ha: Pointer to host adapter structure. 4806 * @res: returned scsi status 4807 * 4808 * This routine is called just prior to a HARD RESET to return all 4809 * outstanding commands back to the Operating System. 4810 * Caller should make sure that the following locks are released 4811 * before this calling routine: Hardware lock, and io_request_lock. 
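 * Each outstanding srb is completed with the supplied @res and released
 * back to the srb mempool via qla4xxx_srb_compl().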
4812 **/ 4813static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4814{ 4815 struct srb *srb; 4816 int i; 4817 unsigned long flags; 4818 4819 spin_lock_irqsave(&ha->hardware_lock, flags); 4820 for (i = 0; i < ha->host->can_queue; i++) { 4821 srb = qla4xxx_del_from_active_array(ha, i); 4822 if (srb != NULL) { 4823 srb->cmd->result = res; 4824 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4825 } 4826 } 4827 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4828} 4829 4830void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4831{ 4832 clear_bit(AF_ONLINE, &ha->flags); 4833 4834 /* Disable the board */ 4835 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4836 4837 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4838 qla4xxx_mark_all_devices_missing(ha); 4839 clear_bit(AF_INIT_DONE, &ha->flags); 4840} 4841 4842static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4843{ 4844 struct iscsi_session *sess; 4845 struct ddb_entry *ddb_entry; 4846 4847 sess = cls_session->dd_data; 4848 ddb_entry = sess->dd_data; 4849 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4850 4851 if (ddb_entry->ddb_type == FLASH_DDB) 4852 iscsi_block_session(ddb_entry->sess); 4853 else 4854 iscsi_session_failure(cls_session->dd_data, 4855 ISCSI_ERR_CONN_FAILED); 4856} 4857 4858/** 4859 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4860 * @ha: Pointer to host adapter structure. 4861 **/ 4862static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4863{ 4864 int status = QLA_ERROR; 4865 uint8_t reset_chip = 0; 4866 uint32_t dev_state; 4867 unsigned long wait; 4868 4869 /* Stall incoming I/O until we are done */ 4870 scsi_block_requests(ha->host); 4871 clear_bit(AF_ONLINE, &ha->flags); 4872 clear_bit(AF_LINK_UP, &ha->flags); 4873 4874 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4875 4876 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4877 4878 if ((is_qla8032(ha) || is_qla8042(ha)) && 4879 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4880 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4881 __func__); 4882 /* disable pause frame for ISP83xx */ 4883 qla4_83xx_disable_pause(ha); 4884 } 4885 4886 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4887 4888 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4889 reset_chip = 1; 4890 4891 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4892 * do not reset adapter, jump to initialize_adapter */ 4893 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4894 status = QLA_SUCCESS; 4895 goto recover_ha_init_adapter; 4896 } 4897 4898 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4899 * from eh_host_reset or ioctl module */ 4900 if (is_qla80XX(ha) && !reset_chip && 4901 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4902 4903 DEBUG2(ql4_printk(KERN_INFO, ha, 4904 "scsi%ld: %s - Performing stop_firmware...\n", 4905 ha->host_no, __func__)); 4906 status = ha->isp_ops->reset_firmware(ha); 4907 if (status == QLA_SUCCESS) { 4908 ha->isp_ops->disable_intrs(ha); 4909 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4910 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4911 } else { 4912 /* If the stop_firmware fails then 4913 * reset the entire chip */ 4914 reset_chip = 1; 4915 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4916 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4917 } 4918 } 4919 4920 /* Issue full chip reset if recovering from a catastrophic error, 4921 * or if stop_firmware fails for ISP-8xxx. 
4922 * This is the default case for ISP-4xxx */ 4923 if (is_qla40XX(ha) || reset_chip) { 4924 if (is_qla40XX(ha)) 4925 goto chip_reset; 4926 4927 /* Check if 8XXX firmware is alive or not 4928 * We may have arrived here from NEED_RESET 4929 * detection only */ 4930 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 4931 goto chip_reset; 4932 4933 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); 4934 while (time_before(jiffies, wait)) { 4935 if (qla4_8xxx_check_fw_alive(ha)) { 4936 qla4xxx_mailbox_premature_completion(ha); 4937 break; 4938 } 4939 4940 set_current_state(TASK_UNINTERRUPTIBLE); 4941 schedule_timeout(HZ); 4942 } 4943chip_reset: 4944 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4945 qla4xxx_cmd_wait(ha); 4946 4947 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4948 DEBUG2(ql4_printk(KERN_INFO, ha, 4949 "scsi%ld: %s - Performing chip reset..\n", 4950 ha->host_no, __func__)); 4951 status = ha->isp_ops->reset_chip(ha); 4952 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4953 } 4954 4955 /* Flush any pending ddb changed AENs */ 4956 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4957 4958recover_ha_init_adapter: 4959 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4960 if (status == QLA_SUCCESS) { 4961 /* For ISP-4xxx, force function 1 to always initialize 4962 * before function 3 to prevent both functions from 4963 * stepping on top of each other */ 4964 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4965 ssleep(6); 4966 4967 /* NOTE: AF_ONLINE flag set upon successful completion of 4968 * qla4xxx_initialize_adapter */ 4969 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 4970 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 4971 status = qla4_8xxx_check_init_adapter_retry(ha); 4972 if (status == QLA_ERROR) { 4973 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", 4974 ha->host_no, __func__); 4975 qla4xxx_dead_adapter_cleanup(ha); 4976 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4977 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4978 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4979 &ha->dpc_flags); 4980 goto exit_recover; 4981 } 4982 } 4983 } 4984 4985 /* Retry failed adapter initialization, if necessary 4986 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) 4987 * case to prevent ping-pong resets between functions */ 4988 if (!test_bit(AF_ONLINE, &ha->flags) && 4989 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4990 /* Adapter initialization failed, see if we can retry 4991 * resetting the ha. 4992 * Since we don't want to block the DPC for too long 4993 * with multiple resets in the same thread, 4994 * utilize DPC to retry */ 4995 if (is_qla80XX(ha)) { 4996 ha->isp_ops->idc_lock(ha); 4997 dev_state = qla4_8xxx_rd_direct(ha, 4998 QLA8XXX_CRB_DEV_STATE); 4999 ha->isp_ops->idc_unlock(ha); 5000 if (dev_state == QLA8XXX_DEV_FAILED) { 5001 ql4_printk(KERN_INFO, ha, "%s: don't retry " 5002 "recover adapter.
H/W is in Failed " 5003 "state\n", __func__); 5004 qla4xxx_dead_adapter_cleanup(ha); 5005 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5006 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5007 clear_bit(DPC_RESET_HA_FW_CONTEXT, 5008 &ha->dpc_flags); 5009 status = QLA_ERROR; 5010 5011 goto exit_recover; 5012 } 5013 } 5014 5015 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 5016 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 5017 DEBUG2(printk("scsi%ld: recover adapter - retrying " 5018 "(%d) more times\n", ha->host_no, 5019 ha->retry_reset_ha_cnt)); 5020 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5021 status = QLA_ERROR; 5022 } else { 5023 if (ha->retry_reset_ha_cnt > 0) { 5024 /* Schedule another Reset HA--DPC will retry */ 5025 ha->retry_reset_ha_cnt--; 5026 DEBUG2(printk("scsi%ld: recover adapter - " 5027 "retry remaining %d\n", 5028 ha->host_no, 5029 ha->retry_reset_ha_cnt)); 5030 status = QLA_ERROR; 5031 } 5032 5033 if (ha->retry_reset_ha_cnt == 0) { 5034 /* Recover adapter retries have been exhausted. 5035 * Adapter DEAD */ 5036 DEBUG2(printk("scsi%ld: recover adapter " 5037 "failed - board disabled\n", 5038 ha->host_no)); 5039 qla4xxx_dead_adapter_cleanup(ha); 5040 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5041 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5042 clear_bit(DPC_RESET_HA_FW_CONTEXT, 5043 &ha->dpc_flags); 5044 status = QLA_ERROR; 5045 } 5046 } 5047 } else { 5048 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5049 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5050 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5051 } 5052 5053exit_recover: 5054 ha->adapter_error_count++; 5055 5056 if (test_bit(AF_ONLINE, &ha->flags)) 5057 ha->isp_ops->enable_intrs(ha); 5058 5059 scsi_unblock_requests(ha->host); 5060 5061 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 5062 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 5063 status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); 5064 5065 return status; 5066} 5067 5068static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 5069{ 5070 struct iscsi_session *sess; 5071 struct ddb_entry *ddb_entry; 5072 struct scsi_qla_host *ha; 5073 5074 sess = cls_session->dd_data; 5075 ddb_entry = sess->dd_data; 5076 ha = ddb_entry->ha; 5077 if (!iscsi_is_session_online(cls_session)) { 5078 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 5079 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5080 " unblock session\n", ha->host_no, __func__, 5081 ddb_entry->fw_ddb_index); 5082 iscsi_unblock_session(ddb_entry->sess); 5083 } else { 5084 /* Trigger relogin */ 5085 if (ddb_entry->ddb_type == FLASH_DDB) { 5086 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 5087 test_bit(DF_DISABLE_RELOGIN, 5088 &ddb_entry->flags))) 5089 qla4xxx_arm_relogin_timer(ddb_entry); 5090 } else 5091 iscsi_session_failure(cls_session->dd_data, 5092 ISCSI_ERR_CONN_FAILED); 5093 } 5094 } 5095} 5096 5097int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5098{ 5099 struct iscsi_session *sess; 5100 struct ddb_entry *ddb_entry; 5101 struct scsi_qla_host *ha; 5102 5103 sess = cls_session->dd_data; 5104 ddb_entry = sess->dd_data; 5105 ha = ddb_entry->ha; 5106 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5107 " unblock session\n", ha->host_no, __func__, 5108 ddb_entry->fw_ddb_index); 5109 5110 iscsi_unblock_session(ddb_entry->sess); 5111 5112 /* Start scan target */ 5113 if (test_bit(AF_ONLINE, &ha->flags)) { 5114 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5115 " start scan\n", ha->host_no, __func__, 5116 ddb_entry->fw_ddb_index); 5117 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work); 5118 } 5119 return QLA_SUCCESS; 5120} 5121 5122int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5123{ 5124 struct iscsi_session *sess; 5125 struct ddb_entry *ddb_entry; 5126 struct scsi_qla_host *ha; 5127 int status = QLA_SUCCESS; 5128 5129 sess = cls_session->dd_data; 5130 ddb_entry = sess->dd_data; 5131 ha = ddb_entry->ha; 5132 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5133 " unblock user space session\n", ha->host_no, __func__, 5134 ddb_entry->fw_ddb_index); 5135 5136 if (!iscsi_is_session_online(cls_session)) { 5137 iscsi_conn_start(ddb_entry->conn); 5138 iscsi_conn_login_event(ddb_entry->conn, 5139 ISCSI_CONN_STATE_LOGGED_IN); 5140 } else { 5141 ql4_printk(KERN_INFO, ha, 5142 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5143 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5144 cls_session->sid); 5145 status = QLA_ERROR; 5146 } 5147 5148 return status; 5149} 5150 5151static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5152{ 5153 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5154} 5155 5156static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5157{ 5158 uint16_t relogin_timer; 5159 struct iscsi_session *sess; 5160 struct ddb_entry *ddb_entry; 5161 struct scsi_qla_host *ha; 5162 5163 sess = cls_sess->dd_data; 5164 ddb_entry = sess->dd_data; 5165 ha = ddb_entry->ha; 5166 5167 relogin_timer = max(ddb_entry->default_relogin_timeout, 5168 (uint16_t)RELOGIN_TOV); 5169 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5170 5171 DEBUG2(ql4_printk(KERN_INFO, ha, 5172 "scsi%ld: Relogin index [%d]. 
TOV=%d\n", ha->host_no, 5173 ddb_entry->fw_ddb_index, relogin_timer)); 5174 5175 qla4xxx_login_flash_ddb(cls_sess); 5176} 5177 5178static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5179{ 5180 struct iscsi_session *sess; 5181 struct ddb_entry *ddb_entry; 5182 struct scsi_qla_host *ha; 5183 5184 sess = cls_sess->dd_data; 5185 ddb_entry = sess->dd_data; 5186 ha = ddb_entry->ha; 5187 5188 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5189 return; 5190 5191 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5192 return; 5193 5194 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5195 !iscsi_is_session_online(cls_sess)) { 5196 DEBUG2(ql4_printk(KERN_INFO, ha, 5197 "relogin issued\n")); 5198 qla4xxx_relogin_flash_ddb(cls_sess); 5199 } 5200} 5201 5202void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5203{ 5204 if (ha->dpc_thread) 5205 queue_work(ha->dpc_thread, &ha->dpc_work); 5206} 5207 5208static struct qla4_work_evt * 5209qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5210 enum qla4_work_type type) 5211{ 5212 struct qla4_work_evt *e; 5213 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5214 5215 e = kzalloc(size, GFP_ATOMIC); 5216 if (!e) 5217 return NULL; 5218 5219 INIT_LIST_HEAD(&e->list); 5220 e->type = type; 5221 return e; 5222} 5223 5224static void qla4xxx_post_work(struct scsi_qla_host *ha, 5225 struct qla4_work_evt *e) 5226{ 5227 unsigned long flags; 5228 5229 spin_lock_irqsave(&ha->work_lock, flags); 5230 list_add_tail(&e->list, &ha->work_list); 5231 spin_unlock_irqrestore(&ha->work_lock, flags); 5232 qla4xxx_wake_dpc(ha); 5233} 5234 5235int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5236 enum iscsi_host_event_code aen_code, 5237 uint32_t data_size, uint8_t *data) 5238{ 5239 struct qla4_work_evt *e; 5240 5241 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5242 if (!e) 5243 return QLA_ERROR; 5244 5245 e->u.aen.code = aen_code; 5246 e->u.aen.data_size = data_size; 5247 memcpy(e->u.aen.data, data, data_size); 5248 5249 qla4xxx_post_work(ha, e); 5250 5251 return QLA_SUCCESS; 5252} 5253 5254int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5255 uint32_t status, uint32_t pid, 5256 uint32_t data_size, uint8_t *data) 5257{ 5258 struct qla4_work_evt *e; 5259 5260 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5261 if (!e) 5262 return QLA_ERROR; 5263 5264 e->u.ping.status = status; 5265 e->u.ping.pid = pid; 5266 e->u.ping.data_size = data_size; 5267 memcpy(e->u.ping.data, data, data_size); 5268 5269 qla4xxx_post_work(ha, e); 5270 5271 return QLA_SUCCESS; 5272} 5273 5274static void qla4xxx_do_work(struct scsi_qla_host *ha) 5275{ 5276 struct qla4_work_evt *e, *tmp; 5277 unsigned long flags; 5278 LIST_HEAD(work); 5279 5280 spin_lock_irqsave(&ha->work_lock, flags); 5281 list_splice_init(&ha->work_list, &work); 5282 spin_unlock_irqrestore(&ha->work_lock, flags); 5283 5284 list_for_each_entry_safe(e, tmp, &work, list) { 5285 list_del_init(&e->list); 5286 5287 switch (e->type) { 5288 case QLA4_EVENT_AEN: 5289 iscsi_post_host_event(ha->host_no, 5290 &qla4xxx_iscsi_transport, 5291 e->u.aen.code, 5292 e->u.aen.data_size, 5293 e->u.aen.data); 5294 break; 5295 case QLA4_EVENT_PING_STATUS: 5296 iscsi_ping_comp_event(ha->host_no, 5297 &qla4xxx_iscsi_transport, 5298 e->u.ping.status, 5299 e->u.ping.pid, 5300 e->u.ping.data_size, 5301 e->u.ping.data); 5302 break; 5303 default: 5304 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5305 "supported", e->type); 5306 } 5307 kfree(e); 5308 } 5309} 5310 5311/** 5312 * qla4xxx_do_dpc 
- dpc routine 5313 * @work: Context to obtain pointer to host adapter structure. 5314 * 5315 * This routine is a task that is schedule by the interrupt handler 5316 * to perform the background processing for interrupts. We put it 5317 * on a task queue that is consumed whenever the scheduler runs; that's 5318 * so you can do anything (i.e. put the process to sleep etc). In fact, 5319 * the mid-level tries to sleep when it reaches the driver threshold 5320 * "host->can_queue". This can cause a panic if we were in our interrupt code. 5321 **/ 5322static void qla4xxx_do_dpc(struct work_struct *work) 5323{ 5324 struct scsi_qla_host *ha = 5325 container_of(work, struct scsi_qla_host, dpc_work); 5326 int status = QLA_ERROR; 5327 5328 DEBUG2(ql4_printk(KERN_INFO, ha, 5329 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5330 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 5331 5332 /* Initialization not yet finished. Don't do anything yet. */ 5333 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5334 return; 5335 5336 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5337 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5338 ha->host_no, __func__, ha->flags)); 5339 return; 5340 } 5341 5342 /* post events to application */ 5343 qla4xxx_do_work(ha); 5344 5345 if (is_qla80XX(ha)) { 5346 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5347 if (is_qla8032(ha) || is_qla8042(ha)) { 5348 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5349 __func__); 5350 /* disable pause frame for ISP83xx */ 5351 qla4_83xx_disable_pause(ha); 5352 } 5353 5354 ha->isp_ops->idc_lock(ha); 5355 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5356 QLA8XXX_DEV_FAILED); 5357 ha->isp_ops->idc_unlock(ha); 5358 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5359 qla4_8xxx_device_state_handler(ha); 5360 } 5361 5362 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5363 if (is_qla8042(ha)) { 5364 if (ha->idc_info.info2 & 5365 ENABLE_INTERNAL_LOOPBACK) { 5366 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5367 __func__); 5368 status = qla4_84xx_config_acb(ha, 5369 ACB_CONFIG_DISABLE); 5370 if (status != QLA_SUCCESS) { 5371 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5372 __func__); 5373 } 5374 } 5375 } 5376 qla4_83xx_post_idc_ack(ha); 5377 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5378 } 5379 5380 if (is_qla8042(ha) && 5381 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5382 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5383 __func__); 5384 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5385 QLA_SUCCESS) { 5386 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5387 __func__); 5388 } 5389 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5390 } 5391 5392 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5393 qla4_8xxx_need_qsnt_handler(ha); 5394 } 5395 } 5396 5397 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5398 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5399 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5400 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5401 if ((is_qla8022(ha) && ql4xdontresethba) || 5402 ((is_qla8032(ha) || is_qla8042(ha)) && 5403 qla4_83xx_idc_dontreset(ha))) { 5404 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5405 ha->host_no, __func__)); 5406 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5407 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5408 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5409 goto dpc_post_reset_ha; 5410 } 5411 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 5412 test_bit(DPC_RESET_HA, 
&ha->dpc_flags)) 5413 qla4xxx_recover_adapter(ha); 5414 5415 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5416 uint8_t wait_time = RESET_INTR_TOV; 5417 5418 while ((readw(&ha->reg->ctrl_status) & 5419 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5420 if (--wait_time == 0) 5421 break; 5422 msleep(1000); 5423 } 5424 if (wait_time == 0) 5425 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5426 "bit not cleared-- resetting\n", 5427 ha->host_no, __func__)); 5428 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5429 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5430 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5431 status = qla4xxx_recover_adapter(ha); 5432 } 5433 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5434 if (status == QLA_SUCCESS) 5435 ha->isp_ops->enable_intrs(ha); 5436 } 5437 } 5438 5439dpc_post_reset_ha: 5440 /* ---- process AEN? --- */ 5441 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5442 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5443 5444 /* ---- Get DHCP IP Address? --- */ 5445 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5446 qla4xxx_get_dhcp_ip_address(ha); 5447 5448 /* ---- relogin device? --- */ 5449 if (adapter_up(ha) && 5450 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5451 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5452 } 5453 5454 /* ---- link change? --- */ 5455 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5456 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5457 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5458 /* ---- link down? --- */ 5459 qla4xxx_mark_all_devices_missing(ha); 5460 } else { 5461 /* ---- link up? --- * 5462 * F/W will auto login to all devices ONLY ONCE after 5463 * link up during driver initialization and runtime 5464 * fatal error recovery. Therefore, the driver must 5465 * manually relogin to devices when recovering from 5466 * connection failures, logouts, expired KATO, etc. */ 5467 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5468 qla4xxx_build_ddb_list(ha, ha->is_reset); 5469 iscsi_host_for_each_session(ha->host, 5470 qla4xxx_login_flash_ddb); 5471 } else 5472 qla4xxx_relogin_all_devices(ha); 5473 } 5474 } 5475 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { 5476 if (qla4xxx_sysfs_ddb_export(ha)) 5477 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", 5478 __func__); 5479 } 5480} 5481 5482/** 5483 * qla4xxx_free_adapter - release the adapter 5484 * @ha: pointer to adapter structure 5485 **/ 5486static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5487{ 5488 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5489 5490 /* Turn-off interrupts on the card. 
*/ 5491 ha->isp_ops->disable_intrs(ha); 5492 5493 if (is_qla40XX(ha)) { 5494 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5495 &ha->reg->ctrl_status); 5496 readl(&ha->reg->ctrl_status); 5497 } else if (is_qla8022(ha)) { 5498 writel(0, &ha->qla4_82xx_reg->host_int); 5499 readl(&ha->qla4_82xx_reg->host_int); 5500 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5501 writel(0, &ha->qla4_83xx_reg->risc_intr); 5502 readl(&ha->qla4_83xx_reg->risc_intr); 5503 } 5504 5505 /* Remove timer thread, if present */ 5506 if (ha->timer_active) 5507 qla4xxx_stop_timer(ha); 5508 5509 /* Kill the kernel thread for this host */ 5510 if (ha->dpc_thread) 5511 destroy_workqueue(ha->dpc_thread); 5512 5513 /* Kill the kernel thread for this host */ 5514 if (ha->task_wq) 5515 destroy_workqueue(ha->task_wq); 5516 5517 /* Put firmware in known state */ 5518 ha->isp_ops->reset_firmware(ha); 5519 5520 if (is_qla80XX(ha)) { 5521 ha->isp_ops->idc_lock(ha); 5522 qla4_8xxx_clear_drv_active(ha); 5523 ha->isp_ops->idc_unlock(ha); 5524 } 5525 5526 /* Detach interrupts */ 5527 qla4xxx_free_irqs(ha); 5528 5529 /* free extra memory */ 5530 qla4xxx_mem_free(ha); 5531} 5532 5533int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5534{ 5535 int status = 0; 5536 unsigned long mem_base, mem_len; 5537 struct pci_dev *pdev = ha->pdev; 5538 5539 status = pci_request_regions(pdev, DRIVER_NAME); 5540 if (status) { 5541 printk(KERN_WARNING 5542 "scsi(%ld) Failed to reserve PIO regions (%s) " 5543 "status=%d\n", ha->host_no, pci_name(pdev), status); 5544 goto iospace_error_exit; 5545 } 5546 5547 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5548 __func__, pdev->revision)); 5549 ha->revision_id = pdev->revision; 5550 5551 /* remap phys address */ 5552 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5553 mem_len = pci_resource_len(pdev, 0); 5554 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5555 __func__, mem_base, mem_len)); 5556 5557 /* mapping of pcibase pointer */ 5558 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5559 if (!ha->nx_pcibase) { 5560 printk(KERN_ERR 5561 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5562 pci_release_regions(ha->pdev); 5563 goto iospace_error_exit; 5564 } 5565 5566 /* Mapping of IO base pointer, door bell read and write pointer */ 5567 5568 /* mapping of IO base pointer */ 5569 if (is_qla8022(ha)) { 5570 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5571 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5572 (ha->pdev->devfn << 11)); 5573 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5574 QLA82XX_CAM_RAM_DB2); 5575 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5576 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5577 ((uint8_t *)ha->nx_pcibase); 5578 } 5579 5580 return 0; 5581iospace_error_exit: 5582 return -ENOMEM; 5583} 5584 5585/*** 5586 * qla4xxx_iospace_config - maps registers 5587 * @ha: pointer to adapter structure 5588 * 5589 * This routines maps HBA's registers from the pci address space 5590 * into the kernel virtual address space for memory mapped i/o. 
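 *
 * As the body below shows, BAR 0 is treated as the legacy PIO region and
 * BAR 1 as the MMIO register window: both are sanity-checked against
 * MIN_IOBASE_LEN, the regions are reserved with pci_request_regions(),
 * and only MIN_IOBASE_LEN bytes of the MMIO BAR are ioremap()ed into
 * ha->reg. Returns 0 on success and -ENOMEM on failure. This routine is
 * wired up as .iospace_config in qla4xxx_isp_ops below; the 82xx/83xx
 * parts use qla4_8xxx_iospace_config() instead.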
5591 **/ 5592int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5593{ 5594 unsigned long pio, pio_len, pio_flags; 5595 unsigned long mmio, mmio_len, mmio_flags; 5596 5597 pio = pci_resource_start(ha->pdev, 0); 5598 pio_len = pci_resource_len(ha->pdev, 0); 5599 pio_flags = pci_resource_flags(ha->pdev, 0); 5600 if (pio_flags & IORESOURCE_IO) { 5601 if (pio_len < MIN_IOBASE_LEN) { 5602 ql4_printk(KERN_WARNING, ha, 5603 "Invalid PCI I/O region size\n"); 5604 pio = 0; 5605 } 5606 } else { 5607 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5608 pio = 0; 5609 } 5610 5611 /* Use MMIO operations for all accesses. */ 5612 mmio = pci_resource_start(ha->pdev, 1); 5613 mmio_len = pci_resource_len(ha->pdev, 1); 5614 mmio_flags = pci_resource_flags(ha->pdev, 1); 5615 5616 if (!(mmio_flags & IORESOURCE_MEM)) { 5617 ql4_printk(KERN_ERR, ha, 5618 "region #0 not an MMIO resource, aborting\n"); 5619 5620 goto iospace_error_exit; 5621 } 5622 5623 if (mmio_len < MIN_IOBASE_LEN) { 5624 ql4_printk(KERN_ERR, ha, 5625 "Invalid PCI mem region size, aborting\n"); 5626 goto iospace_error_exit; 5627 } 5628 5629 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5630 ql4_printk(KERN_WARNING, ha, 5631 "Failed to reserve PIO/MMIO regions\n"); 5632 5633 goto iospace_error_exit; 5634 } 5635 5636 ha->pio_address = pio; 5637 ha->pio_length = pio_len; 5638 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5639 if (!ha->reg) { 5640 ql4_printk(KERN_ERR, ha, 5641 "cannot remap MMIO, aborting\n"); 5642 5643 goto iospace_error_exit; 5644 } 5645 5646 return 0; 5647 5648iospace_error_exit: 5649 return -ENOMEM; 5650} 5651 5652static struct isp_operations qla4xxx_isp_ops = { 5653 .iospace_config = qla4xxx_iospace_config, 5654 .pci_config = qla4xxx_pci_config, 5655 .disable_intrs = qla4xxx_disable_intrs, 5656 .enable_intrs = qla4xxx_enable_intrs, 5657 .start_firmware = qla4xxx_start_firmware, 5658 .intr_handler = qla4xxx_intr_handler, 5659 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5660 .reset_chip = qla4xxx_soft_reset, 5661 .reset_firmware = qla4xxx_hw_reset, 5662 .queue_iocb = qla4xxx_queue_iocb, 5663 .complete_iocb = qla4xxx_complete_iocb, 5664 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5665 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5666 .get_sys_info = qla4xxx_get_sys_info, 5667 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5668 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5669}; 5670 5671static struct isp_operations qla4_82xx_isp_ops = { 5672 .iospace_config = qla4_8xxx_iospace_config, 5673 .pci_config = qla4_8xxx_pci_config, 5674 .disable_intrs = qla4_82xx_disable_intrs, 5675 .enable_intrs = qla4_82xx_enable_intrs, 5676 .start_firmware = qla4_8xxx_load_risc, 5677 .restart_firmware = qla4_82xx_try_start_fw, 5678 .intr_handler = qla4_82xx_intr_handler, 5679 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5680 .need_reset = qla4_8xxx_need_reset, 5681 .reset_chip = qla4_82xx_isp_reset, 5682 .reset_firmware = qla4_8xxx_stop_firmware, 5683 .queue_iocb = qla4_82xx_queue_iocb, 5684 .complete_iocb = qla4_82xx_complete_iocb, 5685 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5686 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5687 .get_sys_info = qla4_8xxx_get_sys_info, 5688 .rd_reg_direct = qla4_82xx_rd_32, 5689 .wr_reg_direct = qla4_82xx_wr_32, 5690 .rd_reg_indirect = qla4_82xx_md_rd_32, 5691 .wr_reg_indirect = qla4_82xx_md_wr_32, 5692 .idc_lock = qla4_82xx_idc_lock, 5693 .idc_unlock = qla4_82xx_idc_unlock, 5694 .rom_lock_recovery = qla4_82xx_rom_lock_recovery, 
5695 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5696 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5697}; 5698 5699static struct isp_operations qla4_83xx_isp_ops = { 5700 .iospace_config = qla4_8xxx_iospace_config, 5701 .pci_config = qla4_8xxx_pci_config, 5702 .disable_intrs = qla4_83xx_disable_intrs, 5703 .enable_intrs = qla4_83xx_enable_intrs, 5704 .start_firmware = qla4_8xxx_load_risc, 5705 .restart_firmware = qla4_83xx_start_firmware, 5706 .intr_handler = qla4_83xx_intr_handler, 5707 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5708 .need_reset = qla4_8xxx_need_reset, 5709 .reset_chip = qla4_83xx_isp_reset, 5710 .reset_firmware = qla4_8xxx_stop_firmware, 5711 .queue_iocb = qla4_83xx_queue_iocb, 5712 .complete_iocb = qla4_83xx_complete_iocb, 5713 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5714 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5715 .get_sys_info = qla4_8xxx_get_sys_info, 5716 .rd_reg_direct = qla4_83xx_rd_reg, 5717 .wr_reg_direct = qla4_83xx_wr_reg, 5718 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5719 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5720 .idc_lock = qla4_83xx_drv_lock, 5721 .idc_unlock = qla4_83xx_drv_unlock, 5722 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5723 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5724 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5725}; 5726 5727uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5728{ 5729 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5730} 5731 5732uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5733{ 5734 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5735} 5736 5737uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5738{ 5739 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5740} 5741 5742uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5743{ 5744 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5745} 5746 5747static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5748{ 5749 struct scsi_qla_host *ha = data; 5750 char *str = buf; 5751 int rc; 5752 5753 switch (type) { 5754 case ISCSI_BOOT_ETH_FLAGS: 5755 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5756 break; 5757 case ISCSI_BOOT_ETH_INDEX: 5758 rc = sprintf(str, "0\n"); 5759 break; 5760 case ISCSI_BOOT_ETH_MAC: 5761 rc = sysfs_format_mac(str, ha->my_mac, 5762 MAC_ADDR_LEN); 5763 break; 5764 default: 5765 rc = -ENOSYS; 5766 break; 5767 } 5768 return rc; 5769} 5770 5771static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5772{ 5773 int rc; 5774 5775 switch (type) { 5776 case ISCSI_BOOT_ETH_FLAGS: 5777 case ISCSI_BOOT_ETH_MAC: 5778 case ISCSI_BOOT_ETH_INDEX: 5779 rc = S_IRUGO; 5780 break; 5781 default: 5782 rc = 0; 5783 break; 5784 } 5785 return rc; 5786} 5787 5788static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5789{ 5790 struct scsi_qla_host *ha = data; 5791 char *str = buf; 5792 int rc; 5793 5794 switch (type) { 5795 case ISCSI_BOOT_INI_INITIATOR_NAME: 5796 rc = sprintf(str, "%s\n", ha->name_string); 5797 break; 5798 default: 5799 rc = -ENOSYS; 5800 break; 5801 } 5802 return rc; 5803} 5804 5805static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5806{ 5807 int rc; 5808 5809 switch (type) { 5810 case ISCSI_BOOT_INI_INITIATOR_NAME: 5811 rc = S_IRUGO; 5812 break; 5813 default: 5814 rc = 0; 5815 break; 5816 } 5817 return rc; 5818} 5819 5820static ssize_t 5821qla4xxx_show_boot_tgt_info(struct 
ql4_boot_session_info *boot_sess, int type, 5822 char *buf) 5823{ 5824 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5825 char *str = buf; 5826 int rc; 5827 5828 switch (type) { 5829 case ISCSI_BOOT_TGT_NAME: 5830 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5831 break; 5832 case ISCSI_BOOT_TGT_IP_ADDR: 5833 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5834 rc = sprintf(buf, "%pI4\n", 5835 &boot_conn->dest_ipaddr.ip_address); 5836 else 5837 rc = sprintf(str, "%pI6\n", 5838 &boot_conn->dest_ipaddr.ip_address); 5839 break; 5840 case ISCSI_BOOT_TGT_PORT: 5841 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5842 break; 5843 case ISCSI_BOOT_TGT_CHAP_NAME: 5844 rc = sprintf(str, "%.*s\n", 5845 boot_conn->chap.target_chap_name_length, 5846 (char *)&boot_conn->chap.target_chap_name); 5847 break; 5848 case ISCSI_BOOT_TGT_CHAP_SECRET: 5849 rc = sprintf(str, "%.*s\n", 5850 boot_conn->chap.target_secret_length, 5851 (char *)&boot_conn->chap.target_secret); 5852 break; 5853 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5854 rc = sprintf(str, "%.*s\n", 5855 boot_conn->chap.intr_chap_name_length, 5856 (char *)&boot_conn->chap.intr_chap_name); 5857 break; 5858 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5859 rc = sprintf(str, "%.*s\n", 5860 boot_conn->chap.intr_secret_length, 5861 (char *)&boot_conn->chap.intr_secret); 5862 break; 5863 case ISCSI_BOOT_TGT_FLAGS: 5864 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5865 break; 5866 case ISCSI_BOOT_TGT_NIC_ASSOC: 5867 rc = sprintf(str, "0\n"); 5868 break; 5869 default: 5870 rc = -ENOSYS; 5871 break; 5872 } 5873 return rc; 5874} 5875 5876static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5877{ 5878 struct scsi_qla_host *ha = data; 5879 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5880 5881 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5882} 5883 5884static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5885{ 5886 struct scsi_qla_host *ha = data; 5887 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5888 5889 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5890} 5891 5892static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5893{ 5894 int rc; 5895 5896 switch (type) { 5897 case ISCSI_BOOT_TGT_NAME: 5898 case ISCSI_BOOT_TGT_IP_ADDR: 5899 case ISCSI_BOOT_TGT_PORT: 5900 case ISCSI_BOOT_TGT_CHAP_NAME: 5901 case ISCSI_BOOT_TGT_CHAP_SECRET: 5902 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5903 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5904 case ISCSI_BOOT_TGT_NIC_ASSOC: 5905 case ISCSI_BOOT_TGT_FLAGS: 5906 rc = S_IRUGO; 5907 break; 5908 default: 5909 rc = 0; 5910 break; 5911 } 5912 return rc; 5913} 5914 5915static void qla4xxx_boot_release(void *data) 5916{ 5917 struct scsi_qla_host *ha = data; 5918 5919 scsi_host_put(ha->host); 5920} 5921 5922static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5923{ 5924 dma_addr_t buf_dma; 5925 uint32_t addr, pri_addr, sec_addr; 5926 uint32_t offset; 5927 uint16_t func_num; 5928 uint8_t val; 5929 uint8_t *buf = NULL; 5930 size_t size = 13 * sizeof(uint8_t); 5931 int ret = QLA_SUCCESS; 5932 5933 func_num = PCI_FUNC(ha->pdev->devfn); 5934 5935 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5936 __func__, ha->pdev->device, func_num); 5937 5938 if (is_qla40XX(ha)) { 5939 if (func_num == 1) { 5940 addr = NVRAM_PORT0_BOOT_MODE; 5941 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5942 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT; 5943 } else if (func_num == 3) 
{ 5944 addr = NVRAM_PORT1_BOOT_MODE; 5945 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5946 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5947 } else { 5948 ret = QLA_ERROR; 5949 goto exit_boot_info; 5950 } 5951 5952 /* Check Boot Mode */ 5953 val = rd_nvram_byte(ha, addr); 5954 if (!(val & 0x07)) { 5955 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5956 "options : 0x%x\n", __func__, val)); 5957 ret = QLA_ERROR; 5958 goto exit_boot_info; 5959 } 5960 5961 /* get primary valid target index */ 5962 val = rd_nvram_byte(ha, pri_addr); 5963 if (val & BIT_7) 5964 ddb_index[0] = (val & 0x7f); 5965 5966 /* get secondary valid target index */ 5967 val = rd_nvram_byte(ha, sec_addr); 5968 if (val & BIT_7) 5969 ddb_index[1] = (val & 0x7f); 5970 goto exit_boot_info; 5971 } else if (is_qla80XX(ha)) { 5972 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5973 &buf_dma, GFP_KERNEL); 5974 if (!buf) { 5975 DEBUG2(ql4_printk(KERN_ERR, ha, 5976 "%s: Unable to allocate dma buffer\n", 5977 __func__)); 5978 ret = QLA_ERROR; 5979 goto exit_boot_info; 5980 } 5981 5982 if (ha->port_num == 0) 5983 offset = BOOT_PARAM_OFFSET_PORT0; 5984 else if (ha->port_num == 1) 5985 offset = BOOT_PARAM_OFFSET_PORT1; 5986 else { 5987 ret = QLA_ERROR; 5988 goto exit_boot_info_free; 5989 } 5990 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5991 offset; 5992 if (qla4xxx_get_flash(ha, buf_dma, addr, 5993 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5994 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5995 " failed\n", ha->host_no, __func__)); 5996 ret = QLA_ERROR; 5997 goto exit_boot_info_free; 5998 } 5999 /* Check Boot Mode */ 6000 if (!(buf[1] & 0x07)) { 6001 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 6002 " : 0x%x\n", buf[1])); 6003 ret = QLA_ERROR; 6004 goto exit_boot_info_free; 6005 } 6006 6007 /* get primary valid target index */ 6008 if (buf[2] & BIT_7) 6009 ddb_index[0] = buf[2] & 0x7f; 6010 6011 /* get secondary valid target index */ 6012 if (buf[11] & BIT_7) 6013 ddb_index[1] = buf[11] & 0x7f; 6014 } else { 6015 ret = QLA_ERROR; 6016 goto exit_boot_info; 6017 } 6018 6019 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 6020 " target ID %d\n", __func__, ddb_index[0], 6021 ddb_index[1])); 6022 6023exit_boot_info_free: 6024 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 6025exit_boot_info: 6026 ha->pri_ddb_idx = ddb_index[0]; 6027 ha->sec_ddb_idx = ddb_index[1]; 6028 return ret; 6029} 6030 6031/** 6032 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 6033 * @ha: pointer to adapter structure 6034 * @username: CHAP username to be returned 6035 * @password: CHAP password to be returned 6036 * 6037 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 6038 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 6039 * So from the CHAP cache find the first BIDI CHAP entry and set it 6040 * to the boot record in sysfs. 
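 *
 * A CHAP cache entry is used only if it has a valid cookie
 * (CHAP_VALID_COOKIE), is not marked local (BIT_7 clear) and has the
 * BIDI flag set (BIT_6). The first matching entry's name and secret are
 * copied into @username and @password. Returns 0 on success and -EINVAL
 * if the CHAP cache is unavailable or no BIDI entry is found.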
6041 **/ 6042static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 6043 char *password) 6044{ 6045 int i, ret = -EINVAL; 6046 int max_chap_entries = 0; 6047 struct ql4_chap_table *chap_table; 6048 6049 if (is_qla80XX(ha)) 6050 max_chap_entries = (ha->hw.flt_chap_size / 2) / 6051 sizeof(struct ql4_chap_table); 6052 else 6053 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 6054 6055 if (!ha->chap_list) { 6056 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 6057 return ret; 6058 } 6059 6060 mutex_lock(&ha->chap_sem); 6061 for (i = 0; i < max_chap_entries; i++) { 6062 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 6063 if (chap_table->cookie != 6064 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 6065 continue; 6066 } 6067 6068 if (chap_table->flags & BIT_7) /* local */ 6069 continue; 6070 6071 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 6072 continue; 6073 6074 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 6075 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 6076 ret = 0; 6077 break; 6078 } 6079 mutex_unlock(&ha->chap_sem); 6080 6081 return ret; 6082} 6083 6084 6085static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 6086 struct ql4_boot_session_info *boot_sess, 6087 uint16_t ddb_index) 6088{ 6089 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 6090 struct dev_db_entry *fw_ddb_entry; 6091 dma_addr_t fw_ddb_entry_dma; 6092 uint16_t idx; 6093 uint16_t options; 6094 int ret = QLA_SUCCESS; 6095 6096 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6097 &fw_ddb_entry_dma, GFP_KERNEL); 6098 if (!fw_ddb_entry) { 6099 DEBUG2(ql4_printk(KERN_ERR, ha, 6100 "%s: Unable to allocate dma buffer.\n", 6101 __func__)); 6102 ret = QLA_ERROR; 6103 return ret; 6104 } 6105 6106 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6107 fw_ddb_entry_dma, ddb_index)) { 6108 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6109 "index [%d]\n", __func__, ddb_index)); 6110 ret = QLA_ERROR; 6111 goto exit_boot_target; 6112 } 6113 6114 /* Update target name and IP from DDB */ 6115 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6116 min(sizeof(boot_sess->target_name), 6117 sizeof(fw_ddb_entry->iscsi_name))); 6118 6119 options = le16_to_cpu(fw_ddb_entry->options); 6120 if (options & DDB_OPT_IPV6_DEVICE) { 6121 memcpy(&boot_conn->dest_ipaddr.ip_address, 6122 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6123 } else { 6124 boot_conn->dest_ipaddr.ip_type = 0x1; 6125 memcpy(&boot_conn->dest_ipaddr.ip_address, 6126 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6127 } 6128 6129 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6130 6131 /* update chap information */ 6132 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6133 6134 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6135 6136 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6137 6138 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
6139 target_chap_name, 6140 (char *)&boot_conn->chap.target_secret, 6141 idx); 6142 if (ret) { 6143 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6144 ret = QLA_ERROR; 6145 goto exit_boot_target; 6146 } 6147 6148 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6149 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6150 } 6151 6152 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6153 6154 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6155 6156 ret = qla4xxx_get_bidi_chap(ha, 6157 (char *)&boot_conn->chap.intr_chap_name, 6158 (char *)&boot_conn->chap.intr_secret); 6159 6160 if (ret) { 6161 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6162 ret = QLA_ERROR; 6163 goto exit_boot_target; 6164 } 6165 6166 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6167 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6168 } 6169 6170exit_boot_target: 6171 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6172 fw_ddb_entry, fw_ddb_entry_dma); 6173 return ret; 6174} 6175 6176static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6177{ 6178 uint16_t ddb_index[2]; 6179 int ret = QLA_ERROR; 6180 int rval; 6181 6182 memset(ddb_index, 0, sizeof(ddb_index)); 6183 ddb_index[0] = 0xffff; 6184 ddb_index[1] = 0xffff; 6185 ret = get_fw_boot_info(ha, ddb_index); 6186 if (ret != QLA_SUCCESS) { 6187 DEBUG2(ql4_printk(KERN_INFO, ha, 6188 "%s: No boot target configured.\n", __func__)); 6189 return ret; 6190 } 6191 6192 if (ql4xdisablesysfsboot) 6193 return QLA_SUCCESS; 6194 6195 if (ddb_index[0] == 0xffff) 6196 goto sec_target; 6197 6198 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6199 ddb_index[0]); 6200 if (rval != QLA_SUCCESS) { 6201 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6202 "configured\n", __func__)); 6203 } else 6204 ret = QLA_SUCCESS; 6205 6206sec_target: 6207 if (ddb_index[1] == 0xffff) 6208 goto exit_get_boot_info; 6209 6210 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6211 ddb_index[1]); 6212 if (rval != QLA_SUCCESS) { 6213 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6214 " configured\n", __func__)); 6215 } else 6216 ret = QLA_SUCCESS; 6217 6218exit_get_boot_info: 6219 return ret; 6220} 6221 6222static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6223{ 6224 struct iscsi_boot_kobj *boot_kobj; 6225 6226 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6227 return QLA_ERROR; 6228 6229 if (ql4xdisablesysfsboot) { 6230 ql4_printk(KERN_INFO, ha, 6231 "%s: syfsboot disabled - driver will trigger login " 6232 "and publish session for discovery .\n", __func__); 6233 return QLA_SUCCESS; 6234 } 6235 6236 6237 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6238 if (!ha->boot_kset) 6239 goto kset_free; 6240 6241 if (!scsi_host_get(ha->host)) 6242 goto kset_free; 6243 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6244 qla4xxx_show_boot_tgt_pri_info, 6245 qla4xxx_tgt_get_attr_visibility, 6246 qla4xxx_boot_release); 6247 if (!boot_kobj) 6248 goto put_host; 6249 6250 if (!scsi_host_get(ha->host)) 6251 goto kset_free; 6252 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6253 qla4xxx_show_boot_tgt_sec_info, 6254 qla4xxx_tgt_get_attr_visibility, 6255 qla4xxx_boot_release); 6256 if (!boot_kobj) 6257 goto put_host; 6258 6259 if (!scsi_host_get(ha->host)) 6260 goto kset_free; 6261 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6262 qla4xxx_show_boot_ini_info, 6263 
qla4xxx_ini_get_attr_visibility, 6264 qla4xxx_boot_release); 6265 if (!boot_kobj) 6266 goto put_host; 6267 6268 if (!scsi_host_get(ha->host)) 6269 goto kset_free; 6270 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6271 qla4xxx_show_boot_eth_info, 6272 qla4xxx_eth_get_attr_visibility, 6273 qla4xxx_boot_release); 6274 if (!boot_kobj) 6275 goto put_host; 6276 6277 return QLA_SUCCESS; 6278 6279put_host: 6280 scsi_host_put(ha->host); 6281kset_free: 6282 iscsi_boot_destroy_kset(ha->boot_kset); 6283 return -ENOMEM; 6284} 6285 6286 6287static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6288 struct ql4_tuple_ddb *tddb) 6289{ 6290 struct iscsi_cls_session *cls_sess; 6291 struct iscsi_cls_conn *cls_conn; 6292 struct iscsi_session *sess; 6293 struct iscsi_conn *conn; 6294 6295 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6296 cls_sess = ddb_entry->sess; 6297 sess = cls_sess->dd_data; 6298 cls_conn = ddb_entry->conn; 6299 conn = cls_conn->dd_data; 6300 6301 tddb->tpgt = sess->tpgt; 6302 tddb->port = conn->persistent_port; 6303 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6304 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6305} 6306 6307static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6308 struct ql4_tuple_ddb *tddb, 6309 uint8_t *flash_isid) 6310{ 6311 uint16_t options = 0; 6312 6313 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6314 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6315 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6316 6317 options = le16_to_cpu(fw_ddb_entry->options); 6318 if (options & DDB_OPT_IPV6_DEVICE) 6319 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6320 else 6321 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6322 6323 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6324 6325 if (flash_isid == NULL) 6326 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6327 sizeof(tddb->isid)); 6328 else 6329 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6330} 6331 6332static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6333 struct ql4_tuple_ddb *old_tddb, 6334 struct ql4_tuple_ddb *new_tddb, 6335 uint8_t is_isid_compare) 6336{ 6337 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6338 return QLA_ERROR; 6339 6340 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6341 return QLA_ERROR; 6342 6343 if (old_tddb->port != new_tddb->port) 6344 return QLA_ERROR; 6345 6346 /* For multi sessions, driver generates the ISID, so do not compare 6347 * ISID in reset path since it would be a comparison between the 6348 * driver generated ISID and firmware generated ISID. This could 6349 * lead to adding duplicated DDBs in the list as driver generated 6350 * ISID would not match firmware generated ISID. 
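	 * Concretely, qla4xxx_is_session_exists() passes is_isid_compare =
	 * false (the reset path), while qla4xxx_is_flash_ddb_exists() passes
	 * true when building the flash DDB list, so the ISID memcmp() below
	 * only runs in the latter case.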
6351 */ 6352 if (is_isid_compare) { 6353 DEBUG2(ql4_printk(KERN_INFO, ha, 6354 "%s: old ISID [%pmR] New ISID [%pmR]\n", 6355 __func__, old_tddb->isid, new_tddb->isid)); 6356 6357 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6358 sizeof(old_tddb->isid))) 6359 return QLA_ERROR; 6360 } 6361 6362 DEBUG2(ql4_printk(KERN_INFO, ha, 6363 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6364 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6365 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6366 new_tddb->ip_addr, new_tddb->iscsi_name)); 6367 6368 return QLA_SUCCESS; 6369} 6370 6371static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6372 struct dev_db_entry *fw_ddb_entry, 6373 uint32_t *index) 6374{ 6375 struct ddb_entry *ddb_entry; 6376 struct ql4_tuple_ddb *fw_tddb = NULL; 6377 struct ql4_tuple_ddb *tmp_tddb = NULL; 6378 int idx; 6379 int ret = QLA_ERROR; 6380 6381 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6382 if (!fw_tddb) { 6383 DEBUG2(ql4_printk(KERN_WARNING, ha, 6384 "Memory Allocation failed.\n")); 6385 ret = QLA_SUCCESS; 6386 goto exit_check; 6387 } 6388 6389 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6390 if (!tmp_tddb) { 6391 DEBUG2(ql4_printk(KERN_WARNING, ha, 6392 "Memory Allocation failed.\n")); 6393 ret = QLA_SUCCESS; 6394 goto exit_check; 6395 } 6396 6397 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6398 6399 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6400 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6401 if (ddb_entry == NULL) 6402 continue; 6403 6404 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6405 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6406 ret = QLA_SUCCESS; /* found */ 6407 if (index != NULL) 6408 *index = idx; 6409 goto exit_check; 6410 } 6411 } 6412 6413exit_check: 6414 if (fw_tddb) 6415 vfree(fw_tddb); 6416 if (tmp_tddb) 6417 vfree(tmp_tddb); 6418 return ret; 6419} 6420 6421/** 6422 * qla4xxx_check_existing_isid - check if target with same isid exist 6423 * in target list 6424 * @list_nt: list of target 6425 * @isid: isid to check 6426 * 6427 * This routine return QLA_SUCCESS if target with same isid exist 6428 **/ 6429static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6430{ 6431 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6432 struct dev_db_entry *fw_ddb_entry; 6433 6434 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6435 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6436 6437 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6438 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6439 return QLA_SUCCESS; 6440 } 6441 } 6442 return QLA_ERROR; 6443} 6444 6445/** 6446 * qla4xxx_update_isid - compare ddbs and updated isid 6447 * @ha: Pointer to host adapter structure. 6448 * @list_nt: list of nt target 6449 * @fw_ddb_entry: firmware ddb entry 6450 * 6451 * This routine update isid if ddbs have same iqn, same isid and 6452 * different IP addr. 6453 * Return QLA_SUCCESS if isid is updated. 
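 *
 * Only the top three bits of isid[1] are varied, i.e.
 * isid[1] = (isid[1] & 0x1f) | (i << 5) for i = 0..7, and
 * qla4xxx_check_existing_isid() is consulted for each candidate until a
 * value not already present in @list_nt is found.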
6454 **/ 6455static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6456 struct list_head *list_nt, 6457 struct dev_db_entry *fw_ddb_entry) 6458{ 6459 uint8_t base_value, i; 6460 6461 base_value = fw_ddb_entry->isid[1] & 0x1f; 6462 for (i = 0; i < 8; i++) { 6463 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6464 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6465 break; 6466 } 6467 6468 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6469 return QLA_ERROR; 6470 6471 return QLA_SUCCESS; 6472} 6473 6474/** 6475 * qla4xxx_should_update_isid - check if isid needs to be updated 6476 * @ha: Pointer to host adapter structure. 6477 * @old_tddb: ddb tuple 6478 * @new_tddb: ddb tuple 6479 * 6480 * Return QLA_SUCCESS if the IP address or port differs but the iqn and 6481 * the isid match 6482 **/ 6483static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6484 struct ql4_tuple_ddb *old_tddb, 6485 struct ql4_tuple_ddb *new_tddb) 6486{ 6487 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6488 /* Same ip */ 6489 if (old_tddb->port == new_tddb->port) 6490 return QLA_ERROR; 6491 } 6492 6493 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6494 /* different iqn */ 6495 return QLA_ERROR; 6496 6497 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6498 sizeof(old_tddb->isid))) 6499 /* different isid */ 6500 return QLA_ERROR; 6501 6502 return QLA_SUCCESS; 6503} 6504 6505/** 6506 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6507 * @ha: Pointer to host adapter structure. 6508 * @list_nt: list of NT targets. 6509 * @fw_ddb_entry: firmware ddb entry. 6510 * 6511 * This routine checks whether fw_ddb_entry already exists in list_nt in 6512 * order to avoid adding a duplicate ddb to list_nt. 6513 * Return QLA_SUCCESS if a duplicate ddb exists in list_nt. 6514 * Note: This function also updates the isid of the DDB if required.
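 *
 * Two passes are made over @list_nt: the first compares the full tuple
 * (including the ISID stored in flash_isid) to detect an exact
 * duplicate; the second uses qla4xxx_should_update_isid() and, when it
 * matches, calls qla4xxx_update_isid(); if a free ISID is found the
 * entry is reported as not being a duplicate (QLA_ERROR) so the caller
 * will still add it.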
6515 **/ 6516 6517static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6518 struct list_head *list_nt, 6519 struct dev_db_entry *fw_ddb_entry) 6520{ 6521 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6522 struct ql4_tuple_ddb *fw_tddb = NULL; 6523 struct ql4_tuple_ddb *tmp_tddb = NULL; 6524 int rval, ret = QLA_ERROR; 6525 6526 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6527 if (!fw_tddb) { 6528 DEBUG2(ql4_printk(KERN_WARNING, ha, 6529 "Memory Allocation failed.\n")); 6530 ret = QLA_SUCCESS; 6531 goto exit_check; 6532 } 6533 6534 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6535 if (!tmp_tddb) { 6536 DEBUG2(ql4_printk(KERN_WARNING, ha, 6537 "Memory Allocation failed.\n")); 6538 ret = QLA_SUCCESS; 6539 goto exit_check; 6540 } 6541 6542 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6543 6544 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6545 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6546 nt_ddb_idx->flash_isid); 6547 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6548 /* found duplicate ddb */ 6549 if (ret == QLA_SUCCESS) 6550 goto exit_check; 6551 } 6552 6553 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6554 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6555 6556 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6557 if (ret == QLA_SUCCESS) { 6558 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6559 if (rval == QLA_SUCCESS) 6560 ret = QLA_ERROR; 6561 else 6562 ret = QLA_SUCCESS; 6563 6564 goto exit_check; 6565 } 6566 } 6567 6568exit_check: 6569 if (fw_tddb) 6570 vfree(fw_tddb); 6571 if (tmp_tddb) 6572 vfree(tmp_tddb); 6573 return ret; 6574} 6575 6576static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6577{ 6578 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6579 6580 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6581 list_del_init(&ddb_idx->list); 6582 vfree(ddb_idx); 6583 } 6584} 6585 6586static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6587 struct dev_db_entry *fw_ddb_entry) 6588{ 6589 struct iscsi_endpoint *ep; 6590 struct sockaddr_in *addr; 6591 struct sockaddr_in6 *addr6; 6592 struct sockaddr *t_addr; 6593 struct sockaddr_storage *dst_addr; 6594 char *ip; 6595 6596 /* TODO: need to destroy on unload iscsi_endpoint*/ 6597 dst_addr = vmalloc(sizeof(*dst_addr)); 6598 if (!dst_addr) 6599 return NULL; 6600 6601 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6602 t_addr = (struct sockaddr *)dst_addr; 6603 t_addr->sa_family = AF_INET6; 6604 addr6 = (struct sockaddr_in6 *)dst_addr; 6605 ip = (char *)&addr6->sin6_addr; 6606 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6607 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6608 6609 } else { 6610 t_addr = (struct sockaddr *)dst_addr; 6611 t_addr->sa_family = AF_INET; 6612 addr = (struct sockaddr_in *)dst_addr; 6613 ip = (char *)&addr->sin_addr; 6614 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6615 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6616 } 6617 6618 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6619 vfree(dst_addr); 6620 return ep; 6621} 6622 6623static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6624{ 6625 if (ql4xdisablesysfsboot) 6626 return QLA_SUCCESS; 6627 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6628 return QLA_ERROR; 6629 return QLA_SUCCESS; 6630} 6631 6632static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6633 struct ddb_entry *ddb_entry, 6634 
uint16_t idx) 6635{ 6636 uint16_t def_timeout; 6637 6638 ddb_entry->ddb_type = FLASH_DDB; 6639 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6640 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6641 ddb_entry->ha = ha; 6642 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6643 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6644 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6645 6646 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6647 atomic_set(&ddb_entry->relogin_timer, 0); 6648 atomic_set(&ddb_entry->relogin_retry_count, 0); 6649 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6650 ddb_entry->default_relogin_timeout = 6651 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6652 def_timeout : LOGIN_TOV; 6653 ddb_entry->default_time2wait = 6654 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6655 6656 if (ql4xdisablesysfsboot && 6657 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6658 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6659} 6660 6661static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6662{ 6663 uint32_t idx = 0; 6664 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6665 uint32_t sts[MBOX_REG_COUNT]; 6666 uint32_t ip_state; 6667 unsigned long wtime; 6668 int ret; 6669 6670 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6671 do { 6672 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6673 if (ip_idx[idx] == -1) 6674 continue; 6675 6676 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6677 6678 if (ret == QLA_ERROR) { 6679 ip_idx[idx] = -1; 6680 continue; 6681 } 6682 6683 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6684 6685 DEBUG2(ql4_printk(KERN_INFO, ha, 6686 "Waiting for IP state for idx = %d, state = 0x%x\n", 6687 ip_idx[idx], ip_state)); 6688 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6689 ip_state == IP_ADDRSTATE_INVALID || 6690 ip_state == IP_ADDRSTATE_PREFERRED || 6691 ip_state == IP_ADDRSTATE_DEPRICATED || 6692 ip_state == IP_ADDRSTATE_DISABLING) 6693 ip_idx[idx] = -1; 6694 } 6695 6696 /* Break if all IP states checked */ 6697 if ((ip_idx[0] == -1) && 6698 (ip_idx[1] == -1) && 6699 (ip_idx[2] == -1) && 6700 (ip_idx[3] == -1)) 6701 break; 6702 schedule_timeout_uninterruptible(HZ); 6703 } while (time_after(wtime, jiffies)); 6704} 6705 6706static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6707 struct dev_db_entry *flash_ddb_entry) 6708{ 6709 uint16_t options = 0; 6710 size_t ip_len = IP_ADDR_LEN; 6711 6712 options = le16_to_cpu(fw_ddb_entry->options); 6713 if (options & DDB_OPT_IPV6_DEVICE) 6714 ip_len = IPv6_ADDR_LEN; 6715 6716 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6717 return QLA_ERROR; 6718 6719 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6720 sizeof(fw_ddb_entry->isid))) 6721 return QLA_ERROR; 6722 6723 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6724 sizeof(fw_ddb_entry->port))) 6725 return QLA_ERROR; 6726 6727 return QLA_SUCCESS; 6728} 6729 6730static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6731 struct dev_db_entry *fw_ddb_entry, 6732 uint32_t fw_idx, uint32_t *flash_index) 6733{ 6734 struct dev_db_entry *flash_ddb_entry; 6735 dma_addr_t flash_ddb_entry_dma; 6736 uint32_t idx = 0; 6737 int max_ddbs; 6738 int ret = QLA_ERROR, status; 6739 6740 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6741 MAX_DEV_DB_ENTRIES; 6742 6743 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6744 &flash_ddb_entry_dma); 6745 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6746 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6747 goto exit_find_st_idx; 6748 } 6749 6750 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6751 flash_ddb_entry_dma, fw_idx); 6752 if (status == QLA_SUCCESS) { 6753 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6754 if (status == QLA_SUCCESS) { 6755 *flash_index = fw_idx; 6756 ret = QLA_SUCCESS; 6757 goto exit_find_st_idx; 6758 } 6759 } 6760 6761 for (idx = 0; idx < max_ddbs; idx++) { 6762 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6763 flash_ddb_entry_dma, idx); 6764 if (status == QLA_ERROR) 6765 continue; 6766 6767 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6768 if (status == QLA_SUCCESS) { 6769 *flash_index = idx; 6770 ret = QLA_SUCCESS; 6771 goto exit_find_st_idx; 6772 } 6773 } 6774 6775 if (idx == max_ddbs) 6776 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6777 fw_idx); 6778 6779exit_find_st_idx: 6780 if (flash_ddb_entry) 6781 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6782 flash_ddb_entry_dma); 6783 6784 return ret; 6785} 6786 6787static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6788 struct list_head *list_st) 6789{ 6790 struct qla_ddb_index *st_ddb_idx; 6791 int max_ddbs; 6792 int fw_idx_size; 6793 struct dev_db_entry *fw_ddb_entry; 6794 dma_addr_t fw_ddb_dma; 6795 int ret; 6796 uint32_t idx = 0, next_idx = 0; 6797 uint32_t state = 0, conn_err = 0; 6798 uint32_t flash_index = -1; 6799 uint16_t conn_id = 0; 6800 6801 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6802 &fw_ddb_dma); 6803 if (fw_ddb_entry == NULL) { 6804 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6805 goto exit_st_list; 6806 } 6807 6808 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6809 MAX_DEV_DB_ENTRIES; 6810 fw_idx_size = sizeof(struct qla_ddb_index); 6811 6812 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6813 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6814 NULL, &next_idx, &state, 6815 &conn_err, NULL, &conn_id); 6816 if (ret == QLA_ERROR) 6817 break; 6818 6819 /* Ignore DDB if invalid state (unassigned) */ 6820 if (state == DDB_DS_UNASSIGNED) 6821 goto continue_next_st; 6822 6823 /* Check if ST, add to the list_st */ 6824 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6825 goto continue_next_st; 6826 6827 st_ddb_idx = vzalloc(fw_idx_size); 6828 if (!st_ddb_idx) 6829 break; 6830 6831 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6832 &flash_index); 6833 if (ret == QLA_ERROR) { 6834 ql4_printk(KERN_ERR, ha, 6835 "No flash entry for ST at idx [%d]\n", idx); 6836 st_ddb_idx->flash_ddb_idx = idx; 6837 } else { 6838 ql4_printk(KERN_INFO, ha, 6839 "ST at idx [%d] is stored at flash [%d]\n", 6840 idx, flash_index); 6841 st_ddb_idx->flash_ddb_idx = flash_index; 6842 } 6843 6844 st_ddb_idx->fw_ddb_idx = idx; 6845 6846 list_add_tail(&st_ddb_idx->list, list_st); 6847continue_next_st: 6848 if (next_idx == 0) 6849 break; 6850 } 6851 6852exit_st_list: 6853 if (fw_ddb_entry) 6854 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6855} 6856 6857/** 6858 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6859 * @ha: pointer to adapter structure 6860 * @list_ddb: List from which failed ddb to be removed 6861 * 6862 * Iterate over the list of DDBs and find and remove DDBs that are either in 6863 * no connection active state or failed state 6864 **/ 6865static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6866 struct list_head *list_ddb) 6867{ 6868 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6869 uint32_t next_idx = 0; 6870 uint32_t state = 0, conn_err = 0; 6871 int ret; 6872 6873 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6874 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6875 NULL, 0, NULL, &next_idx, &state, 6876 &conn_err, NULL, NULL); 6877 if (ret == QLA_ERROR) 6878 continue; 6879 6880 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6881 state == DDB_DS_SESSION_FAILED) { 6882 list_del_init(&ddb_idx->list); 6883 vfree(ddb_idx); 6884 } 6885 } 6886} 6887 6888static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6889 struct ddb_entry *ddb_entry, 6890 struct dev_db_entry *fw_ddb_entry) 6891{ 6892 struct iscsi_cls_session *cls_sess; 6893 struct iscsi_session *sess; 6894 uint32_t max_ddbs = 0; 6895 uint16_t ddb_link = -1; 6896 6897 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6898 MAX_DEV_DB_ENTRIES; 6899 6900 cls_sess = ddb_entry->sess; 6901 sess = cls_sess->dd_data; 6902 6903 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6904 if (ddb_link < max_ddbs) 6905 sess->discovery_parent_idx = ddb_link; 6906 else 6907 sess->discovery_parent_idx = DDB_NO_LINK; 6908} 6909 6910static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6911 struct dev_db_entry *fw_ddb_entry, 6912 int is_reset, uint16_t idx) 6913{ 6914 struct iscsi_cls_session *cls_sess; 6915 struct iscsi_session *sess; 6916 struct iscsi_cls_conn *cls_conn; 6917 struct iscsi_endpoint *ep; 6918 uint16_t cmds_max = 32; 6919 uint16_t conn_id = 0; 6920 uint32_t initial_cmdsn = 0; 6921 int ret = QLA_SUCCESS; 6922 6923 struct ddb_entry *ddb_entry = NULL; 6924 6925 /* Create session object with INVALID_ENTRY; 6926 * the target_id will get set when we issue the login 6927 */ 6928 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6929 cmds_max, sizeof(struct ddb_entry), 6930 sizeof(struct ql4_task_data), 6931 initial_cmdsn, INVALID_ENTRY); 6932 if (!cls_sess) { 6933 ret = QLA_ERROR; 6934 goto exit_setup; 6935 } 6936 6937 /* 6938 * iscsi_session_setup() takes a reference on the transport module 6939 * owner, so call module_put() here to drop that reference. 6940 **/ 6941 module_put(qla4xxx_iscsi_transport.owner); 6942 sess = cls_sess->dd_data; 6943 ddb_entry = sess->dd_data; 6944 ddb_entry->sess = cls_sess; 6945 6946 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6947 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6948 sizeof(struct dev_db_entry)); 6949 6950 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6951 6952 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6953 6954 if (!cls_conn) { 6955 ret = QLA_ERROR; 6956 goto exit_setup; 6957 } 6958 6959 ddb_entry->conn = cls_conn; 6960 6961 /* Setup ep, for displaying attributes in sysfs */ 6962 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6963 if (ep) { 6964 ep->conn = cls_conn; 6965 cls_conn->ep = ep; 6966 } else { 6967 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6968 ret = QLA_ERROR; 6969 goto exit_setup; 6970 } 6971 6972 /* Update sess/conn params */ 6973 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6974 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6975 6976 if (is_reset == RESET_ADAPTER) { 6977 iscsi_block_session(cls_sess); 6978 /* Use the relogin path to discover new devices 6979 * by short-circuiting the logic of setting 6980 * a timer to relogin - instead set the flags 6981 * to initiate login right away.
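		 * (DPC_RELOGIN_DEVICE is set on the adapter and DF_RELOGIN on
		 * the ddb entry; qla4xxx_do_dpc() then walks the sessions and
		 * qla4xxx_dpc_relogin() issues the flash DDB login.)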
6982 */ 6983 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6984 set_bit(DF_RELOGIN, &ddb_entry->flags); 6985 } 6986 6987exit_setup: 6988 return ret; 6989} 6990 6991static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6992 struct list_head *list_ddb, 6993 struct dev_db_entry *fw_ddb_entry) 6994{ 6995 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6996 uint16_t ddb_link; 6997 6998 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6999 7000 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 7001 if (ddb_idx->fw_ddb_idx == ddb_link) { 7002 DEBUG2(ql4_printk(KERN_INFO, ha, 7003 "Updating NT parent idx from [%d] to [%d]\n", 7004 ddb_link, ddb_idx->flash_ddb_idx)); 7005 fw_ddb_entry->ddb_link = 7006 cpu_to_le16(ddb_idx->flash_ddb_idx); 7007 return; 7008 } 7009 } 7010} 7011 7012static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 7013 struct list_head *list_nt, 7014 struct list_head *list_st, 7015 int is_reset) 7016{ 7017 struct dev_db_entry *fw_ddb_entry; 7018 struct ddb_entry *ddb_entry = NULL; 7019 dma_addr_t fw_ddb_dma; 7020 int max_ddbs; 7021 int fw_idx_size; 7022 int ret; 7023 uint32_t idx = 0, next_idx = 0; 7024 uint32_t state = 0, conn_err = 0; 7025 uint32_t ddb_idx = -1; 7026 uint16_t conn_id = 0; 7027 uint16_t ddb_link = -1; 7028 struct qla_ddb_index *nt_ddb_idx; 7029 7030 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7031 &fw_ddb_dma); 7032 if (fw_ddb_entry == NULL) { 7033 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7034 goto exit_nt_list; 7035 } 7036 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 7037 MAX_DEV_DB_ENTRIES; 7038 fw_idx_size = sizeof(struct qla_ddb_index); 7039 7040 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7041 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7042 NULL, &next_idx, &state, 7043 &conn_err, NULL, &conn_id); 7044 if (ret == QLA_ERROR) 7045 break; 7046 7047 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 7048 goto continue_next_nt; 7049 7050 /* Check if NT, then add to list it */ 7051 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 7052 goto continue_next_nt; 7053 7054 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 7055 if (ddb_link < max_ddbs) 7056 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 7057 7058 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 7059 state == DDB_DS_SESSION_FAILED) && 7060 (is_reset == INIT_ADAPTER)) 7061 goto continue_next_nt; 7062 7063 DEBUG2(ql4_printk(KERN_INFO, ha, 7064 "Adding DDB to session = 0x%x\n", idx)); 7065 7066 if (is_reset == INIT_ADAPTER) { 7067 nt_ddb_idx = vmalloc(fw_idx_size); 7068 if (!nt_ddb_idx) 7069 break; 7070 7071 nt_ddb_idx->fw_ddb_idx = idx; 7072 7073 /* Copy original isid as it may get updated in function 7074 * qla4xxx_update_isid(). 
We need original isid in 7075 * function qla4xxx_compare_tuple_ddb to find duplicate 7076 * target */ 7077 memcpy(&nt_ddb_idx->flash_isid[0], 7078 &fw_ddb_entry->isid[0], 7079 sizeof(nt_ddb_idx->flash_isid)); 7080 7081 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 7082 fw_ddb_entry); 7083 if (ret == QLA_SUCCESS) { 7084 /* free nt_ddb_idx and do not add to list_nt */ 7085 vfree(nt_ddb_idx); 7086 goto continue_next_nt; 7087 } 7088 7089 /* Copy updated isid */ 7090 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 7091 sizeof(struct dev_db_entry)); 7092 7093 list_add_tail(&nt_ddb_idx->list, list_nt); 7094 } else if (is_reset == RESET_ADAPTER) { 7095 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7096 &ddb_idx); 7097 if (ret == QLA_SUCCESS) { 7098 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7099 ddb_idx); 7100 if (ddb_entry != NULL) 7101 qla4xxx_update_sess_disc_idx(ha, 7102 ddb_entry, 7103 fw_ddb_entry); 7104 goto continue_next_nt; 7105 } 7106 } 7107 7108 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7109 if (ret == QLA_ERROR) 7110 goto exit_nt_list; 7111 7112continue_next_nt: 7113 if (next_idx == 0) 7114 break; 7115 } 7116 7117exit_nt_list: 7118 if (fw_ddb_entry) 7119 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7120} 7121 7122static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7123 struct list_head *list_nt, 7124 uint16_t target_id) 7125{ 7126 struct dev_db_entry *fw_ddb_entry; 7127 dma_addr_t fw_ddb_dma; 7128 int max_ddbs; 7129 int fw_idx_size; 7130 int ret; 7131 uint32_t idx = 0, next_idx = 0; 7132 uint32_t state = 0, conn_err = 0; 7133 uint16_t conn_id = 0; 7134 struct qla_ddb_index *nt_ddb_idx; 7135 7136 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7137 &fw_ddb_dma); 7138 if (fw_ddb_entry == NULL) { 7139 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7140 goto exit_new_nt_list; 7141 } 7142 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 7143 MAX_DEV_DB_ENTRIES; 7144 fw_idx_size = sizeof(struct qla_ddb_index); 7145 7146 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7147 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7148 NULL, &next_idx, &state, 7149 &conn_err, NULL, &conn_id); 7150 if (ret == QLA_ERROR) 7151 break; 7152 7153 /* Check if NT, then add it to the list */ 7154 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7155 goto continue_next_new_nt; 7156 7157 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7158 goto continue_next_new_nt; 7159 7160 DEBUG2(ql4_printk(KERN_INFO, ha, 7161 "Adding DDB to session = 0x%x\n", idx)); 7162 7163 nt_ddb_idx = vmalloc(fw_idx_size); 7164 if (!nt_ddb_idx) 7165 break; 7166 7167 nt_ddb_idx->fw_ddb_idx = idx; 7168 7169 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7170 if (ret == QLA_SUCCESS) { 7171 /* free nt_ddb_idx and do not add to list_nt */ 7172 vfree(nt_ddb_idx); 7173 goto continue_next_new_nt; 7174 } 7175 7176 if (target_id < max_ddbs) 7177 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7178 7179 list_add_tail(&nt_ddb_idx->list, list_nt); 7180 7181 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7182 idx); 7183 if (ret == QLA_ERROR) 7184 goto exit_new_nt_list; 7185 7186continue_next_new_nt: 7187 if (next_idx == 0) 7188 break; 7189 } 7190 7191exit_new_nt_list: 7192 if (fw_ddb_entry) 7193 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7194} 7195 7196/** 7197 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7198 * @dev: dev associated with the sysfs entry 7199 * @data: pointer to flashnode session object 7200 * 7201 * Returns: 7202 * 1: if flashnode entry is non-persistent 7203 * 0: if flashnode entry is persistent 7204 **/ 7205static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7206{ 7207 struct iscsi_bus_flash_session *fnode_sess; 7208 7209 if (!iscsi_flashnode_bus_match(dev, NULL)) 7210 return 0; 7211 7212 fnode_sess = iscsi_dev_to_flash_session(dev); 7213 7214 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7215} 7216 7217/** 7218 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7219 * @ha: pointer to host 7220 * @fw_ddb_entry: flash ddb data 7221 * @idx: target index 7222 * @user: if set, this call is made from userland; otherwise from the kernel 7223 * 7224 * Returns: 7225 * On success: QLA_SUCCESS 7226 * On failure: QLA_ERROR 7227 * 7228 * This creates separate sysfs entries for session and connection attributes of 7229 * the given fw ddb entry. 7230 * If this is invoked as a result of a userspace call then the entry is marked 7231 * as nonpersistent using flash_state field.
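 *
 * The entry consists of an iscsi_bus_flash_session plus an
 * iscsi_bus_flash_conn created via iscsi_create_flashnode_sess() and
 * iscsi_create_flashnode_conn(); when called from the kernel the node is
 * additionally flagged as a boot target if @idx matches pri_ddb_idx or
 * sec_ddb_idx, and the fw ddb parameters are copied in with
 * qla4xxx_copy_from_fwddb_param(). If the connection node or the
 * parameter copy fails, the session node is destroyed and QLA_ERROR is
 * returned.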
7232 **/ 7233static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7234 struct dev_db_entry *fw_ddb_entry, 7235 uint16_t *idx, int user) 7236{ 7237 struct iscsi_bus_flash_session *fnode_sess = NULL; 7238 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7239 int rc = QLA_ERROR; 7240 7241 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7242 &qla4xxx_iscsi_transport, 0); 7243 if (!fnode_sess) { 7244 ql4_printk(KERN_ERR, ha, 7245 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7246 __func__, *idx, ha->host_no); 7247 goto exit_tgt_create; 7248 } 7249 7250 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7251 &qla4xxx_iscsi_transport, 0); 7252 if (!fnode_conn) { 7253 ql4_printk(KERN_ERR, ha, 7254 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7255 __func__, *idx, ha->host_no); 7256 goto free_sess; 7257 } 7258 7259 if (user) { 7260 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7261 } else { 7262 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7263 7264 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7265 fnode_sess->is_boot_target = 1; 7266 else 7267 fnode_sess->is_boot_target = 0; 7268 } 7269 7270 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7271 fw_ddb_entry); 7272 if (rc) 7273 goto free_sess; 7274 7275 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7276 __func__, fnode_sess->dev.kobj.name); 7277 7278 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7279 __func__, fnode_conn->dev.kobj.name); 7280 7281 return QLA_SUCCESS; 7282 7283free_sess: 7284 iscsi_destroy_flashnode_sess(fnode_sess); 7285 7286exit_tgt_create: 7287 return QLA_ERROR; 7288} 7289 7290/** 7291 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7292 * @shost: pointer to host 7293 * @buf: type of ddb entry (ipv4/ipv6) 7294 * @len: length of buf 7295 * 7296 * This creates new ddb entry in the flash by finding first free index and 7297 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7298 **/ 7299static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7300 int len) 7301{ 7302 struct scsi_qla_host *ha = to_qla_host(shost); 7303 struct dev_db_entry *fw_ddb_entry = NULL; 7304 dma_addr_t fw_ddb_entry_dma; 7305 struct device *dev; 7306 uint16_t idx = 0; 7307 uint16_t max_ddbs = 0; 7308 uint32_t options = 0; 7309 uint32_t rval = QLA_ERROR; 7310 7311 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7312 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7313 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7314 __func__)); 7315 goto exit_ddb_add; 7316 } 7317 7318 max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : 7319 MAX_DEV_DB_ENTRIES; 7320 7321 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7322 &fw_ddb_entry_dma, GFP_KERNEL); 7323 if (!fw_ddb_entry) { 7324 DEBUG2(ql4_printk(KERN_ERR, ha, 7325 "%s: Unable to allocate dma buffer\n", 7326 __func__)); 7327 goto exit_ddb_add; 7328 } 7329 7330 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7331 qla4xxx_sysfs_ddb_is_non_persistent); 7332 if (dev) { 7333 ql4_printk(KERN_ERR, ha, 7334 "%s: A non-persistent entry %s found\n", 7335 __func__, dev->kobj.name); 7336 put_device(dev); 7337 goto exit_ddb_add; 7338 } 7339 7340 /* Index 0 and 1 are reserved for boot target entries */ 7341 for (idx = 2; idx < max_ddbs; idx++) { 7342 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7343 fw_ddb_entry_dma, idx)) 7344 break; 7345 } 7346 7347 if (idx == max_ddbs) 7348 goto exit_ddb_add; 7349 7350 if (!strncasecmp("ipv6", buf, 4)) 7351 options |= IPV6_DEFAULT_DDB_ENTRY; 7352 7353 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7354 if (rval == QLA_ERROR) 7355 goto exit_ddb_add; 7356 7357 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7358 7359exit_ddb_add: 7360 if (fw_ddb_entry) 7361 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7362 fw_ddb_entry, fw_ddb_entry_dma); 7363 if (rval == QLA_SUCCESS) 7364 return idx; 7365 else 7366 return -EIO; 7367} 7368 7369/** 7370 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7371 * @fnode_sess: pointer to session attrs of flash ddb entry 7372 * @fnode_conn: pointer to connection attrs of flash ddb entry 7373 * 7374 * This writes the contents of target ddb buffer to Flash with a valid cookie 7375 * value in order to make the ddb entry persistent. 7376 **/ 7377static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7378 struct iscsi_bus_flash_conn *fnode_conn) 7379{ 7380 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7381 struct scsi_qla_host *ha = to_qla_host(shost); 7382 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7383 struct dev_db_entry *fw_ddb_entry = NULL; 7384 dma_addr_t fw_ddb_entry_dma; 7385 uint32_t options = 0; 7386 int rval = 0; 7387 7388 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7389 &fw_ddb_entry_dma, GFP_KERNEL); 7390 if (!fw_ddb_entry) { 7391 DEBUG2(ql4_printk(KERN_ERR, ha, 7392 "%s: Unable to allocate dma buffer\n", 7393 __func__)); 7394 rval = -ENOMEM; 7395 goto exit_ddb_apply; 7396 } 7397 7398 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7399 options |= IPV6_DEFAULT_DDB_ENTRY; 7400 7401 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7402 if (rval == QLA_ERROR) 7403 goto exit_ddb_apply; 7404 7405 dev_db_start_offset += (fnode_sess->target_id * 7406 sizeof(*fw_ddb_entry)); 7407 7408 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7409 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7410 7411 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7412 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7413 7414 if (rval == QLA_SUCCESS) { 7415 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7416 ql4_printk(KERN_INFO, ha, 7417 "%s: flash node %u of host %lu written to flash\n", 7418 __func__, fnode_sess->target_id, ha->host_no); 7419 } else { 7420 rval = -EIO; 7421 ql4_printk(KERN_ERR, ha, 7422 "%s: Error while writing flash node %u of host %lu to flash\n", 7423 __func__, fnode_sess->target_id, ha->host_no); 7424 } 7425 7426exit_ddb_apply: 7427 if (fw_ddb_entry) 7428 
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7429 fw_ddb_entry, fw_ddb_entry_dma); 7430 return rval; 7431} 7432 7433static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7434 struct dev_db_entry *fw_ddb_entry, 7435 uint16_t idx) 7436{ 7437 struct dev_db_entry *ddb_entry = NULL; 7438 dma_addr_t ddb_entry_dma; 7439 unsigned long wtime; 7440 uint32_t mbx_sts = 0; 7441 uint32_t state = 0, conn_err = 0; 7442 uint16_t tmo = 0; 7443 int ret = 0; 7444 7445 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7446 &ddb_entry_dma, GFP_KERNEL); 7447 if (!ddb_entry) { 7448 DEBUG2(ql4_printk(KERN_ERR, ha, 7449 "%s: Unable to allocate dma buffer\n", 7450 __func__)); 7451 return QLA_ERROR; 7452 } 7453 7454 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7455 7456 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7457 if (ret != QLA_SUCCESS) { 7458 DEBUG2(ql4_printk(KERN_ERR, ha, 7459 "%s: Unable to set ddb entry for index %d\n", 7460 __func__, idx)); 7461 goto exit_ddb_conn_open; 7462 } 7463 7464 qla4xxx_conn_open(ha, idx); 7465 7466 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7467 tmo = ((ha->def_timeout > LOGIN_TOV) && 7468 (ha->def_timeout < LOGIN_TOV * 10) ? 7469 ha->def_timeout : LOGIN_TOV); 7470 7471 DEBUG2(ql4_printk(KERN_INFO, ha, 7472 "Default time to wait for login to ddb %d\n", tmo)); 7473 7474 wtime = jiffies + (HZ * tmo); 7475 do { 7476 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7477 NULL, &state, &conn_err, NULL, 7478 NULL); 7479 if (ret == QLA_ERROR) 7480 continue; 7481 7482 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7483 state == DDB_DS_SESSION_FAILED) 7484 break; 7485 7486 schedule_timeout_uninterruptible(HZ / 10); 7487 } while (time_after(wtime, jiffies)); 7488 7489exit_ddb_conn_open: 7490 if (ddb_entry) 7491 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7492 ddb_entry, ddb_entry_dma); 7493 return ret; 7494} 7495 7496static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7497 struct dev_db_entry *fw_ddb_entry, 7498 uint16_t target_id) 7499{ 7500 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7501 struct list_head list_nt; 7502 uint16_t ddb_index; 7503 int ret = 0; 7504 7505 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7506 ql4_printk(KERN_WARNING, ha, 7507 "%s: A discovery already in progress!\n", __func__); 7508 return QLA_ERROR; 7509 } 7510 7511 INIT_LIST_HEAD(&list_nt); 7512 7513 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7514 7515 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7516 if (ret == QLA_ERROR) 7517 goto exit_login_st_clr_bit; 7518 7519 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7520 if (ret == QLA_ERROR) 7521 goto exit_login_st; 7522 7523 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7524 7525 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7526 list_del_init(&ddb_idx->list); 7527 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7528 vfree(ddb_idx); 7529 } 7530 7531exit_login_st: 7532 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7533 ql4_printk(KERN_ERR, ha, 7534 "Unable to clear DDB index = 0x%x\n", ddb_index); 7535 } 7536 7537 clear_bit(ddb_index, ha->ddb_idx_map); 7538 7539exit_login_st_clr_bit: 7540 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7541 return ret; 7542} 7543 7544static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7545 struct dev_db_entry *fw_ddb_entry, 7546 uint16_t idx) 7547{ 7548 int ret = QLA_ERROR; 7549 7550 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 
NULL); 7551 if (ret != QLA_SUCCESS) 7552 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7553 idx); 7554 else 7555 ret = -EPERM; 7556 7557 return ret; 7558} 7559 7560/** 7561 * qla4xxx_sysfs_ddb_login - Login to the specified target 7562 * @fnode_sess: pointer to session attrs of flash ddb entry 7563 * @fnode_conn: pointer to connection attrs of flash ddb entry 7564 * 7565 * This logs in to the specified target 7566 **/ 7567static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7568 struct iscsi_bus_flash_conn *fnode_conn) 7569{ 7570 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7571 struct scsi_qla_host *ha = to_qla_host(shost); 7572 struct dev_db_entry *fw_ddb_entry = NULL; 7573 dma_addr_t fw_ddb_entry_dma; 7574 uint32_t options = 0; 7575 int ret = 0; 7576 7577 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7578 ql4_printk(KERN_ERR, ha, 7579 "%s: Target info is not persistent\n", __func__); 7580 ret = -EIO; 7581 goto exit_ddb_login; 7582 } 7583 7584 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7585 &fw_ddb_entry_dma, GFP_KERNEL); 7586 if (!fw_ddb_entry) { 7587 DEBUG2(ql4_printk(KERN_ERR, ha, 7588 "%s: Unable to allocate dma buffer\n", 7589 __func__)); 7590 ret = -ENOMEM; 7591 goto exit_ddb_login; 7592 } 7593 7594 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7595 options |= IPV6_DEFAULT_DDB_ENTRY; 7596 7597 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7598 if (ret == QLA_ERROR) 7599 goto exit_ddb_login; 7600 7601 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7602 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7603 7604 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7605 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7606 fnode_sess->target_id); 7607 else 7608 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7609 fnode_sess->target_id); 7610 7611 if (ret > 0) 7612 ret = -EIO; 7613 7614exit_ddb_login: 7615 if (fw_ddb_entry) 7616 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7617 fw_ddb_entry, fw_ddb_entry_dma); 7618 return ret; 7619} 7620 7621/** 7622 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7623 * @cls_sess: pointer to session to be logged out 7624 * 7625 * This performs session log out from the specified target 7626 **/ 7627static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7628{ 7629 struct iscsi_session *sess; 7630 struct ddb_entry *ddb_entry = NULL; 7631 struct scsi_qla_host *ha; 7632 struct dev_db_entry *fw_ddb_entry = NULL; 7633 dma_addr_t fw_ddb_entry_dma; 7634 unsigned long flags; 7635 unsigned long wtime; 7636 uint32_t ddb_state; 7637 int options; 7638 int ret = 0; 7639 7640 sess = cls_sess->dd_data; 7641 ddb_entry = sess->dd_data; 7642 ha = ddb_entry->ha; 7643 7644 if (ddb_entry->ddb_type != FLASH_DDB) { 7645 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7646 __func__); 7647 ret = -ENXIO; 7648 goto exit_ddb_logout; 7649 } 7650 7651 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7652 ql4_printk(KERN_ERR, ha, 7653 "%s: Logout from boot target entry is not permitted.\n", 7654 __func__); 7655 ret = -EPERM; 7656 goto exit_ddb_logout; 7657 } 7658 7659 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7660 &fw_ddb_entry_dma, GFP_KERNEL); 7661 if (!fw_ddb_entry) { 7662 ql4_printk(KERN_ERR, ha, 7663 "%s: Unable to allocate dma buffer\n", __func__); 7664 ret = -ENOMEM; 7665 goto exit_ddb_logout; 7666 } 7667 7668 if 
(test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7669 goto ddb_logout_init; 7670 7671 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7672 fw_ddb_entry, fw_ddb_entry_dma, 7673 NULL, NULL, &ddb_state, NULL, 7674 NULL, NULL); 7675 if (ret == QLA_ERROR) 7676 goto ddb_logout_init; 7677 7678 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7679 goto ddb_logout_init; 7680 7681 /* wait until next relogin is triggered using DF_RELOGIN and 7682 * clear DF_RELOGIN to avoid invocation of further relogin 7683 */ 7684 wtime = jiffies + (HZ * RELOGIN_TOV); 7685 do { 7686 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7687 goto ddb_logout_init; 7688 7689 schedule_timeout_uninterruptible(HZ); 7690 } while ((time_after(wtime, jiffies))); 7691 7692ddb_logout_init: 7693 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7694 atomic_set(&ddb_entry->relogin_timer, 0); 7695 7696 options = LOGOUT_OPTION_CLOSE_SESSION; 7697 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7698 7699 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7700 wtime = jiffies + (HZ * LOGOUT_TOV); 7701 do { 7702 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7703 fw_ddb_entry, fw_ddb_entry_dma, 7704 NULL, NULL, &ddb_state, NULL, 7705 NULL, NULL); 7706 if (ret == QLA_ERROR) 7707 goto ddb_logout_clr_sess; 7708 7709 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7710 (ddb_state == DDB_DS_SESSION_FAILED)) 7711 goto ddb_logout_clr_sess; 7712 7713 schedule_timeout_uninterruptible(HZ); 7714 } while ((time_after(wtime, jiffies))); 7715 7716ddb_logout_clr_sess: 7717 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7718 /* 7719 * we have decremented the reference count of the driver 7720 * when we setup the session to have the driver unload 7721 * to be seamless without actually destroying the 7722 * session 7723 **/ 7724 try_module_get(qla4xxx_iscsi_transport.owner); 7725 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7726 7727 spin_lock_irqsave(&ha->hardware_lock, flags); 7728 qla4xxx_free_ddb(ha, ddb_entry); 7729 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7730 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7731 7732 iscsi_session_teardown(ddb_entry->sess); 7733 7734 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7735 ret = QLA_SUCCESS; 7736 7737exit_ddb_logout: 7738 if (fw_ddb_entry) 7739 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7740 fw_ddb_entry, fw_ddb_entry_dma); 7741 return ret; 7742} 7743 7744/** 7745 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7746 * @fnode_sess: pointer to session attrs of flash ddb entry 7747 * @fnode_conn: pointer to connection attrs of flash ddb entry 7748 * 7749 * This performs log out from the specified target 7750 **/ 7751static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7752 struct iscsi_bus_flash_conn *fnode_conn) 7753{ 7754 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7755 struct scsi_qla_host *ha = to_qla_host(shost); 7756 struct ql4_tuple_ddb *flash_tddb = NULL; 7757 struct ql4_tuple_ddb *tmp_tddb = NULL; 7758 struct dev_db_entry *fw_ddb_entry = NULL; 7759 struct ddb_entry *ddb_entry = NULL; 7760 dma_addr_t fw_ddb_dma; 7761 uint32_t next_idx = 0; 7762 uint32_t state = 0, conn_err = 0; 7763 uint16_t conn_id = 0; 7764 int idx, index; 7765 int status, ret = 0; 7766 7767 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7768 &fw_ddb_dma); 7769 if (fw_ddb_entry == NULL) { 7770 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7771 ret = -ENOMEM; 
7772 goto exit_ddb_logout; 7773 } 7774 7775 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7776 if (!flash_tddb) { 7777 ql4_printk(KERN_WARNING, ha, 7778 "%s:Memory Allocation failed.\n", __func__); 7779 ret = -ENOMEM; 7780 goto exit_ddb_logout; 7781 } 7782 7783 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7784 if (!tmp_tddb) { 7785 ql4_printk(KERN_WARNING, ha, 7786 "%s:Memory Allocation failed.\n", __func__); 7787 ret = -ENOMEM; 7788 goto exit_ddb_logout; 7789 } 7790 7791 if (!fnode_sess->targetname) { 7792 ql4_printk(KERN_ERR, ha, 7793 "%s:Cannot logout from SendTarget entry\n", 7794 __func__); 7795 ret = -EPERM; 7796 goto exit_ddb_logout; 7797 } 7798 7799 if (fnode_sess->is_boot_target) { 7800 ql4_printk(KERN_ERR, ha, 7801 "%s: Logout from boot target entry is not permitted.\n", 7802 __func__); 7803 ret = -EPERM; 7804 goto exit_ddb_logout; 7805 } 7806 7807 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7808 ISCSI_NAME_SIZE); 7809 7810 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7811 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7812 else 7813 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7814 7815 flash_tddb->tpgt = fnode_sess->tpgt; 7816 flash_tddb->port = fnode_conn->port; 7817 7818 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7819 7820 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7821 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7822 if (ddb_entry == NULL) 7823 continue; 7824 7825 if (ddb_entry->ddb_type != FLASH_DDB) 7826 continue; 7827 7828 index = ddb_entry->sess->target_id; 7829 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7830 fw_ddb_dma, NULL, &next_idx, 7831 &state, &conn_err, NULL, 7832 &conn_id); 7833 if (status == QLA_ERROR) { 7834 ret = -ENOMEM; 7835 break; 7836 } 7837 7838 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7839 7840 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7841 true); 7842 if (status == QLA_SUCCESS) { 7843 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7844 break; 7845 } 7846 } 7847 7848 if (idx == MAX_DDB_ENTRIES) 7849 ret = -ESRCH; 7850 7851exit_ddb_logout: 7852 if (flash_tddb) 7853 vfree(flash_tddb); 7854 if (tmp_tddb) 7855 vfree(tmp_tddb); 7856 if (fw_ddb_entry) 7857 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7858 7859 return ret; 7860} 7861 7862static int 7863qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7864 int param, char *buf) 7865{ 7866 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7867 struct scsi_qla_host *ha = to_qla_host(shost); 7868 struct iscsi_bus_flash_conn *fnode_conn; 7869 struct ql4_chap_table chap_tbl; 7870 struct device *dev; 7871 int parent_type; 7872 int rc = 0; 7873 7874 dev = iscsi_find_flashnode_conn(fnode_sess); 7875 if (!dev) 7876 return -EIO; 7877 7878 fnode_conn = iscsi_dev_to_flash_conn(dev); 7879 7880 switch (param) { 7881 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7882 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7883 break; 7884 case ISCSI_FLASHNODE_PORTAL_TYPE: 7885 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7886 break; 7887 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7888 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7889 break; 7890 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7891 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7892 break; 7893 case ISCSI_FLASHNODE_ENTRY_EN: 7894 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7895 break; 7896 case ISCSI_FLASHNODE_HDR_DGST_EN: 7897 rc = sprintf(buf, "%u\n", 
fnode_conn->hdrdgst_en); 7898 break; 7899 case ISCSI_FLASHNODE_DATA_DGST_EN: 7900 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7901 break; 7902 case ISCSI_FLASHNODE_IMM_DATA_EN: 7903 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7904 break; 7905 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7906 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7907 break; 7908 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7909 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7910 break; 7911 case ISCSI_FLASHNODE_PDU_INORDER: 7912 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7913 break; 7914 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7915 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7916 break; 7917 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7918 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7919 break; 7920 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7921 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7922 break; 7923 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7924 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7925 break; 7926 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7927 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7928 break; 7929 case ISCSI_FLASHNODE_ERL: 7930 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7931 break; 7932 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7933 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7934 break; 7935 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7936 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7937 break; 7938 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7939 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7940 break; 7941 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7942 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7943 break; 7944 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7945 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7946 break; 7947 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7948 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7949 break; 7950 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7951 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7952 break; 7953 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7954 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7955 break; 7956 case ISCSI_FLASHNODE_FIRST_BURST: 7957 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7958 break; 7959 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7960 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7961 break; 7962 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7963 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7964 break; 7965 case ISCSI_FLASHNODE_MAX_R2T: 7966 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7967 break; 7968 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7969 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7970 break; 7971 case ISCSI_FLASHNODE_ISID: 7972 rc = sprintf(buf, "%pm\n", fnode_sess->isid); 7973 break; 7974 case ISCSI_FLASHNODE_TSID: 7975 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7976 break; 7977 case ISCSI_FLASHNODE_PORT: 7978 rc = sprintf(buf, "%d\n", fnode_conn->port); 7979 break; 7980 case ISCSI_FLASHNODE_MAX_BURST: 7981 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7982 break; 7983 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7984 rc = sprintf(buf, "%u\n", 7985 fnode_sess->default_taskmgmt_timeout); 7986 break; 7987 case ISCSI_FLASHNODE_IPADDR: 7988 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7989 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7990 else 7991 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7992 break; 7993 case ISCSI_FLASHNODE_ALIAS: 
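		/* Optional string attributes read back as an empty line when unset. */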
7994 if (fnode_sess->targetalias) 7995 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7996 else 7997 rc = sprintf(buf, "\n"); 7998 break; 7999 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8000 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 8001 rc = sprintf(buf, "%pI6\n", 8002 fnode_conn->redirect_ipaddr); 8003 else 8004 rc = sprintf(buf, "%pI4\n", 8005 fnode_conn->redirect_ipaddr); 8006 break; 8007 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8008 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 8009 break; 8010 case ISCSI_FLASHNODE_LOCAL_PORT: 8011 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 8012 break; 8013 case ISCSI_FLASHNODE_IPV4_TOS: 8014 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 8015 break; 8016 case ISCSI_FLASHNODE_IPV6_TC: 8017 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 8018 rc = sprintf(buf, "%u\n", 8019 fnode_conn->ipv6_traffic_class); 8020 else 8021 rc = sprintf(buf, "\n"); 8022 break; 8023 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8024 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 8025 break; 8026 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8027 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 8028 rc = sprintf(buf, "%pI6\n", 8029 fnode_conn->link_local_ipv6_addr); 8030 else 8031 rc = sprintf(buf, "\n"); 8032 break; 8033 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8034 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 8035 break; 8036 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 8037 if (fnode_sess->discovery_parent_type == DDB_ISNS) 8038 parent_type = ISCSI_DISC_PARENT_ISNS; 8039 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 8040 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8041 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 8042 parent_type = ISCSI_DISC_PARENT_SENDTGT; 8043 else 8044 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8045 8046 rc = sprintf(buf, "%s\n", 8047 iscsi_get_discovery_parent_name(parent_type)); 8048 break; 8049 case ISCSI_FLASHNODE_NAME: 8050 if (fnode_sess->targetname) 8051 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 8052 else 8053 rc = sprintf(buf, "\n"); 8054 break; 8055 case ISCSI_FLASHNODE_TPGT: 8056 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 8057 break; 8058 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8059 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 8060 break; 8061 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8062 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 8063 break; 8064 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8065 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 8066 break; 8067 case ISCSI_FLASHNODE_USERNAME: 8068 if (fnode_sess->chap_auth_en) { 8069 qla4xxx_get_uni_chap_at_index(ha, 8070 chap_tbl.name, 8071 chap_tbl.secret, 8072 fnode_sess->chap_out_idx); 8073 rc = sprintf(buf, "%s\n", chap_tbl.name); 8074 } else { 8075 rc = sprintf(buf, "\n"); 8076 } 8077 break; 8078 case ISCSI_FLASHNODE_PASSWORD: 8079 if (fnode_sess->chap_auth_en) { 8080 qla4xxx_get_uni_chap_at_index(ha, 8081 chap_tbl.name, 8082 chap_tbl.secret, 8083 fnode_sess->chap_out_idx); 8084 rc = sprintf(buf, "%s\n", chap_tbl.secret); 8085 } else { 8086 rc = sprintf(buf, "\n"); 8087 } 8088 break; 8089 case ISCSI_FLASHNODE_STATSN: 8090 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 8091 break; 8092 case ISCSI_FLASHNODE_EXP_STATSN: 8093 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8094 break; 8095 case ISCSI_FLASHNODE_IS_BOOT_TGT: 8096 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8097 break; 8098 default: 8099 rc = -ENOSYS; 8100 break; 8101 } 8102 8103 put_device(dev); 8104 return 
rc; 8105} 8106 8107/** 8108 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8109 * @fnode_sess: pointer to session attrs of flash ddb entry 8110 * @fnode_conn: pointer to connection attrs of flash ddb entry 8111 * @data: Parameters and their values to update 8112 * @len: len of data 8113 * 8114 * This sets the parameter of flash ddb entry and writes them to flash 8115 **/ 8116static int 8117qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8118 struct iscsi_bus_flash_conn *fnode_conn, 8119 void *data, int len) 8120{ 8121 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8122 struct scsi_qla_host *ha = to_qla_host(shost); 8123 struct iscsi_flashnode_param_info *fnode_param; 8124 struct ql4_chap_table chap_tbl; 8125 struct nlattr *attr; 8126 uint16_t chap_out_idx = INVALID_ENTRY; 8127 int rc = QLA_ERROR; 8128 uint32_t rem = len; 8129 8130 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8131 nla_for_each_attr(attr, data, len, rem) { 8132 if (nla_len(attr) < sizeof(*fnode_param)) { 8133 rc = -EINVAL; 8134 goto exit_set_param; 8135 } 8136 8137 fnode_param = nla_data(attr); 8138 8139 switch (fnode_param->param) { 8140 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8141 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8142 break; 8143 case ISCSI_FLASHNODE_PORTAL_TYPE: 8144 memcpy(fnode_sess->portal_type, fnode_param->value, 8145 strlen(fnode_sess->portal_type)); 8146 break; 8147 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8148 fnode_sess->auto_snd_tgt_disable = 8149 fnode_param->value[0]; 8150 break; 8151 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8152 fnode_sess->discovery_sess = fnode_param->value[0]; 8153 break; 8154 case ISCSI_FLASHNODE_ENTRY_EN: 8155 fnode_sess->entry_state = fnode_param->value[0]; 8156 break; 8157 case ISCSI_FLASHNODE_HDR_DGST_EN: 8158 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8159 break; 8160 case ISCSI_FLASHNODE_DATA_DGST_EN: 8161 fnode_conn->datadgst_en = fnode_param->value[0]; 8162 break; 8163 case ISCSI_FLASHNODE_IMM_DATA_EN: 8164 fnode_sess->imm_data_en = fnode_param->value[0]; 8165 break; 8166 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8167 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8168 break; 8169 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8170 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8171 break; 8172 case ISCSI_FLASHNODE_PDU_INORDER: 8173 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8174 break; 8175 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8176 fnode_sess->chap_auth_en = fnode_param->value[0]; 8177 /* Invalidate chap index if chap auth is disabled */ 8178 if (!fnode_sess->chap_auth_en) 8179 fnode_sess->chap_out_idx = INVALID_ENTRY; 8180 8181 break; 8182 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8183 fnode_conn->snack_req_en = fnode_param->value[0]; 8184 break; 8185 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8186 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8187 break; 8188 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8189 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8190 break; 8191 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8192 fnode_sess->discovery_auth_optional = 8193 fnode_param->value[0]; 8194 break; 8195 case ISCSI_FLASHNODE_ERL: 8196 fnode_sess->erl = fnode_param->value[0]; 8197 break; 8198 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8199 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8200 break; 8201 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8202 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8203 break; 8204 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8205 
fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8206 break; 8207 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8208 fnode_conn->tcp_timer_scale = fnode_param->value[0]; 8209 break; 8210 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8211 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8212 break; 8213 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8214 fnode_conn->fragment_disable = fnode_param->value[0]; 8215 break; 8216 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8217 fnode_conn->max_recv_dlength = 8218 *(unsigned *)fnode_param->value; 8219 break; 8220 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8221 fnode_conn->max_xmit_dlength = 8222 *(unsigned *)fnode_param->value; 8223 break; 8224 case ISCSI_FLASHNODE_FIRST_BURST: 8225 fnode_sess->first_burst = 8226 *(unsigned *)fnode_param->value; 8227 break; 8228 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8229 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8230 break; 8231 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8232 fnode_sess->time2retain = 8233 *(uint16_t *)fnode_param->value; 8234 break; 8235 case ISCSI_FLASHNODE_MAX_R2T: 8236 fnode_sess->max_r2t = 8237 *(uint16_t *)fnode_param->value; 8238 break; 8239 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8240 fnode_conn->keepalive_timeout = 8241 *(uint16_t *)fnode_param->value; 8242 break; 8243 case ISCSI_FLASHNODE_ISID: 8244 memcpy(fnode_sess->isid, fnode_param->value, 8245 sizeof(fnode_sess->isid)); 8246 break; 8247 case ISCSI_FLASHNODE_TSID: 8248 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8249 break; 8250 case ISCSI_FLASHNODE_PORT: 8251 fnode_conn->port = *(uint16_t *)fnode_param->value; 8252 break; 8253 case ISCSI_FLASHNODE_MAX_BURST: 8254 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8255 break; 8256 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8257 fnode_sess->default_taskmgmt_timeout = 8258 *(uint16_t *)fnode_param->value; 8259 break; 8260 case ISCSI_FLASHNODE_IPADDR: 8261 memcpy(fnode_conn->ipaddress, fnode_param->value, 8262 IPv6_ADDR_LEN); 8263 break; 8264 case ISCSI_FLASHNODE_ALIAS: 8265 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8266 (char *)fnode_param->value); 8267 break; 8268 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8269 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8270 IPv6_ADDR_LEN); 8271 break; 8272 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8273 fnode_conn->max_segment_size = 8274 *(unsigned *)fnode_param->value; 8275 break; 8276 case ISCSI_FLASHNODE_LOCAL_PORT: 8277 fnode_conn->local_port = 8278 *(uint16_t *)fnode_param->value; 8279 break; 8280 case ISCSI_FLASHNODE_IPV4_TOS: 8281 fnode_conn->ipv4_tos = fnode_param->value[0]; 8282 break; 8283 case ISCSI_FLASHNODE_IPV6_TC: 8284 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8285 break; 8286 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8287 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8288 break; 8289 case ISCSI_FLASHNODE_NAME: 8290 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8291 (char *)fnode_param->value); 8292 break; 8293 case ISCSI_FLASHNODE_TPGT: 8294 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8295 break; 8296 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8297 memcpy(fnode_conn->link_local_ipv6_addr, 8298 fnode_param->value, IPv6_ADDR_LEN); 8299 break; 8300 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8301 fnode_sess->discovery_parent_idx = 8302 *(uint16_t *)fnode_param->value; 8303 break; 8304 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8305 fnode_conn->tcp_xmit_wsf = 8306 *(uint8_t *)fnode_param->value; 8307 break; 8308 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8309 fnode_conn->tcp_recv_wsf = 8310 *(uint8_t *)fnode_param->value; 
8311 break; 8312 case ISCSI_FLASHNODE_STATSN: 8313 fnode_conn->statsn = *(uint32_t *)fnode_param->value; 8314 break; 8315 case ISCSI_FLASHNODE_EXP_STATSN: 8316 fnode_conn->exp_statsn = 8317 *(uint32_t *)fnode_param->value; 8318 break; 8319 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8320 chap_out_idx = *(uint16_t *)fnode_param->value; 8321 if (!qla4xxx_get_uni_chap_at_index(ha, 8322 chap_tbl.name, 8323 chap_tbl.secret, 8324 chap_out_idx)) { 8325 fnode_sess->chap_out_idx = chap_out_idx; 8326 /* Enable chap auth if chap index is valid */ 8327 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8328 } 8329 break; 8330 default: 8331 ql4_printk(KERN_ERR, ha, 8332 "%s: No such sysfs attribute\n", __func__); 8333 rc = -ENOSYS; 8334 goto exit_set_param; 8335 } 8336 } 8337 8338 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8339 8340exit_set_param: 8341 return rc; 8342} 8343 8344/** 8345 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8346 * @fnode_sess: pointer to session attrs of flash ddb entry 8347 * 8348 * This invalidates the flash ddb entry at the given index 8349 **/ 8350static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8351{ 8352 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8353 struct scsi_qla_host *ha = to_qla_host(shost); 8354 uint32_t dev_db_start_offset; 8355 uint32_t dev_db_end_offset; 8356 struct dev_db_entry *fw_ddb_entry = NULL; 8357 dma_addr_t fw_ddb_entry_dma; 8358 uint16_t *ddb_cookie = NULL; 8359 size_t ddb_size = 0; 8360 void *pddb = NULL; 8361 int target_id; 8362 int rc = 0; 8363 8364 if (fnode_sess->is_boot_target) { 8365 rc = -EPERM; 8366 DEBUG2(ql4_printk(KERN_ERR, ha, 8367 "%s: Deletion of boot target entry is not permitted.\n", 8368 __func__)); 8369 goto exit_ddb_del; 8370 } 8371 8372 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8373 goto sysfs_ddb_del; 8374 8375 if (is_qla40XX(ha)) { 8376 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8377 dev_db_end_offset = FLASH_OFFSET_DB_END; 8378 dev_db_start_offset += (fnode_sess->target_id * 8379 sizeof(*fw_ddb_entry)); 8380 ddb_size = sizeof(*fw_ddb_entry); 8381 } else { 8382 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8383 (ha->hw.flt_region_ddb << 2); 8384 /* flt_ddb_size is DDB table size for both ports 8385 * so divide it by 2 to calculate the offset for second port 8386 */ 8387 if (ha->port_num == 1) 8388 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8389 8390 dev_db_end_offset = dev_db_start_offset + 8391 (ha->hw.flt_ddb_size / 2); 8392 8393 dev_db_start_offset += (fnode_sess->target_id * 8394 sizeof(*fw_ddb_entry)); 8395 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8396 8397 ddb_size = sizeof(*ddb_cookie); 8398 } 8399 8400 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8401 __func__, dev_db_start_offset, dev_db_end_offset)); 8402 8403 if (dev_db_start_offset > dev_db_end_offset) { 8404 rc = -EIO; 8405 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8406 __func__, fnode_sess->target_id)); 8407 goto exit_ddb_del; 8408 } 8409 8410 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8411 &fw_ddb_entry_dma, GFP_KERNEL); 8412 if (!pddb) { 8413 rc = -ENOMEM; 8414 DEBUG2(ql4_printk(KERN_ERR, ha, 8415 "%s: Unable to allocate dma buffer\n", 8416 __func__)); 8417 goto exit_ddb_del; 8418 } 8419 8420 if (is_qla40XX(ha)) { 8421 fw_ddb_entry = pddb; 8422 memset(fw_ddb_entry, 0, ddb_size); 8423 ddb_cookie = &fw_ddb_entry->cookie; 8424 } else { 8425 ddb_cookie = pddb; 8426 } 8427 8428 /* invalidate the cookie */ 8429 
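	/*
	 * On 40XX the whole entry is rewritten (zeroed except for the cookie);
	 * on 80XX only the two-byte cookie word is rewritten at its flash
	 * offset.  0xFFEE is presumably just a value different from
	 * DDB_VALID_COOKIE, which is enough for the entry to be treated as
	 * invalid.
	 */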
*ddb_cookie = 0xFFEE; 8430 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 8431 ddb_size, FLASH_OPT_RMW_COMMIT); 8432 8433sysfs_ddb_del: 8434 target_id = fnode_sess->target_id; 8435 iscsi_destroy_flashnode_sess(fnode_sess); 8436 ql4_printk(KERN_INFO, ha, 8437 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8438 __func__, target_id, ha->host_no); 8439exit_ddb_del: 8440 if (pddb) 8441 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8442 fw_ddb_entry_dma); 8443 return rc; 8444} 8445 8446/** 8447 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8448 * @ha: pointer to adapter structure 8449 * 8450 * Export the firmware DDB for all send targets and normal targets to sysfs. 8451 **/ 8452int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8453{ 8454 struct dev_db_entry *fw_ddb_entry = NULL; 8455 dma_addr_t fw_ddb_entry_dma; 8456 uint16_t max_ddbs; 8457 uint16_t idx = 0; 8458 int ret = QLA_SUCCESS; 8459 8460 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8461 sizeof(*fw_ddb_entry), 8462 &fw_ddb_entry_dma, GFP_KERNEL); 8463 if (!fw_ddb_entry) { 8464 DEBUG2(ql4_printk(KERN_ERR, ha, 8465 "%s: Unable to allocate dma buffer\n", 8466 __func__)); 8467 return -ENOMEM; 8468 } 8469 8470 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8471 MAX_DEV_DB_ENTRIES; 8472 8473 for (idx = 0; idx < max_ddbs; idx++) { 8474 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8475 idx)) 8476 continue; 8477 8478 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8479 if (ret) { 8480 ret = -EIO; 8481 break; 8482 } 8483 } 8484 8485 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8486 fw_ddb_entry_dma); 8487 8488 return ret; 8489} 8490 8491static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8492{ 8493 iscsi_destroy_all_flashnode(ha->host); 8494} 8495 8496/** 8497 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8498 * @ha: pointer to adapter structure 8499 * @is_reset: Is this init path or reset path 8500 * 8501 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8502 * using connection open, then create the list of normal targets (nt) 8503 * from firmware DDBs. Based on the list of nt setup session and connection 8504 * objects. 8505 **/ 8506void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8507{ 8508 uint16_t tmo = 0; 8509 struct list_head list_st, list_nt; 8510 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8511 unsigned long wtime; 8512 8513 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8514 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8515 ha->is_reset = is_reset; 8516 return; 8517 } 8518 8519 INIT_LIST_HEAD(&list_st); 8520 INIT_LIST_HEAD(&list_nt); 8521 8522 qla4xxx_build_st_list(ha, &list_st); 8523 8524 /* Before issuing conn open mbox, ensure all IPs states are configured 8525 * Note, conn open fails if IPs are not configured 8526 */ 8527 qla4xxx_wait_for_ip_configuration(ha); 8528 8529 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8530 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8531 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8532 } 8533 8534 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8535 tmo = ((ha->def_timeout > LOGIN_TOV) && 8536 (ha->def_timeout < LOGIN_TOV * 10) ? 
8537 ha->def_timeout : LOGIN_TOV); 8538 8539 DEBUG2(ql4_printk(KERN_INFO, ha, 8540 "Default time to wait for build ddb %d\n", tmo)); 8541 8542 wtime = jiffies + (HZ * tmo); 8543 do { 8544 if (list_empty(&list_st)) 8545 break; 8546 8547 qla4xxx_remove_failed_ddb(ha, &list_st); 8548 schedule_timeout_uninterruptible(HZ / 10); 8549 } while (time_after(wtime, jiffies)); 8550 8551 8552 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); 8553 8554 qla4xxx_free_ddb_list(&list_st); 8555 qla4xxx_free_ddb_list(&list_nt); 8556 8557 qla4xxx_free_ddb_index(ha); 8558} 8559 8560/** 8561 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login 8562 * response. 8563 * @ha: pointer to adapter structure 8564 * 8565 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be 8566 * set in DDB and we will wait for login response of boot targets during 8567 * probe. 8568 **/ 8569static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) 8570{ 8571 struct ddb_entry *ddb_entry; 8572 struct dev_db_entry *fw_ddb_entry = NULL; 8573 dma_addr_t fw_ddb_entry_dma; 8574 unsigned long wtime; 8575 uint32_t ddb_state; 8576 int max_ddbs, idx, ret; 8577 8578 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 8579 MAX_DEV_DB_ENTRIES; 8580 8581 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8582 &fw_ddb_entry_dma, GFP_KERNEL); 8583 if (!fw_ddb_entry) { 8584 ql4_printk(KERN_ERR, ha, 8585 "%s: Unable to allocate dma buffer\n", __func__); 8586 goto exit_login_resp; 8587 } 8588 8589 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); 8590 8591 for (idx = 0; idx < max_ddbs; idx++) { 8592 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8593 if (ddb_entry == NULL) 8594 continue; 8595 8596 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 8597 DEBUG2(ql4_printk(KERN_INFO, ha, 8598 "%s: DDB index [%d]\n", __func__, 8599 ddb_entry->fw_ddb_index)); 8600 do { 8601 ret = qla4xxx_get_fwddb_entry(ha, 8602 ddb_entry->fw_ddb_index, 8603 fw_ddb_entry, fw_ddb_entry_dma, 8604 NULL, NULL, &ddb_state, NULL, 8605 NULL, NULL); 8606 if (ret == QLA_ERROR) 8607 goto exit_login_resp; 8608 8609 if ((ddb_state == DDB_DS_SESSION_ACTIVE) || 8610 (ddb_state == DDB_DS_SESSION_FAILED)) 8611 break; 8612 8613 schedule_timeout_uninterruptible(HZ); 8614 8615 } while ((time_after(wtime, jiffies))); 8616 8617 if (!time_after(wtime, jiffies)) { 8618 DEBUG2(ql4_printk(KERN_INFO, ha, 8619 "%s: Login response wait timer expired\n", 8620 __func__)); 8621 goto exit_login_resp; 8622 } 8623 } 8624 } 8625 8626exit_login_resp: 8627 if (fw_ddb_entry) 8628 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8629 fw_ddb_entry, fw_ddb_entry_dma); 8630} 8631 8632/** 8633 * qla4xxx_probe_adapter - callback function to probe HBA 8634 * @pdev: pointer to pci_dev structure 8635 * @ent: pointer to pci_device entry 8636 * 8637 * This routine will probe for Qlogic 4xxx iSCSI host adapters. 8638 * It returns zero if successful. It also initializes all data necessary for 8639 * the driver. 
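 * On success the host is registered with the SCSI midlayer, the DPC is
 * flagged to export flash DDBs to sysfs (DPC_SYSFS_DDB_EXPORT), flash DDB
 * sessions are built and logged in, and the network ifaces are created.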
8640 **/ 8641static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8642 const struct pci_device_id *ent) 8643{ 8644 int ret = -ENODEV, status; 8645 struct Scsi_Host *host; 8646 struct scsi_qla_host *ha; 8647 uint8_t init_retry_count = 0; 8648 char buf[34]; 8649 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8650 uint32_t dev_state; 8651 8652 if (pci_enable_device(pdev)) 8653 return -1; 8654 8655 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8656 if (host == NULL) { 8657 printk(KERN_WARNING 8658 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8659 goto probe_disable_device; 8660 } 8661 8662 /* Clear our data area */ 8663 ha = to_qla_host(host); 8664 memset(ha, 0, sizeof(*ha)); 8665 8666 /* Save the information from PCI BIOS. */ 8667 ha->pdev = pdev; 8668 ha->host = host; 8669 ha->host_no = host->host_no; 8670 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8671 8672 pci_enable_pcie_error_reporting(pdev); 8673 8674 /* Setup Runtime configurable options */ 8675 if (is_qla8022(ha)) { 8676 ha->isp_ops = &qla4_82xx_isp_ops; 8677 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8678 ha->qdr_sn_window = -1; 8679 ha->ddr_mn_window = -1; 8680 ha->curr_window = 255; 8681 nx_legacy_intr = &legacy_intr[ha->func_num]; 8682 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8683 ha->nx_legacy_intr.tgt_status_reg = 8684 nx_legacy_intr->tgt_status_reg; 8685 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8686 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8687 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8688 ha->isp_ops = &qla4_83xx_isp_ops; 8689 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8690 } else { 8691 ha->isp_ops = &qla4xxx_isp_ops; 8692 } 8693 8694 if (is_qla80XX(ha)) { 8695 rwlock_init(&ha->hw_lock); 8696 ha->pf_bit = ha->func_num << 16; 8697 /* Set EEH reset type to fundamental if required by hba */ 8698 pdev->needs_freset = 1; 8699 } 8700 8701 /* Configure PCI I/O space. */ 8702 ret = ha->isp_ops->iospace_config(ha); 8703 if (ret) 8704 goto probe_failed_ioconfig; 8705 8706 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8707 pdev->device, pdev->irq, ha->reg); 8708 8709 qla4xxx_config_dma_addressing(ha); 8710 8711 /* Initialize lists and spinlocks. */ 8712 INIT_LIST_HEAD(&ha->free_srb_q); 8713 8714 mutex_init(&ha->mbox_sem); 8715 mutex_init(&ha->chap_sem); 8716 init_completion(&ha->mbx_intr_comp); 8717 init_completion(&ha->disable_acb_comp); 8718 init_completion(&ha->idc_comp); 8719 init_completion(&ha->link_up_comp); 8720 8721 spin_lock_init(&ha->hardware_lock); 8722 spin_lock_init(&ha->work_lock); 8723 8724 /* Initialize work list */ 8725 INIT_LIST_HEAD(&ha->work_list); 8726 8727 /* Allocate dma buffers */ 8728 if (qla4xxx_mem_alloc(ha)) { 8729 ql4_printk(KERN_WARNING, ha, 8730 "[ERROR] Failed to allocate memory for adapter\n"); 8731 8732 ret = -ENOMEM; 8733 goto probe_failed; 8734 } 8735 8736 host->cmd_per_lun = 3; 8737 host->max_channel = 0; 8738 host->max_lun = MAX_LUNS - 1; 8739 host->max_id = MAX_TARGETS; 8740 host->max_cmd_len = IOCB_MAX_CDB_LEN; 8741 host->can_queue = MAX_SRBS ; 8742 host->transportt = qla4xxx_scsi_transport; 8743 8744 pci_set_drvdata(pdev, ha); 8745 8746 ret = scsi_add_host(host, &pdev->dev); 8747 if (ret) 8748 goto probe_failed; 8749 8750 if (is_qla80XX(ha)) 8751 qla4_8xxx_get_flash_info(ha); 8752 8753 if (is_qla8032(ha) || is_qla8042(ha)) { 8754 qla4_83xx_read_reset_template(ha); 8755 /* 8756 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. 
		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset.
		 */
		if (ql4xdontresethba == 1)
			qla4_83xx_set_idc_dontreset(ha);
	}

	/*
	 * Initialize the Host adapter request/response queues and
	 * firmware
	 * NOTE: interrupts enabled upon successful completion
	 */
	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);

	/* Don't retry adapter initialization if IRQ allocation failed */
	if (is_qla80XX(ha) && (status == QLA_ERROR))
		goto skip_retry_init;

	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
	    init_retry_count++ < MAX_INIT_RETRIES) {

		if (is_qla80XX(ha)) {
			ha->isp_ops->idc_lock(ha);
			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
			ha->isp_ops->idc_unlock(ha);
			if (dev_state == QLA8XXX_DEV_FAILED) {
				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
					   "initialize adapter. H/W is in failed state\n",
					   __func__);
				break;
			}
		}
		DEBUG2(printk("scsi: %s: retrying adapter initialization "
			      "(%d)\n", __func__, init_retry_count));

		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
			continue;

		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
				goto skip_retry_init;
		}
	}

skip_retry_init:
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");

		if ((is_qla8022(ha) && ql4xdontresethba) ||
		    ((is_qla8032(ha) || is_qla8042(ha)) &&
		     qla4_83xx_idc_dontreset(ha))) {
			/* Put the device in failed state. */
			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
		}
		ret = -ENODEV;
		goto remove_host;
	}

	/* Startup the kernel thread for this host adapter. */
	DEBUG2(printk("scsi: %s: Starting kernel thread for "
		      "qla4xxx_dpc\n", __func__));
	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
	ha->dpc_thread = create_singlethread_workqueue(buf);
	if (!ha->dpc_thread) {
		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
		ret = -ENODEV;
		goto remove_host;
	}
	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);

	ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
				      ha->host_no);
	if (!ha->task_wq) {
		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
		ret = -ENODEV;
		goto remove_host;
	}

	/*
	 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
	 * (which is called indirectly by qla4xxx_initialize_adapter),
	 * so that irqs will be registered after crbinit but before
	 * mbx_intr_enable.
	 */
	if (is_qla40XX(ha)) {
		ret = qla4xxx_request_irqs(ha);
		if (ret) {
			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
				   "interrupt %d already in use.\n", pdev->irq);
			goto remove_host;
		}
	}

	pci_save_state(ha->pdev);
	ha->isp_ops->enable_intrs(ha);

	/* Start timer thread.
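	 * qla4xxx_start_timer(ha, 1) below presumably arms a one-second
	 * periodic timer that handles relogin/heartbeat housekeeping and
	 * kicks the DPC when deferred work is pending.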
*/ 8861 qla4xxx_start_timer(ha, 1); 8862 8863 set_bit(AF_INIT_DONE, &ha->flags); 8864 8865 qla4_8xxx_alloc_sysfs_attr(ha); 8866 8867 printk(KERN_INFO 8868 " QLogic iSCSI HBA Driver version: %s\n" 8869 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8870 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8871 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8872 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8873 8874 /* Set the driver version */ 8875 if (is_qla80XX(ha)) 8876 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8877 8878 if (qla4xxx_setup_boot_info(ha)) 8879 ql4_printk(KERN_ERR, ha, 8880 "%s: No iSCSI boot target configured\n", __func__); 8881 8882 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); 8883 /* Perform the build ddb list and login to each */ 8884 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8885 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8886 qla4xxx_wait_login_resp_boot_tgt(ha); 8887 8888 qla4xxx_create_chap_list(ha); 8889 8890 qla4xxx_create_ifaces(ha); 8891 return 0; 8892 8893remove_host: 8894 scsi_remove_host(ha->host); 8895 8896probe_failed: 8897 qla4xxx_free_adapter(ha); 8898 8899probe_failed_ioconfig: 8900 pci_disable_pcie_error_reporting(pdev); 8901 scsi_host_put(ha->host); 8902 8903probe_disable_device: 8904 pci_disable_device(pdev); 8905 8906 return ret; 8907} 8908 8909/** 8910 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8911 * @ha: pointer to adapter structure 8912 * 8913 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8914 * so that the other port will not re-initialize while in the process of 8915 * removing the ha due to driver unload or hba hotplug. 8916 **/ 8917static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) 8918{ 8919 struct scsi_qla_host *other_ha = NULL; 8920 struct pci_dev *other_pdev = NULL; 8921 int fn = ISP4XXX_PCI_FN_2; 8922 8923 /*iscsi function numbers for ISP4xxx is 1 and 3*/ 8924 if (PCI_FUNC(ha->pdev->devfn) & BIT_1) 8925 fn = ISP4XXX_PCI_FN_1; 8926 8927 other_pdev = 8928 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 8929 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 8930 fn)); 8931 8932 /* Get other_ha if other_pdev is valid and state is enable*/ 8933 if (other_pdev) { 8934 if (atomic_read(&other_pdev->enable_cnt)) { 8935 other_ha = pci_get_drvdata(other_pdev); 8936 if (other_ha) { 8937 set_bit(AF_HA_REMOVAL, &other_ha->flags); 8938 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " 8939 "Prevent %s reinit\n", __func__, 8940 dev_name(&other_ha->pdev->dev))); 8941 } 8942 } 8943 pci_dev_put(other_pdev); 8944 } 8945} 8946 8947static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, 8948 struct ddb_entry *ddb_entry) 8949{ 8950 struct dev_db_entry *fw_ddb_entry = NULL; 8951 dma_addr_t fw_ddb_entry_dma; 8952 unsigned long wtime; 8953 uint32_t ddb_state; 8954 int options; 8955 int status; 8956 8957 options = LOGOUT_OPTION_CLOSE_SESSION; 8958 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { 8959 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 8960 goto clear_ddb; 8961 } 8962 8963 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8964 &fw_ddb_entry_dma, GFP_KERNEL); 8965 if (!fw_ddb_entry) { 8966 ql4_printk(KERN_ERR, ha, 8967 "%s: Unable to allocate dma buffer\n", __func__); 8968 goto clear_ddb; 8969 } 8970 8971 wtime = jiffies + (HZ * LOGOUT_TOV); 8972 do { 8973 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 8974 fw_ddb_entry, 
fw_ddb_entry_dma, 8975 NULL, NULL, &ddb_state, NULL, 8976 NULL, NULL); 8977 if (status == QLA_ERROR) 8978 goto free_ddb; 8979 8980 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 8981 (ddb_state == DDB_DS_SESSION_FAILED)) 8982 goto free_ddb; 8983 8984 schedule_timeout_uninterruptible(HZ); 8985 } while ((time_after(wtime, jiffies))); 8986 8987free_ddb: 8988 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8989 fw_ddb_entry, fw_ddb_entry_dma); 8990clear_ddb: 8991 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 8992} 8993 8994static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) 8995{ 8996 struct ddb_entry *ddb_entry; 8997 int idx; 8998 8999 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 9000 9001 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 9002 if ((ddb_entry != NULL) && 9003 (ddb_entry->ddb_type == FLASH_DDB)) { 9004 9005 qla4xxx_destroy_ddb(ha, ddb_entry); 9006 /* 9007 * we have decremented the reference count of the driver 9008 * when we setup the session to have the driver unload 9009 * to be seamless without actually destroying the 9010 * session 9011 **/ 9012 try_module_get(qla4xxx_iscsi_transport.owner); 9013 iscsi_destroy_endpoint(ddb_entry->conn->ep); 9014 qla4xxx_free_ddb(ha, ddb_entry); 9015 iscsi_session_teardown(ddb_entry->sess); 9016 } 9017 } 9018} 9019/** 9020 * qla4xxx_remove_adapter - callback function to remove adapter. 9021 * @pdev: PCI device pointer 9022 **/ 9023static void qla4xxx_remove_adapter(struct pci_dev *pdev) 9024{ 9025 struct scsi_qla_host *ha; 9026 9027 /* 9028 * If the PCI device is disabled then it means probe_adapter had 9029 * failed and resources already cleaned up on probe_adapter exit. 9030 */ 9031 if (!pci_is_enabled(pdev)) 9032 return; 9033 9034 ha = pci_get_drvdata(pdev); 9035 9036 if (is_qla40XX(ha)) 9037 qla4xxx_prevent_other_port_reinit(ha); 9038 9039 /* destroy iface from sysfs */ 9040 qla4xxx_destroy_ifaces(ha); 9041 9042 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 9043 iscsi_boot_destroy_kset(ha->boot_kset); 9044 9045 qla4xxx_destroy_fw_ddb_session(ha); 9046 qla4_8xxx_free_sysfs_attr(ha); 9047 9048 qla4xxx_sysfs_ddb_remove(ha); 9049 scsi_remove_host(ha->host); 9050 9051 qla4xxx_free_adapter(ha); 9052 9053 scsi_host_put(ha->host); 9054 9055 pci_disable_pcie_error_reporting(pdev); 9056 pci_disable_device(pdev); 9057} 9058 9059/** 9060 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9061 * @ha: HA context 9062 */ 9063static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9064{ 9065 /* Update our PCI device dma_mask for full 64 bit mask */ 9066 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { 9067 dev_dbg(&ha->pdev->dev, 9068 "Failed to set 64 bit PCI consistent mask; " 9069 "using 32 bit.\n"); 9070 dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32)); 9071 } 9072} 9073 9074static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9075{ 9076 struct iscsi_cls_session *cls_sess; 9077 struct iscsi_session *sess; 9078 struct ddb_entry *ddb; 9079 int queue_depth = QL4_DEF_QDEPTH; 9080 9081 cls_sess = starget_to_session(sdev->sdev_target); 9082 sess = cls_sess->dd_data; 9083 ddb = sess->dd_data; 9084 9085 sdev->hostdata = ddb; 9086 9087 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9088 queue_depth = ql4xmaxqdepth; 9089 9090 scsi_change_queue_depth(sdev, queue_depth); 9091 return 0; 9092} 9093 9094/** 9095 * qla4xxx_del_from_active_array - returns an active srb 9096 * @ha: Pointer to host adapter structure. 
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
{
	struct iscsi_cls_session *cls_sess;
	struct iscsi_session *sess;
	struct ddb_entry *ddb;
	int queue_depth = QL4_DEF_QDEPTH;

	cls_sess = starget_to_session(sdev->sdev_target);
	sess = cls_sess->dd_data;
	ddb = sess->dd_data;

	sdev->hostdata = ddb;

	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
		queue_depth = ql4xmaxqdepth;

	scsi_change_queue_depth(sdev, queue_depth);
	return 0;
}

/**
 * qla4xxx_del_from_active_array - returns an active srb
 * @ha: Pointer to host adapter structure.
 * @index: index into the active_array
 *
 * This routine removes and returns the srb at the specified index.
 **/
struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
					  uint32_t index)
{
	struct srb *srb = NULL;
	struct scsi_cmnd *cmd = NULL;

	cmd = scsi_host_find_tag(ha->host, index);
	if (!cmd)
		return srb;

	srb = (struct srb *)CMD_SP(cmd);
	if (!srb)
		return srb;

	/* update counters */
	if (srb->flags & SRB_DMA_VALID) {
		ha->iocb_cnt -= srb->iocb_cnt;
		if (srb->cmd)
			srb->cmd->host_scribble =
				(unsigned char *)(unsigned long) MAX_SRBS;
	}
	return srb;
}

/**
 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
 * @ha: Pointer to host adapter structure.
 * @cmd: Scsi Command to wait on.
 *
 * This routine waits for the command to be returned by the Firmware
 * for some max time.
 **/
static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
				      struct scsi_cmnd *cmd)
{
	int done = 0;
	struct srb *rp;
	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
	int ret = SUCCESS;

	/* Don't wait on the command if a PCI error is being handled
	 * by the PCI AER driver.
	 */
	if (unlikely(pci_channel_offline(ha->pdev)) ||
	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
			   ha->host_no, __func__);
		return ret;
	}

	do {
		/* Check whether the command has been returned to the OS */
		rp = (struct srb *) CMD_SP(cmd);
		if (rp == NULL) {
			done++;
			break;
		}

		msleep(2000);
	} while (max_wait_time--);

	return done;
}

/**
 * qla4xxx_wait_for_hba_online - waits for HBA to come online
 * @ha: Pointer to host adapter structure
 **/
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
{
	unsigned long wait_online;

	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
	while (time_before(jiffies, wait_online)) {

		if (adapter_up(ha))
			return QLA_SUCCESS;

		msleep(2000);
	}

	return QLA_ERROR;
}

/**
 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
 * @ha: pointer to HBA
 * @stgt: pointer to SCSI target
 * @sdev: pointer to SCSI device
 *
 * This function waits for all outstanding commands to the given target (or,
 * if @sdev is set, the given LUN) to complete. It returns 0 if all pending
 * commands are returned and 1 otherwise.
 **/
static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
					struct scsi_target *stgt,
					struct scsi_device *sdev)
{
	int cnt;
	int status = 0;
	struct scsi_cmnd *cmd;

	/*
	 * Wait for all commands to the designated target or device
	 * in the active array.
	 */
	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
		cmd = scsi_host_find_tag(ha->host, cnt);
		if (cmd && stgt == scsi_target(cmd->device) &&
		    (!sdev || sdev == cmd->device)) {
			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
				status++;
				break;
			}
		}
	}
	return status;
}
/**
 * qla4xxx_eh_abort - callback for abort task.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to abort the specified
 * command.
 **/
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	unsigned int id = cmd->device->id;
	uint64_t lun = cmd->device->lun;
	unsigned long flags;
	struct srb *srb = NULL;
	int ret = SUCCESS;
	int wait = 0;
	int rval;

	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);

	rval = qla4xxx_isp_check_reg(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	srb = (struct srb *) CMD_SP(cmd);
	if (!srb) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
			   ha->host_no, id, lun);
		return SUCCESS;
	}
	kref_get(&srb->srb_ref);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
		DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
			      ha->host_no, id, lun));
		ret = FAILED;
	} else {
		DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
			      ha->host_no, id, lun));
		wait = 1;
	}

	kref_put(&srb->srb_ref, qla4xxx_srb_compl);

	/* Wait for command to complete */
	if (wait) {
		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
			DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
				      ha->host_no, id, lun));
			ret = FAILED;
		}
	}

	ql4_printk(KERN_INFO, ha,
		   "scsi%ld:%d:%llu: Abort command - %s\n",
		   ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");

	return ret;
}
/**
 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the LUN addressed by
 * the specified command.
 **/
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int ret = FAILED, stat;
	int rval;

	if (!ddb_entry)
		return ret;

	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;
	ret = FAILED;

	ql4_printk(KERN_INFO, ha,
		   "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
		      cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	rval = qla4xxx_isp_check_reg(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	/* FIXME: wait for hba to go online */
	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
	if (stat != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
		goto eh_dev_reset_done;
	}

	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 cmd->device)) {
		ql4_printk(KERN_INFO, ha,
			   "DEVICE RESET FAILED - waiting for "
			   "commands.\n");
		goto eh_dev_reset_done;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
				     MM_LUN_RESET) != QLA_SUCCESS)
		goto eh_dev_reset_done;

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
		   ha->host_no, cmd->device->channel, cmd->device->id,
		   cmd->device->lun);

	ret = SUCCESS;

eh_dev_reset_done:

	return ret;
}

/**
 * qla4xxx_eh_target_reset - callback for target reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the target.
 **/
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int stat, ret;
	int rval;

	if (!ddb_entry)
		return FAILED;

	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET ISSUED.\n");

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	rval = qla4xxx_isp_check_reg(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	stat = qla4xxx_reset_target(ha, ddb_entry);
	if (stat != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET RESET FAILED.\n");
		return FAILED;
	}

	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 NULL)) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "waiting for commands.\n");
		return FAILED;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
				     MM_TGT_WARM_RESET) != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "marker iocb failed.\n");
		return FAILED;
	}

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET SUCCEEDED.\n");
	return SUCCESS;
}

/**
 * qla4xxx_is_eh_active - check if error handler is running
 * @shost: Pointer to SCSI Host struct
 *
 * This routine checks whether the host reset was requested from the SCSI
 * error-handler context or from an application such as sg_reset.
 **/
static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
{
	if (shost->shost_state == SHOST_RECOVERY)
		return 1;
	return 0;
}
/**
 * qla4xxx_eh_host_reset - callback for host reset
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is invoked by the Linux kernel to perform fatal error
 * recovery on the specified adapter.
 **/
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	int return_status = FAILED;
	struct scsi_qla_host *ha;
	int rval;

	ha = to_qla_host(cmd->device->host);

	rval = qla4xxx_isp_check_reg(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
		qla4_83xx_set_idc_dontreset(ha);

	/*
	 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
	 * protocol drivers, we should not set device_state to NEED_RESET.
	 */
	if (ql4xdontresethba ||
	    ((is_qla8032(ha) || is_qla8042(ha)) &&
	     qla4_83xx_idc_dontreset(ha))) {
		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
			      ha->host_no, __func__));

		/* Clear outstanding srb in queues */
		if (qla4xxx_is_eh_active(cmd->device->host))
			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);

		return FAILED;
	}

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
			      "DEAD.\n", ha->host_no, cmd->device->channel,
			      __func__));

		return FAILED;
	}

	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		if (is_qla80XX(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}

	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
		return_status = SUCCESS;

	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
		   return_status == FAILED ? "FAILED" : "SUCCEEDED");

	return return_status;
}
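/**
 * qla4xxx_context_reset - reset the firmware context without a full HBA reset
 * @ha: Pointer to host adapter structure
 *
 * Saves the primary ACB, disables it, waits for the disable to complete and
 * then restores the saved ACB so that the firmware context is re-initialized.
 **/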
"SUCCEEDED" : "FAILED")); 9550 return rval; 9551} 9552 9553static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) 9554{ 9555 struct scsi_qla_host *ha = to_qla_host(shost); 9556 int rval = QLA_SUCCESS; 9557 uint32_t idc_ctrl; 9558 9559 if (ql4xdontresethba) { 9560 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 9561 __func__)); 9562 rval = -EPERM; 9563 goto exit_host_reset; 9564 } 9565 9566 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 9567 goto recover_adapter; 9568 9569 switch (reset_type) { 9570 case SCSI_ADAPTER_RESET: 9571 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9572 break; 9573 case SCSI_FIRMWARE_RESET: 9574 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9575 if (is_qla80XX(ha)) 9576 /* set firmware context reset */ 9577 set_bit(DPC_RESET_HA_FW_CONTEXT, 9578 &ha->dpc_flags); 9579 else { 9580 rval = qla4xxx_context_reset(ha); 9581 goto exit_host_reset; 9582 } 9583 } 9584 break; 9585 } 9586 9587recover_adapter: 9588 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if 9589 * reset is issued by application */ 9590 if ((is_qla8032(ha) || is_qla8042(ha)) && 9591 test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9592 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 9593 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 9594 (idc_ctrl | GRACEFUL_RESET_BIT1)); 9595 } 9596 9597 rval = qla4xxx_recover_adapter(ha); 9598 if (rval != QLA_SUCCESS) { 9599 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 9600 __func__)); 9601 rval = -EIO; 9602 } 9603 9604exit_host_reset: 9605 return rval; 9606} 9607 9608/* PCI AER driver recovers from all correctable errors w/o 9609 * driver intervention. For uncorrectable errors PCI AER 9610 * driver calls the following device driver's callbacks 9611 * 9612 * - Fatal Errors - link_reset 9613 * - Non-Fatal Errors - driver's error_detected() which 9614 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 9615 * 9616 * PCI AER driver calls 9617 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() 9618 * returns RECOVERED or NEED_RESET if fw_hung 9619 * NEED_RESET - driver's slot_reset() 9620 * DISCONNECT - device is dead & cannot recover 9621 * RECOVERED - driver's resume() 9622 */ 9623static pci_ers_result_t 9624qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9625{ 9626 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9627 9628 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", 9629 ha->host_no, __func__, state); 9630 9631 if (!is_aer_supported(ha)) 9632 return PCI_ERS_RESULT_NONE; 9633 9634 switch (state) { 9635 case pci_channel_io_normal: 9636 clear_bit(AF_EEH_BUSY, &ha->flags); 9637 return PCI_ERS_RESULT_CAN_RECOVER; 9638 case pci_channel_io_frozen: 9639 set_bit(AF_EEH_BUSY, &ha->flags); 9640 qla4xxx_mailbox_premature_completion(ha); 9641 qla4xxx_free_irqs(ha); 9642 pci_disable_device(pdev); 9643 /* Return back all IOs */ 9644 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 9645 return PCI_ERS_RESULT_NEED_RESET; 9646 case pci_channel_io_perm_failure: 9647 set_bit(AF_EEH_BUSY, &ha->flags); 9648 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); 9649 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 9650 return PCI_ERS_RESULT_DISCONNECT; 9651 } 9652 return PCI_ERS_RESULT_NEED_RESET; 9653} 9654 9655/** 9656 * qla4xxx_pci_mmio_enabled() gets called if 9657 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER 9658 * and read/write to the device still works. 
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
	uint32_t rval = QLA_ERROR;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	if (test_bit(AF_ONLINE, &ha->flags)) {
		clear_bit(AF_ONLINE, &ha->flags);
		clear_bit(AF_LINK_UP, &ha->flags);
		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
	}

	fn = PCI_FUNC(ha->pdev->devfn);
	if (is_qla8022(ha)) {
		while (fn > 0) {
			fn--;
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
				   ha->host_no, __func__, fn);
			/*
			 * Get the pci device given the domain, bus,
			 * slot/function number.
			 */
			other_pdev = pci_get_domain_bus_and_slot(
					pci_domain_nr(ha->pdev->bus),
					ha->pdev->bus->number,
					PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
						  fn));

			if (!other_pdev)
				continue;

			if (atomic_read(&other_pdev->enable_cnt)) {
				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
					   ha->host_no, __func__, fn);
				pci_dev_put(other_pdev);
				break;
			}
			pci_dev_put(other_pdev);
		}
	} else {
		/* this case is meant for ISP83xx/ISP84xx only */
		if (qla4_83xx_can_perform_reset(ha)) {
			/* reset fn as iSCSI is going to perform the reset */
			fn = 0;
		}
	}

	/*
	 * The first function on the card, the reset owner, will start and
	 * initialize the firmware. The other functions on the card will
	 * reset the firmware context.
	 */
	if (!fn) {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
			   "0x%x is the owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);

		ha->isp_ops->idc_lock(ha);
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
				    QLA8XXX_DEV_COLD);
		ha->isp_ops->idc_unlock(ha);

		rval = qla4_8xxx_update_idc_reg(ha);
		if (rval == QLA_ERROR) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
				   ha->host_no, __func__);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
			goto exit_error_recovery;
		}

		clear_bit(AF_FW_RECOVERY, &ha->flags);
		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);

		if (rval != QLA_SUCCESS) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "FAILED\n", ha->host_no, __func__);
			qla4xxx_free_irqs(ha);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_clear_drv_active(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
		} else {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "READY\n", ha->host_no, __func__);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_READY);
			/* Clear driver state register */
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
			qla4_8xxx_set_drv_active(ha);
			ha->isp_ops->idc_unlock(ha);
			ha->isp_ops->enable_intrs(ha);
		}
	} else {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
			   "the reset owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);
		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
		     QLA8XXX_DEV_READY)) {
			clear_bit(AF_FW_RECOVERY, &ha->flags);
			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
			if (rval == QLA_SUCCESS)
				ha->isp_ops->enable_intrs(ha);
			else
				qla4xxx_free_irqs(ha);

			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_set_drv_active(ha);
			ha->isp_ops->idc_unlock(ha);
		}
	}
exit_error_recovery:
	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	return rval;
}
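/**
 * qla4xxx_pci_slot_reset - re-enable the device after a PCI slot reset
 * @pdev: PCI device pointer
 *
 * Restores and re-saves the PCI state, re-enables the device and, on
 * ISP8xxx parts, runs qla4_8xxx_error_recovery() to bring the firmware
 * back up. Returns PCI_ERS_RESULT_RECOVERED on success.
 **/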
The other functions 9724 * on the card will reset the firmware context 9725 */ 9726 if (!fn) { 9727 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " 9728 "0x%x is the owner\n", ha->host_no, __func__, 9729 ha->pdev->devfn); 9730 9731 ha->isp_ops->idc_lock(ha); 9732 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9733 QLA8XXX_DEV_COLD); 9734 ha->isp_ops->idc_unlock(ha); 9735 9736 rval = qla4_8xxx_update_idc_reg(ha); 9737 if (rval == QLA_ERROR) { 9738 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", 9739 ha->host_no, __func__); 9740 ha->isp_ops->idc_lock(ha); 9741 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9742 QLA8XXX_DEV_FAILED); 9743 ha->isp_ops->idc_unlock(ha); 9744 goto exit_error_recovery; 9745 } 9746 9747 clear_bit(AF_FW_RECOVERY, &ha->flags); 9748 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9749 9750 if (rval != QLA_SUCCESS) { 9751 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9752 "FAILED\n", ha->host_no, __func__); 9753 qla4xxx_free_irqs(ha); 9754 ha->isp_ops->idc_lock(ha); 9755 qla4_8xxx_clear_drv_active(ha); 9756 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9757 QLA8XXX_DEV_FAILED); 9758 ha->isp_ops->idc_unlock(ha); 9759 } else { 9760 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9761 "READY\n", ha->host_no, __func__); 9762 ha->isp_ops->idc_lock(ha); 9763 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9764 QLA8XXX_DEV_READY); 9765 /* Clear driver state register */ 9766 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 9767 qla4_8xxx_set_drv_active(ha); 9768 ha->isp_ops->idc_unlock(ha); 9769 ha->isp_ops->enable_intrs(ha); 9770 } 9771 } else { 9772 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 9773 "the reset owner\n", ha->host_no, __func__, 9774 ha->pdev->devfn); 9775 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == 9776 QLA8XXX_DEV_READY)) { 9777 clear_bit(AF_FW_RECOVERY, &ha->flags); 9778 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9779 if (rval == QLA_SUCCESS) 9780 ha->isp_ops->enable_intrs(ha); 9781 else 9782 qla4xxx_free_irqs(ha); 9783 9784 ha->isp_ops->idc_lock(ha); 9785 qla4_8xxx_set_drv_active(ha); 9786 ha->isp_ops->idc_unlock(ha); 9787 } 9788 } 9789exit_error_recovery: 9790 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9791 return rval; 9792} 9793 9794static pci_ers_result_t 9795qla4xxx_pci_slot_reset(struct pci_dev *pdev) 9796{ 9797 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 9798 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9799 int rc; 9800 9801 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", 9802 ha->host_no, __func__); 9803 9804 if (!is_aer_supported(ha)) 9805 return PCI_ERS_RESULT_NONE; 9806 9807 /* Restore the saved state of PCIe device - 9808 * BAR registers, PCI Config space, PCIX, MSI, 9809 * IOV states 9810 */ 9811 pci_restore_state(pdev); 9812 9813 /* pci_restore_state() clears the saved_state flag of the device 9814 * save restored state which resets saved_state flag 9815 */ 9816 pci_save_state(pdev); 9817 9818 /* Initialize device or resume if in suspended state */ 9819 rc = pci_enable_device(pdev); 9820 if (rc) { 9821 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " 9822 "device after reset\n", ha->host_no, __func__); 9823 goto exit_slot_reset; 9824 } 9825 9826 ha->isp_ops->disable_intrs(ha); 9827 9828 if (is_qla80XX(ha)) { 9829 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 9830 ret = PCI_ERS_RESULT_RECOVERED; 9831 goto exit_slot_reset; 9832 } else 9833 goto exit_slot_reset; 9834 } 9835 9836exit_slot_reset: 9837 ql4_printk(KERN_WARNING, ha, 
"scsi%ld: %s: Return=%x\n" 9838 "device after reset\n", ha->host_no, __func__, ret); 9839 return ret; 9840} 9841 9842static void 9843qla4xxx_pci_resume(struct pci_dev *pdev) 9844{ 9845 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9846 int ret; 9847 9848 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", 9849 ha->host_no, __func__); 9850 9851 ret = qla4xxx_wait_for_hba_online(ha); 9852 if (ret != QLA_SUCCESS) { 9853 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " 9854 "resume I/O from slot/link_reset\n", ha->host_no, 9855 __func__); 9856 } 9857 9858 clear_bit(AF_EEH_BUSY, &ha->flags); 9859} 9860 9861static const struct pci_error_handlers qla4xxx_err_handler = { 9862 .error_detected = qla4xxx_pci_error_detected, 9863 .mmio_enabled = qla4xxx_pci_mmio_enabled, 9864 .slot_reset = qla4xxx_pci_slot_reset, 9865 .resume = qla4xxx_pci_resume, 9866}; 9867 9868static struct pci_device_id qla4xxx_pci_tbl[] = { 9869 { 9870 .vendor = PCI_VENDOR_ID_QLOGIC, 9871 .device = PCI_DEVICE_ID_QLOGIC_ISP4010, 9872 .subvendor = PCI_ANY_ID, 9873 .subdevice = PCI_ANY_ID, 9874 }, 9875 { 9876 .vendor = PCI_VENDOR_ID_QLOGIC, 9877 .device = PCI_DEVICE_ID_QLOGIC_ISP4022, 9878 .subvendor = PCI_ANY_ID, 9879 .subdevice = PCI_ANY_ID, 9880 }, 9881 { 9882 .vendor = PCI_VENDOR_ID_QLOGIC, 9883 .device = PCI_DEVICE_ID_QLOGIC_ISP4032, 9884 .subvendor = PCI_ANY_ID, 9885 .subdevice = PCI_ANY_ID, 9886 }, 9887 { 9888 .vendor = PCI_VENDOR_ID_QLOGIC, 9889 .device = PCI_DEVICE_ID_QLOGIC_ISP8022, 9890 .subvendor = PCI_ANY_ID, 9891 .subdevice = PCI_ANY_ID, 9892 }, 9893 { 9894 .vendor = PCI_VENDOR_ID_QLOGIC, 9895 .device = PCI_DEVICE_ID_QLOGIC_ISP8324, 9896 .subvendor = PCI_ANY_ID, 9897 .subdevice = PCI_ANY_ID, 9898 }, 9899 { 9900 .vendor = PCI_VENDOR_ID_QLOGIC, 9901 .device = PCI_DEVICE_ID_QLOGIC_ISP8042, 9902 .subvendor = PCI_ANY_ID, 9903 .subdevice = PCI_ANY_ID, 9904 }, 9905 {0, 0}, 9906}; 9907MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 9908 9909static struct pci_driver qla4xxx_pci_driver = { 9910 .name = DRIVER_NAME, 9911 .id_table = qla4xxx_pci_tbl, 9912 .probe = qla4xxx_probe_adapter, 9913 .remove = qla4xxx_remove_adapter, 9914 .err_handler = &qla4xxx_err_handler, 9915}; 9916 9917static int __init qla4xxx_module_init(void) 9918{ 9919 int ret; 9920 9921 if (ql4xqfulltracking) 9922 qla4xxx_driver_template.track_queue_depth = 1; 9923 9924 /* Allocate cache for SRBs. */ 9925 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9926 SLAB_HWCACHE_ALIGN, NULL); 9927 if (srb_cachep == NULL) { 9928 printk(KERN_ERR 9929 "%s: Unable to allocate SRB cache..." 9930 "Failing load!\n", DRIVER_NAME); 9931 ret = -ENOMEM; 9932 goto no_srp_cache; 9933 } 9934 9935 /* Derive version string. 
static int __init qla4xxx_module_init(void)
{
	int ret;

	if (ql4xqfulltracking)
		qla4xxx_driver_template.track_queue_depth = 1;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		       "%s: Unable to allocate SRB cache..."
		       "Failing load!\n", DRIVER_NAME);
		ret = -ENOMEM;
		goto no_srp_cache;
	}

	/* Derive version string. */
	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
	if (ql4xextended_error_logging)
		strcat(qla4xxx_version_str, "-debug");

	qla4xxx_scsi_transport =
		iscsi_register_transport(&qla4xxx_iscsi_transport);
	if (!qla4xxx_scsi_transport) {
		ret = -ENODEV;
		goto release_srb_cache;
	}

	ret = pci_register_driver(&qla4xxx_pci_driver);
	if (ret)
		goto unregister_transport;

	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
	return 0;

unregister_transport:
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
release_srb_cache:
	kmem_cache_destroy(srb_cachep);
no_srp_cache:
	return ret;
}

static void __exit qla4xxx_module_exit(void)
{
	pci_unregister_driver(&qla4xxx_pci_driver);
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
	kmem_cache_destroy(srb_cachep);
}

module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);