// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	sp->free(sp);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
}

void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

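/*
 * qla24xx_fcp_prio_cfg_valid() - sanity-check FCP priority data read from
 * flash. Verifies the "HQOS" signature and, when @flag is set, that at
 * least one priority entry carries a valid tag. Returns 1 when the
 * configuration is usable, 0 otherwise.
 */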
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

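/*
 * qla24xx_proc_fcp_prio_cfg_cmd() - handle the FCP priority vendor command.
 * Depending on the sub-command this enables/disables FCP priority handling
 * or reads/writes the priority configuration kept in ha->fcp_prio_cfg.
 */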
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the
			 * fcp_prio_cfg data is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

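/*
 * qla2x00_process_els() - issue an ELS pass-through request. For rport
 * based requests the existing fcport is used (after making sure it is
 * logged in); for host based requests a temporary fcport is allocated
 * and initialized from the destination port ID in the bsg request.
 */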
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport) {
			rval = -ENOMEM;
			goto done;
		}
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		/* dma_map_sg() failed, so there is nothing to unmap */
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		/* Unmap only the request payload; the reply mapping failed */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

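/*
 * qla24xx_calc_ct_iocbs() - number of IOCBs needed for a CT command with
 * @dsds data segment descriptors: one command IOCB (holding two segments)
 * plus a continuation IOCB for every additional five segments.
 */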
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

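/*
 * qla2x00_process_ct() - issue a CT pass-through request. The destination
 * (SNS or management server) is selected from the routing byte in the CT
 * preamble, and a temporary fcport is allocated to carry the destination
 * port ID and loop ID.
 */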
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		/* Unmap the already-mapped request payload before bailing */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
545 "bsg rqst type: %s else type: %x - "
546 "loop-id=%x portid=%02x%02x%02x.\n", type,
547 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
548 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
549 fcport->d_id.b.al_pa);
550
551 rval = qla2x00_start_sp(sp);
552 if (rval != QLA_SUCCESS) {
553 ql_log(ql_log_warn, vha, 0x7017,
554 "qla2x00_start_sp failed=%d.\n", rval);
555 qla2x00_rel_sp(sp);
556 rval = -EIO;
557 goto done_free_fcport;
558 }
559 return rval;
560
561 done_free_fcport:
562 qla2x00_free_fcport(fcport);
563 done_unmap_sg:
564 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
565 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
566 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
567 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
568 done:
569 return rval;
570 }
571
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

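/*
 * qla2x00_process_loopback() - run an echo or loopback diagnostic. The
 * request payload is copied into a coherent DMA buffer and sent either as
 * an ECHO command or as a loopback test after the port has been put into
 * the requested loopback mode; the received data and mailbox status are
 * returned through the reply payload and reply buffer.
 */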
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

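/*
 * qla84xx_reset() - issue an ISP84xx chip reset on behalf of the vendor
 * command; a diagnostic-firmware reset is requested when the flag word
 * is A84_ISSUE_RESET_DIAG_FW.
 */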
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

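/*
 * qla84xx_updatefw() - download new ISP84xx firmware. The image is copied
 * from the request payload into a coherent DMA buffer and handed to the
 * firmware with a VERIFY_CHIP IOCB.
 */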
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

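/*
 * qla84xx_mgmt_cmd() - ISP84xx management pass-through. Builds an
 * ACCESS_CHIP IOCB for memory read/write, info retrieval or config
 * change requests and moves the payload through a coherent DMA buffer.
 */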
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

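/*
 * qla24xx_iidma() - get or set the iiDMA (link speed) setting for the
 * target port whose WWPN is given in the bsg request. On a get, the
 * updated qla_port_param is returned after the fc_bsg_reply.
 */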
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

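/*
 * qla2x00_optrom_setup() - validate an option ROM read/update request and
 * stage a buffer for it. Computes the region start/size from the vendor
 * command, allocates ha->optrom_buffer and moves optrom_state to
 * QLA_SREADING or QLA_SWRITING. Caller holds ha->optrom_mutex.
 */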
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

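/*
 * qla2x00_read_optrom() - read the option ROM region selected by the
 * vendor command into the staged buffer and copy it to the reply payload.
 */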
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

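/*
 * qla2x00_update_optrom() - write the request payload into the option ROM
 * region selected by the vendor command, bypassing minidump capture while
 * the flash is being updated.
 */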
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

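/*
 * qla2x00_update_fru_versions() - write a list of FRU image version
 * fields to the board through the SFP write mailbox interface, one
 * qla_image_version entry at a time.
 */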
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

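/*
 * qla2x00_read_fru_status() - read a single FRU status register via the
 * SFP read mailbox interface and return the qla_status_reg structure in
 * the reply payload.
 */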
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

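/*
 * qla2x00_write_i2c()/qla2x00_read_i2c() - transfer a caller-specified
 * buffer to or from a device on the adapter's I2C bus using the SFP
 * write/read mailbox commands.
 */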
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

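/*
 * qla24xx_process_bidir_cmd() - run a bidirectional (read+write)
 * pass-through diagnostic. Requires a bidi-capable ISP in P2P mode behind
 * a switch; the host performs a self login once and reuses that loop ID
 * for subsequent bidirectional IOCBs. Errors are reported through the
 * vendor specific reply field rather than the bsg result.
 */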
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}
1850
1851 mutex_lock(&ha->selflogin_lock);
1852 if (vha->self_login_loop_id == 0) {
1853 /* Initialize all required fields of fcport */
1854 vha->bidir_fcport.vha = vha;
1855 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1856 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1857 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1858 vha->bidir_fcport.loop_id = vha->loop_id;
1859
1860 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1861 ql_log(ql_log_warn, vha, 0x70a7,
1862 "Failed to login port %06X for bidirectional IOCB\n",
1863 vha->bidir_fcport.d_id.b24);
1864 mutex_unlock(&ha->selflogin_lock);
1865 rval = EXT_STATUS_MAILBOX;
1866 goto done;
1867 }
1868 vha->self_login_loop_id = nextlid - 1;
1869
1870 }
1871 /* Assign the self login loop id to fcport */
1872 mutex_unlock(&ha->selflogin_lock);
1873
1874 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1875
1876 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1877 bsg_job->request_payload.sg_list,
1878 bsg_job->request_payload.sg_cnt,
1879 DMA_TO_DEVICE);
1880
1881 if (!req_sg_cnt) {
1882 rval = EXT_STATUS_NO_MEMORY;
1883 goto done;
1884 }
1885
1886 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1887 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1888 DMA_FROM_DEVICE);
1889
1890 if (!rsp_sg_cnt) {
1891 rval = EXT_STATUS_NO_MEMORY;
1892 goto done_unmap_req_sg;
1893 }
1894
1895 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1896 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1897 ql_dbg(ql_dbg_user, vha, 0x70a9,
1898 "Dma mapping resulted in different sg counts "
1899 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1900 "%x dma_reply_sg_cnt: %x]\n",
1901 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1902 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1903 rval = EXT_STATUS_NO_MEMORY;
1904 goto done_unmap_sg;
1905 }
1906
1907 req_data_len = bsg_job->request_payload.payload_len;
1908 rsp_data_len = bsg_job->reply_payload.payload_len;
1909
1910 if (req_data_len != rsp_data_len) {
1911 rval = EXT_STATUS_BUSY;
1912 ql_log(ql_log_warn, vha, 0x70aa,
1913 "req_data_len != rsp_data_len\n");
1914 goto done_unmap_sg;
1915 }
1916
1917 /* Alloc SRB structure */
1918 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1919 if (!sp) {
1920 ql_dbg(ql_dbg_user, vha, 0x70ac,
1921 "Alloc SRB structure failed\n");
1922 rval = EXT_STATUS_NO_MEMORY;
1923 goto done_unmap_sg;
1924 }
1925
1926 /*Populate srb->ctx with bidir ctx*/
1927 sp->u.bsg_job = bsg_job;
1928 sp->free = qla2x00_bsg_sp_free;
1929 sp->type = SRB_BIDI_CMD;
1930 sp->done = qla2x00_bsg_job_done;
1931
1932 /* Add the read and write sg count */
1933 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1934
1935 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1936 if (rval != EXT_STATUS_OK)
1937 goto done_free_srb;
1938 /* the bsg request will be completed in the interrupt handler */
1939 return rval;
1940
1941 done_free_srb:
1942 mempool_free(sp, ha->srb_mempool);
1943 done_unmap_sg:
1944 dma_unmap_sg(&ha->pdev->dev,
1945 bsg_job->reply_payload.sg_list,
1946 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1947 done_unmap_req_sg:
1948 dma_unmap_sg(&ha->pdev->dev,
1949 bsg_job->request_payload.sg_list,
1950 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1951 done:
1952
1953 /* Return an error vendor specific response
1954 * and complete the bsg request
1955 */
1956 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1957 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1958 bsg_reply->reply_payload_rcv_len = 0;
1959 bsg_reply->result = (DID_OK) << 16;
1960 bsg_job_done(bsg_job, bsg_reply->result,
1961 bsg_reply->reply_payload_rcv_len);
1962 /* Always return success, vendor rsp carries correct status */
1963 return 0;
1964 }
1965
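/*
 * qlafx00_mgmt_cmd() - handle the QL_VND_FX00_MGMT_CMD vendor request for
 * ISPFx00 adapters.
 *
 * The caller-supplied qla_mt_iocb_rqst_fx00 (carried after vendor_cmd[0])
 * decides which payloads need DMA mapping. A throwaway fcport is allocated
 * because the IOCB build path expects one; on the success path completion
 * and unmapping happen in qla2x00_bsg_job_done()/qla2x00_bsg_sp_free().
 */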
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/*
	 * Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port-specific information
	 * from the fcport structure. For host-based ELS commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}

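/*
 * qla26xx_serdes_op() - read or write a single SerDes register on behalf
 * of the QL_VND_SERDES_OP vendor command, using
 * qla2x00_read_serdes_word()/qla2x00_write_serdes_word(). A helper failure
 * is reported to the application as EXT_STATUS_MAILBOX in vendor_rsp[0].
 */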
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

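/*
 * qla8044_serdes_op() - QL_VND_SERDES_OP_EX counterpart of
 * qla26xx_serdes_op(), operating on the extended qla_serdes_reg_ex layout
 * and the ISP8044 helpers qla8044_read_serdes_word()/
 * qla8044_write_serdes_word().
 */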
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

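/*
 * qla27xx_get_flash_upd_cap() - report the firmware attribute words packed
 * into a single 64-bit capabilities value for QL_VND_GET_FLASH_UPDATE_CAPS.
 * Restricted to ISP27xx/ISP28xx adapters.
 */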
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

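/*
 * qla27xx_set_flash_upd_cap() - validate a QL_VND_SET_FLASH_UPDATE_CAPS
 * request: the supplied capabilities must match what the running firmware
 * advertises and the requested outage duration must be at least
 * MAX_LOOP_TIMEOUT. Nothing is modified here; a mismatch is reported as
 * EXT_STATUS_INVALID_PARAM.
 */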
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

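/*
 * qla27xx_get_bbcr_data() - report buffer-to-buffer credit recovery (BBCR)
 * state for QL_VND_GET_BBCR_DATA. When BBCR is enabled, the negotiated and
 * configured BB_SC_N values are decoded from vha->bbcr; a set offline bit
 * (bit 12) is reported as an offline state with a login-reject reason.
 */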
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

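/*
 * qla2x00_get_priv_stats() - fetch ISP link statistics into a DMA-coherent
 * buffer for QL_VND_GET_PRIV_STATS/QL_VND_GET_PRIV_STATS_EX. The _EX
 * variant passes mailbox options through vendor_cmd[1]; the query is
 * issued against the base (physical) host.
 */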
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}

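/*
 * qla2x00_do_dport_diagnostics() - run D_Port diagnostics
 * (QL_VND_DPORT_DIAGNOSTICS) on ISP83xx/27xx/28xx and copy the result
 * buffer back to the application on success.
 */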
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

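/*
 * qla2x00_get_flash_image_status() - report which flash regions are
 * currently active for QL_VND_SS_GET_FLASH_IMAGE_STATUS; ISP28xx
 * additionally reports the active auxiliary regions (board config,
 * VPD/NVRAM and NPIV config).
 */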
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

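/*
 * qla2x00_process_vendor_specific() - dispatch an FC_BSG_HST_VENDOR request
 * to its handler based on vendor_cmd[0]; unknown opcodes get -ENOSYS.
 */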
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	default:
		return -ENOSYS;
	}
}

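/*
 * qla24xx_bsg_request() - entry point for all BSG requests issued against
 * the host or an rport. Rejects requests while the chip is down and routes
 * ELS, CT and vendor-specific messages to their processors.
 */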
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport)
			return ret;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

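/*
 * qla24xx_bsg_timeout() - abort handler invoked when a BSG request times
 * out. Walks the outstanding-command arrays of every request queue under
 * hardware_lock looking for the SRB that owns this job, aborts it via the
 * ISP-specific abort_command() callback and frees the SRB.
 */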
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp &&
			    (sp->type == SRB_CT_CMD ||
			     sp->type == SRB_ELS_CMD_HST ||
			     sp->type == SRB_FXIOCB_BCMD) &&
			    sp->u.bsg_job == bsg_job) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(&ha->hardware_lock, flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(sp);
	return 0;
}