Lines Matching defs:bfad

38 struct bfad_s *bfad = drv;
45 bfa_trc(bfad, scsi_status);
49 bfa_trc(bfad, sns_len);
56 bfa_trc(bfad, residue);
61 bfa_trc(bfad, 0);
84 bfa_trc(bfad, cmnd->result);
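
These first matches (lines 38-84) sit in the driver's I/O completion callback, which traces the SCSI status, sense length, and residue before handing the command back to the midlayer. A minimal sketch of that shape, with hypothetical function and parameter names, not the actual bfa implementation; this and the sketches below assume the driver's own headers (bfa_trc(), BFA_LOG(), the bfad_* types from this listing) plus the usual <linux/...> and <scsi/...> includes:

        /* Hypothetical completion callback: trace the interesting fields,
         * record the residual count, and complete the command. */
        static void
        example_ioim_done(void *drv, struct scsi_cmnd *cmnd, u8 scsi_status,
                          int sns_len, s32 residue)
        {
                struct bfad_s *bfad = drv;      /* driver instance, as at line 38 */

                bfa_trc(bfad, scsi_status);     /* mirrors the traces at 45-61 */
                bfa_trc(bfad, sns_len);
                bfa_trc(bfad, residue);

                scsi_set_resid(cmnd, residue);  /* report any underrun */
                cmnd->result = scsi_status;
                bfa_trc(bfad, cmnd->result);
                scsi_done(cmnd);                /* cmnd->scsi_done(cmnd) on older kernels */
        }
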
134 struct bfad_s *bfad = drv;
142 bfa_trc(bfad, cmnd->result);
147 bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
174 struct bfad_s *bfad = im_port->bfad;
179 bfad->pci_name, BFAD_DRIVER_VERSION);
195 struct bfad_s *bfad = im_port->bfad;
201 spin_lock_irqsave(&bfad->bfad_lock, flags);
213 bfa_trc(bfad, hal_io->iotag);
214 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
218 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
230 bfa_trc(bfad, hal_io->iotag);
231 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
236 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
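
Lines 195-236 show the abort handler's critical section: take bfad_lock with interrupts disabled, trace the I/O tag, log the abort, and release the lock on every exit path. A minimal sketch of that spin_lock_irqsave idiom (function name and log text illustrative):

        static void
        example_abort_one(struct bfad_s *bfad, struct bfa_ioim_s *hal_io)
        {
                unsigned long flags;

                spin_lock_irqsave(&bfad->bfad_lock, flags);  /* guards bfa core state */
                bfa_trc(bfad, hal_io->iotag);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "scsi%d: abort cmnd, io_tag %x\n",
                        bfad->inst_no, hal_io->iotag);
                /* ... issue the abort to the firmware while the lock is held ... */
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        }
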
241 bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
249 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
251 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
271 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
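
The target-reset sender at lines 241-271 allocates a task-management item from the bfa core, logging at KERN_ERR when the pool is exhausted. A hedged sketch of the alloc-and-check shape; the bfa_tskim_start() step is left as a comment since its exact arguments are not in the listing:

        static bfa_status_t
        example_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
                                  struct bfad_itnim_s *itnim)
        {
                struct bfa_tskim_s *tskim;

                /* The SCSI command doubles as the driver-side tskim cookie. */
                tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
                if (!tskim) {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "target reset, fail to allocate tskim\n");
                        return BFA_STATUS_FAILED;
                }
                /* ... bfa_tskim_start() would issue FCP_TM_TARGET_RESET here ... */
                return BFA_STATUS_OK;
        }
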
297 struct bfad_s *bfad = im_port->bfad;
307 spin_lock_irqsave(&bfad->bfad_lock, flags);
310 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
315 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
317 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
319 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
339 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
341 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
348 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
354 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
373 struct bfad_s *bfad = im_port->bfad;
380 spin_lock_irqsave(&bfad->bfad_lock, flags);
384 rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
387 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
390 spin_lock_irqsave(&bfad->bfad_lock, flags);
394 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
401 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
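
The reset handler around lines 373-401 sends the task-management request under bfad_lock, then drops the lock while sleeping for the completion and re-takes it afterwards (the unlock at 387 followed by the re-lock at 390). A sketch of that drop-the-lock-to-wait pattern; the wait queue and done flag are hypothetical stand-ins for the driver's own completion signalling:

        static int
        example_reset_and_wait(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
                               struct bfad_itnim_s *itnim,
                               wait_queue_head_t *wq, unsigned long *done)
        {
                unsigned long flags;
                bfa_status_t rc;

                spin_lock_irqsave(&bfad->bfad_lock, flags);
                rc = bfad_im_target_reset_send(bfad, cmnd, itnim);  /* line 384 */
                if (rc == BFA_STATUS_OK) {
                        /* Never sleep with a spinlock held: drop it, wait
                         * for the TM completion, then re-acquire. */
                        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                        wait_event(*wq, test_bit(0, done));
                        spin_lock_irqsave(&bfad->bfad_lock, flags);
                }
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                return rc == BFA_STATUS_OK ? 0 : -EIO;
        }
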
425 bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
432 (*itnim_drv)->im = bfad->im;
440 bfad->bfad_flags |= BFAD_RPORT_ONLINE;
449 bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
472 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
511 struct bfad_s *bfad;
515 bfad = port->bfad;
516 if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
534 bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
550 im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
563 im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
570 error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
590 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
592 bfa_trc(bfad, bfad->inst_no);
593 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
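
Lines 534-593 cover Scsi_Host setup and teardown: allocate the host, size its queue depth and scatter-gather limit from the negotiated configuration, and register it with the PCI function as the DMA device. A minimal sketch, assuming a hypothetical private-data size and error handling:

        static int
        example_scsi_host_setup(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                                struct device *dev, struct scsi_host_template *sht)
        {
                struct Scsi_Host *shost;
                int error;

                shost = scsi_host_alloc(sht, sizeof(struct bfad_im_port_s *));
                if (!shost)
                        return -ENOMEM;

                /* Size the host from the driver's negotiated limits. */
                shost->can_queue = bfad->cfg_data.ioc_queue_depth;   /* line 563 */
                shost->sg_tablesize = bfad->cfg_data.io_max_sge;     /* line 739 */

                /* DMA mapping runs against the PCI function, not `dev`. */
                error = scsi_add_host_with_dma(shost, dev, &bfad->pcidev->dev);
                if (error) {
                        scsi_host_put(shost);
                        return error;
                }
                im_port->shost = shost;
                return 0;
        }
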
619 bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
631 im_port->bfad = bfad;
642 bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
646 queue_work(bfad->im->drv_workq,
655 struct bfad_s *bfad = im_port->bfad;
657 spin_lock_irqsave(&bfad->bfad_lock, flags);
667 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
675 struct bfad_s *bfad = im->bfad;
676 struct Scsi_Host *shost = bfad->pport.im_port->shost;
680 while (!list_empty(&bfad->active_aen_q)) {
681 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
682 bfa_q_deq(&bfad->active_aen_q, &aen_entry);
683 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
689 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
690 list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
691 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
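
The loop at lines 680-691 drains the asynchronous-event (AEN) queue: each entry is popped off the active list under bfad_aen_spinlock, processed with the lock dropped, then returned to the free list under the same lock. A sketch of that drain pattern, reusing the listing's bfa_q_deq() dequeue macro; the event-delivery step is elided:

        static void
        example_aen_drain(struct bfad_s *bfad)
        {
                struct bfad_aen_entry_s *aen_entry;
                unsigned long flags;

                while (!list_empty(&bfad->active_aen_q)) {
                        spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
                        bfa_q_deq(&bfad->active_aen_q, &aen_entry);
                        spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);

                        /* ... post the event to the FC transport here ... */

                        spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
                        list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
                        spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
                }
        }

The unlocked list_empty() check is only a termination hint; correctness comes from the locked dequeue.
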
696 bfad_im_probe(struct bfad_s *bfad)
704 bfad->im = im;
705 im->bfad = bfad;
707 if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
717 bfad_im_probe_undo(struct bfad_s *bfad)
719 if (bfad->im) {
720 bfad_destroy_workq(bfad->im);
721 kfree(bfad->im);
722 bfad->im = NULL;
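
bfad_im_probe() and its undo (lines 696-722) are a straightforward allocate-and-wire-up pair: allocate the IM structure, set the back-pointers, and spawn the workqueue, unwinding on failure. A minimal sketch, assuming the BFA_STATUS_* return codes seen elsewhere in the listing:

        static bfa_status_t
        example_im_probe(struct bfad_s *bfad)
        {
                struct bfad_im_s *im;

                im = kzalloc(sizeof(*im), GFP_KERNEL);
                if (!im)
                        return BFA_STATUS_FAILED;

                bfad->im = im;          /* back-pointers, as at 704-705 */
                im->bfad = bfad;

                if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
                        kfree(im);      /* undo, mirroring lines 719-722 */
                        bfad->im = NULL;
                        return BFA_STATUS_FAILED;
                }
                return BFA_STATUS_OK;
        }
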
727 bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
739 sht->sg_tablesize = bfad->cfg_data.io_max_sge;
745 bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
748 flush_workqueue(bfad->im->drv_workq);
749 bfad_im_scsi_host_free(im_port->bfad, im_port);
764 bfad_thread_workq(struct bfad_s *bfad)
766 struct bfad_im_s *im = bfad->im;
768 bfa_trc(bfad, 0);
770 bfad->inst_no);
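
bfad_thread_workq() (lines 764-770) creates one single-threaded workqueue per adapter instance, named after bfad->inst_no; deferred work such as the port delete queued at line 646 runs there. A sketch with an illustrative queue name:

        static bfa_status_t
        example_thread_workq(struct bfad_s *bfad)
        {
                struct bfad_im_s *im = bfad->im;
                char name[32];

                bfa_trc(bfad, 0);
                snprintf(name, sizeof(name), "bfad_wq_%d", bfad->inst_no);
                im->drv_workq = create_singlethread_workqueue(name);
                if (!im->drv_workq)
                        return BFA_STATUS_FAILED;
                return BFA_STATUS_OK;
        }
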
1028 struct bfad_s *bfad = im_port->bfad;
1031 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
1037 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
1049 strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
1053 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
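
Lines 1028-1053 publish HBA capabilities to the FC transport class: the NPIV vport limit, the symbolic name copied out of the FCS fabric port configuration, and the supported-speeds mask. A sketch of that attribute setup, assuming BFA_SYMNAME_MAXLEN for the local buffer:

        static void
        example_fc_host_init(struct bfad_im_port_s *im_port)
        {
                struct bfad_s *bfad = im_port->bfad;
                struct Scsi_Host *host = im_port->shost;
                char symname[BFA_SYMNAME_MAXLEN];

                fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
                fc_host_supported_speeds(host) =
                        bfad_im_supported_speeds(&bfad->bfa);

                strscpy(symname,
                        bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
                        BFA_SYMNAME_MAXLEN);
                snprintf(fc_host_symbolic_name(host), FC_SYMBOLIC_NAME_SIZE,
                         "%s", symname);
        }
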
1109 struct bfad_s *bfad = im->bfad;
1117 spin_lock_irqsave(&bfad->bfad_lock, flags);
1119 bfa_trc(bfad, itnim->state);
1123 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1125 spin_lock_irqsave(&bfad->bfad_lock, flags);
1132 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
1154 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1156 bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
1158 spin_lock_irqsave(&bfad->bfad_lock, flags);
1165 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
1180 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1182 bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
1184 spin_lock_irqsave(&bfad->bfad_lock, flags);
1196 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1207 struct bfad_s *bfad = im_port->bfad;
1223 if (bfad->bfad_flags & BFAD_EEH_BUSY) {
1224 if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
1236 spin_lock_irqsave(&bfad->bfad_lock, flags);
1237 if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
1239 "bfad%d, queuecommand %p %x failed, BFA stopped\n",
1240 bfad->inst_no, cmnd, cmnd->cmnd[0]);
1252 hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
1256 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1263 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1268 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
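
The queuecommand path (lines 1207-1268) checks EEH and HAL-start state, allocates an I/O from the bfa core under bfad_lock, and pushes back with SCSI_MLQUEUE_HOST_BUSY when resources run dry. A condensed sketch under assumed field layouts (itnim_data hanging off the scsi_device, the im_port pointer in host private data); the EEH branch is reduced to a simple retry:

        static int
        example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
        {
                struct bfad_im_port_s *im_port = shost_priv(shost); /* assumed layout */
                struct bfad_s *bfad = im_port->bfad;
                struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
                struct bfa_ioim_s *hal_io;
                unsigned long flags;
                int sg_cnt;

                /* Push back while EEH recovery is in progress (line 1223). */
                if (bfad->bfad_flags & BFAD_EEH_BUSY)
                        return SCSI_MLQUEUE_HOST_BUSY;

                sg_cnt = scsi_dma_map(cmnd);
                if (sg_cnt < 0)
                        return SCSI_MLQUEUE_HOST_BUSY;

                spin_lock_irqsave(&bfad->bfad_lock, flags);
                if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
                        printk(KERN_WARNING
                               "bfad%d, queuecommand %p %x failed, BFA stopped\n",
                               bfad->inst_no, cmnd, cmnd->cmnd[0]);
                        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                        scsi_dma_unmap(cmnd);
                        cmnd->result = DID_NO_CONNECT << 16;
                        scsi_done(cmnd);
                        return 0;
                }

                hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
                                        itnim_data->itnim->bfa_itnim, sg_cnt);
                if (!hal_io) {
                        /* Out of I/O resources: let the midlayer retry. */
                        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                        scsi_dma_unmap(cmnd);
                        return SCSI_MLQUEUE_HOST_BUSY;
                }
                bfa_ioim_start(hal_io);
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                return 0;
        }
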
1279 bfad_rport_online_wait(struct bfad_s *bfad)
1284 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
1290 if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
1293 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
1299 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
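
bfad_rport_online_wait() (lines 1279-1299) polls in one-second sleeps, first for the local port to come online and then for the remote ports, each loop bounded by a delay budget. A sketch with the budgets passed in as parameters, since the listing does not show where the driver keeps them:

        static void
        example_rport_online_wait(struct bfad_s *bfad, int linkup_delay,
                                  int rport_delay)
        {
                int i;

                /* Wait up to linkup_delay seconds for the local port. */
                for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
                     && i < linkup_delay; i++)
                        schedule_timeout_uninterruptible(HZ);

                if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
                        /* Then give remote ports a chance to register. */
                        for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
                             && i < rport_delay; i++)
                                schedule_timeout_uninterruptible(HZ);
                }
        }
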
1307 bfad_get_linkup_delay(struct bfad_s *bfad)
1320 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
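
Finally, bfad_get_linkup_delay() (lines 1307-1320) asks the IOC for preconfigured boot WWNs and uses their presence to pick the wait budget: booting over SAN warrants a longer link-up wait. A sketch with illustrative delay values:

        static int
        example_get_linkup_delay(struct bfad_s *bfad)
        {
                u8 nwwns = 0;
                wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];

                bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);

                /* Boot WWNs configured => boot over SAN: wait longer for
                 * the fabric before giving up on the boot LUNs. */
                return nwwns > 0 ? 30 : 10;
        }
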