Lines Matching refs:psb
85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
254 struct lpfc_io_buf *psb;
273 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
274 if (!psb)
283 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
284 GFP_KERNEL, &psb->dma_handle);
285 if (!psb->data) {
286 kfree(psb);
291 /* Allocate iotag for psb->cur_iocbq. */
292 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
293 if (iotag == 0) {
294 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
295 psb->data, psb->dma_handle);
296 kfree(psb);
299 psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
301 psb->fcp_cmnd = psb->data;
302 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
303 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
304 sizeof(struct fcp_rsp);
307 bpl = (struct ulp_bde64 *)psb->dma_sgl;
308 pdma_phys_fcp_cmd = psb->dma_handle;
309 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
310 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
311 sizeof(struct fcp_rsp);
335 iocb = &psb->cur_iocbq.iocb;
368 psb->status = IOSTAT_SUCCESS;
370 psb->cur_iocbq.io_buf = psb;
371 spin_lock_init(&psb->buf_lock);
372 lpfc_release_scsi_buf_s3(phba, psb);
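
The allocation path above (lines 273-311) carves one dma_pool block into three back-to-back regions, the FCP command, the FCP response, and the scatter/gather list, and derives the matching bus addresses by applying the same offsets to dma_handle. A minimal user-space sketch of that offset arithmetic follows; the struct sizes are placeholders rather than the real lpfc definitions, and calloc() merely stands in for dma_pool_zalloc().

	/*
	 * Hypothetical model of the layout math above. Placeholder
	 * struct sizes, NOT the real fcp_cmnd/fcp_rsp definitions.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct fcp_cmnd { uint8_t bytes[32]; };	/* placeholder size */
	struct fcp_rsp  { uint8_t bytes[24]; };	/* placeholder size */

	int main(void)
	{
		size_t total = sizeof(struct fcp_cmnd) +
			       sizeof(struct fcp_rsp) + 512;
		uint8_t *data = calloc(1, total);	/* ~ dma_pool_zalloc() */
		if (!data)
			return 1;
		uintptr_t dma_handle = (uintptr_t)data;	/* ~ the DMA bus address */

		/* CPU-side views, exactly as in lines 301-304 */
		void *fcp_cmnd = data;
		void *fcp_rsp  = data + sizeof(struct fcp_cmnd);
		void *dma_sgl  = data + sizeof(struct fcp_cmnd) +
				 sizeof(struct fcp_rsp);

		/* bus-side addresses use the same offsets (lines 308-311) */
		uintptr_t phys_cmd = dma_handle;
		uintptr_t phys_rsp = dma_handle + sizeof(struct fcp_cmnd);
		uintptr_t phys_sgl = dma_handle + sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp);

		printf("cmd %p rsp %p sgl %p (phys +0x0 +0x%zx +0x%zx)\n",
		       fcp_cmnd, fcp_rsp, dma_sgl,
		       (size_t)(phys_rsp - phys_cmd),
		       (size_t)(phys_sgl - phys_cmd));
		free(data);
		return 0;
	}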
390 struct lpfc_io_buf *psb, *next_psb;
403 list_for_each_entry_safe(psb, next_psb,
404 &qp->lpfc_abts_io_buf_list, list) {
405 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
408 if (psb->rdata && psb->rdata->pnode &&
409 psb->rdata->pnode->vport == vport)
410 psb->rdata = NULL;
432 struct lpfc_io_buf *psb, *next_psb;
453 list_for_each_entry_safe(psb, next_psb,
454 &qp->lpfc_abts_io_buf_list, list) {
455 if (offline)
456 xri = psb->cur_iocbq.sli4_xritag;
457 if (psb->cur_iocbq.sli4_xritag == xri) {
458 list_del_init(&psb->list);
459 psb->flags &= ~LPFC_SBUF_XBUSY;
460 psb->status = IOSTAT_SUCCESS;
461 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
465 if (!offline) {
466 lpfc_sli4_nvme_xri_aborted(phba, axri,
467 psb);
468 return;
469 }
470 lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
478 if (psb->rdata && psb->rdata->pnode)
479 ndlp = psb->rdata->pnode;
486 lpfc_set_rrq_active(phba, ndlp,
487 psb->cur_iocbq.sli4_lxritag, rxid, 1);
492 spin_lock_irqsave(&psb->buf_lock, iflag);
493 cmd = psb->pCmd;
494 psb->pCmd = NULL;
495 spin_unlock_irqrestore(&psb->buf_lock, iflag);
507 spin_lock_irqsave(&psb->buf_lock, iflag);
508 psb->cur_iocbq.cmd_flag &=
509 ~LPFC_DRIVER_ABORTED;
510 if (psb->waitq)
511 wake_up(psb->waitq);
512 spin_unlock_irqrestore(&psb->buf_lock, iflag);
515 lpfc_release_scsi_buf_s4(phba, psb);
535 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
536 psb->flags &= ~LPFC_SBUF_XBUSY;
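
Lines 453-515 are the SLI-4 abort-completion path: find the buffer owning the aborted XRI, unhook it from the abort list, clear LPFC_SBUF_XBUSY, detach the scsi command, wake any abort thread parked on psb->waitq, and release the buffer. The sketch below models that sequence with toy stand-in types; locking, the NVME branch, and the RRQ handling are omitted, so it illustrates the flow rather than reproducing the driver.

	/* Simplified stand-alone model of lines 453-515; toy types only. */
	#include <stdbool.h>
	#include <stdio.h>

	#define SBUF_XBUSY 0x1			/* ~ LPFC_SBUF_XBUSY */

	struct toy_io_buf {
		int xri;			/* ~ cur_iocbq.sli4_xritag */
		unsigned int flags;
		bool on_abort_list;
		bool has_waiter;		/* ~ psb->waitq != NULL */
	};

	static void toy_release(struct toy_io_buf *b)
	{
		printf("xri %d back in the free pool\n", b->xri);
	}

	/* ~ lpfc_sli4_io_xri_aborted(): runs when XRI_ABORTED arrives */
	static void toy_xri_aborted(struct toy_io_buf *bufs, int n, int xri)
	{
		for (int i = 0; i < n; i++) {
			struct toy_io_buf *b = &bufs[i];

			if (!b->on_abort_list || b->xri != xri)
				continue;
			b->on_abort_list = false;	/* ~ list_del_init() */
			b->flags &= ~SBUF_XBUSY;	/* hw no longer owns the XRI */
			if (b->has_waiter)
				printf("waking abort thread for xri %d\n", xri);
			toy_release(b);		/* ~ lpfc_release_scsi_buf_s4() */
			return;
		}
	}

	int main(void)
	{
		struct toy_io_buf bufs[] = {
			{ .xri = 7, .flags = SBUF_XBUSY, .on_abort_list = true,
			  .has_waiter = true },
			{ .xri = 9, .flags = SBUF_XBUSY, .on_abort_list = true },
		};

		toy_xri_aborted(bufs, 2, 7);	/* only xri 7 is completed */
		return 0;
	}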
705 * @psb: The scsi buffer which is being released.
707 * This routine releases @psb scsi buffer by adding it to tail of @phba
708 * lpfc_scsi_buf_list list.
711 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
715 psb->seg_cnt = 0;
716 psb->prot_seg_cnt = 0;
719 psb->pCmd = NULL;
720 psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
721 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
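
Line 721 appends the freed buffer to lpfc_scsi_buf_list_put, while allocation pops from the separate lpfc_scsi_buf_list_get and splices the put list over when the get list runs dry, so producers and consumers mostly take different locks. A single-threaded sketch of that two-list idea, with locking elided and a LIFO stack in place of the driver's list_add_tail():

	#include <stdio.h>

	struct buf { struct buf *next; int id; };

	static struct buf *get_list;	/* ~ phba->lpfc_scsi_buf_list_get */
	static struct buf *put_list;	/* ~ phba->lpfc_scsi_buf_list_put */

	static void release_buf(struct buf *b)
	{
		b->next = put_list;	/* releasers touch only the put side */
		put_list = b;
	}

	static struct buf *alloc_buf(void)
	{
		if (!get_list) {	/* get side empty: splice put side over */
			get_list = put_list;
			put_list = NULL;
		}
		struct buf *b = get_list;
		if (b)
			get_list = b->next;
		return b;
	}

	int main(void)
	{
		struct buf a = { .id = 1 }, b = { .id = 2 };

		release_buf(&a);
		release_buf(&b);
		printf("got id %d\n", alloc_buf()->id);	/* 2 (LIFO here) */
		printf("got id %d\n", alloc_buf()->id);	/* 1 */
		return 0;
	}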
728 * @psb: The scsi buffer which is being released.
730 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
731 * io_buf_list list.
736 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
741 psb->seg_cnt = 0;
742 psb->prot_seg_cnt = 0;
744 qp = psb->hdwq;
745 if (psb->flags & LPFC_SBUF_XBUSY) {
748 psb->pCmd = NULL;
749 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
753 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
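
The LPFC_SBUF_XBUSY test at line 745 is what distinguishes the SLI-4 path: the hardware owns the XRI until the XRI_ABORTED event arrives, so a busy buffer is parked on qp->lpfc_abts_io_buf_list and only the completion path at lines 453-515 finally frees it. A compact stand-alone sketch of that decision, again with stand-in types:

	#include <stdbool.h>
	#include <stdio.h>

	#define SBUF_XBUSY 0x1		/* ~ LPFC_SBUF_XBUSY */

	struct toy_buf { unsigned int flags; bool parked; };

	static void toy_release_s4(struct toy_buf *b)
	{
		if (b->flags & SBUF_XBUSY) {
			b->parked = true;	/* hw still owns the XRI: park it */
			return;
		}
		puts("returned to free pool");	/* ~ lpfc_release_io_buf() */
	}

	int main(void)
	{
		struct toy_buf busy = { .flags = SBUF_XBUSY }, idle = { 0 };

		toy_release_s4(&busy);		/* parked, not freed */
		toy_release_s4(&idle);		/* freed immediately */
		printf("busy parked: %d\n", busy.parked);
		return 0;
	}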
760 * @psb: The scsi buffer which is being released.
762 * This routine releases @psb scsi buffer by adding it to tail of @phba
763 * lpfc_scsi_buf_list list.
766 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
768 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
769 atomic_dec(&psb->ndlp->cmd_pending);
771 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
772 phba->lpfc_release_scsi_buf(phba, psb);
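
The wrapper at lines 766-772 never branches on the SLI revision: phba->lpfc_release_scsi_buf is a per-HBA method pointer filled in once at setup with the _s3 or _s4 routine. A minimal illustration of that dispatch pattern; the hba/buf types and the hba_setup() helper are stand-ins, not the lpfc API:

	#include <stdio.h>

	struct buf;
	struct hba {
		int sli_rev;
		void (*release_buf)(struct hba *, struct buf *);
	};

	static void release_s3(struct hba *h, struct buf *b)
	{
		(void)h; (void)b;
		puts("SLI-3 release path");
	}

	static void release_s4(struct hba *h, struct buf *b)
	{
		(void)h; (void)b;
		puts("SLI-4 release path");
	}

	static void hba_setup(struct hba *h, int sli_rev)
	{
		h->sli_rev = sli_rev;
		h->release_buf = (sli_rev == 4) ? release_s4 : release_s3;
	}

	int main(void)
	{
		struct hba h;

		hba_setup(&h, 4);
		h.release_buf(&h, NULL);	/* callers never test sli_rev */
		return 0;
	}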
3647 * @psb: The scsi buffer which is going to be un-mapped.
3653 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3661 if (psb->seg_cnt > 0)
3662 scsi_dma_unmap(psb->pCmd);
3663 if (psb->prot_seg_cnt > 0)
3664 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3665 scsi_prot_sg_count(psb->pCmd),
3666 psb->pCmd->sc_data_direction);
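
lpfc_scsi_unprep_dma_buf() undoes the earlier DMA mapping, and the guards ensure each unmap runs only if the matching map actually happened: scsi_dma_unmap() when a data scatter list was mapped (seg_cnt > 0), dma_unmap_sg() when a protection list was (prot_seg_cnt > 0). A toy model of that pairing convention; note the driver resets the counts in its release routines (lines 715-716 and 741-742 above) rather than here:

	#include <stdio.h>

	struct toy_buf {
		int seg_cnt;		/* ~ data s/g segments mapped */
		int prot_seg_cnt;	/* ~ DIF/DIX segments mapped */
	};

	static void toy_unprep(struct toy_buf *b)
	{
		if (b->seg_cnt > 0) {		/* ~ scsi_dma_unmap() */
			printf("unmap %d data segments\n", b->seg_cnt);
			b->seg_cnt = 0;
		}
		if (b->prot_seg_cnt > 0) {	/* ~ dma_unmap_sg() */
			printf("unmap %d protection segments\n",
			       b->prot_seg_cnt);
			b->prot_seg_cnt = 0;
		}
	}

	int main(void)
	{
		struct toy_buf b = { .seg_cnt = 3, .prot_seg_cnt = 1 };

		toy_unprep(&b);
		toy_unprep(&b);		/* second call does nothing */
		return 0;
	}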