Lines Matching refs:fcport

24 struct qedf_rport *fcport;
26 fcport = io_req->fcport;
27 if (io_req->fcport == NULL) {
28 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
32 qedf = fcport->qedf;
63 qedf_restart_rport(fcport);
298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
300 struct qedf_ctx *qedf = fcport->qedf;
309 free_sqes = atomic_read(&fcport->free_sqes);
319 if ((atomic_read(&fcport->num_active_ios) >=
323 atomic_read(&fcport->num_active_ios));
362 atomic_inc(&fcport->num_active_ios);
363 atomic_dec(&fcport->free_sqes);
368 io_req->fcport = fcport;
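
The qedf_alloc_cmd matches above show the admission accounting: a command is only allocated while the rport still has free SQEs and is under its active-I/O ceiling, and both counters are charged on success. A minimal user-space sketch of that accounting, using C11 atomics and hypothetical simplified types (rport_acct, alloc_cmd and MAX_ACTIVE_IOS are illustrative; the driver uses kernel atomic_t and derives its own limit):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_ACTIVE_IOS 64          /* illustrative ceiling; the driver derives its own */

struct rport_acct {
	atomic_int free_sqes;      /* send-queue entries still available */
	atomic_int num_active_ios; /* I/Os currently outstanding on this rport */
};

/* Admission check modeled on qedf_alloc_cmd: refuse the command unless an
 * SQE is free and the active-I/O ceiling is not hit, then charge both
 * counters, mirroring the atomic_inc/atomic_dec pair in the driver. */
static int alloc_cmd(struct rport_acct *a)
{
	if (atomic_load(&a->free_sqes) == 0)
		return -1;                       /* no room on the send queue */
	if (atomic_load(&a->num_active_ios) >= MAX_ACTIVE_IOS)
		return -1;                       /* too many outstanding I/Os */
	atomic_fetch_add(&a->num_active_ios, 1);
	atomic_fetch_sub(&a->free_sqes, 1);
	return 0;
}

int main(void)
{
	struct rport_acct a = { .free_sqes = 2, .num_active_ios = 0 };

	printf("%d\n", alloc_cmd(&a)); /* 0: first SQE taken */
	printf("%d\n", alloc_cmd(&a)); /* 0: second SQE taken */
	printf("%d\n", alloc_cmd(&a)); /* -1: out of SQEs */
	return 0;
}
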
406 struct qedf_ctx *qedf = io_req->fcport->qedf;
437 struct qedf_rport *fcport = io_req->fcport;
441 QEDF_WARN(&fcport->qedf->dbg_ctx,
452 atomic_dec(&fcport->num_active_ios);
454 if (atomic_read(&fcport->num_active_ios) < 0) {
455 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
461 io_req->fcport = NULL;
466 io_req->fcport = NULL;
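
The release-side matches are the mirror image: completion drops num_active_ios, warns if the counter underflows ("active_ios < 0."), and clears io_req->fcport so a late completion can be recognized. A self-contained sketch of that teardown, again with hypothetical simplified types (rport, io_req, release_cmd):

#include <stdatomic.h>
#include <stdio.h>

struct rport { atomic_int num_active_ios; };
struct io_req { struct rport *fcport; };

/* Release path modeled on the matches above: drop the active-I/O count,
 * flag underflow (the driver warns "active_ios < 0."), and sever the
 * io_req->fcport back-pointer so stale completions see NULL. */
static void release_cmd(struct io_req *io)
{
	struct rport *rp = io->fcport;

	if (atomic_fetch_sub(&rp->num_active_ios, 1) - 1 < 0)
		fprintf(stderr, "active_ios < 0.\n");
	io->fcport = NULL;
}

int main(void)
{
	struct rport rp = { .num_active_ios = 1 };
	struct io_req io = { .fcport = &rp };

	release_cmd(&io);
	printf("%d %p\n", atomic_load(&rp.num_active_ios), (void *)io.fcport);
	return 0;
}
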
586 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
596 struct qedf_ctx *qedf = fcport->qedf;
628 io_req->task_params->conn_cid = fcport->fw_cid;
631 io_req->task_params->is_tape_device = fcport->dev_type;
680 struct qedf_rport *fcport = io_req->fcport;
681 struct qedf_ctx *qedf = io_req->fcport->qedf;
708 io_req->task_params->conn_cid = fcport->fw_cid;
712 io_req->task_params->is_tape_device = fcport->dev_type;
757 /* Presumed that fcport->rport_lock is held */
758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
760 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
763 rval = fcport->sq_prod_idx;
766 fcport->sq_prod_idx++;
767 fcport->fw_sq_prod_idx++;
768 if (fcport->sq_prod_idx == total_sqe)
769 fcport->sq_prod_idx = 0;
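
qedf_get_sqe_idx is captured almost completely by the matches: the queue depth is derived from sq_mem_size, the caller receives the current producer slot, and two producer indices advance together, with only the driver-local one wrapping. The comment above it notes that fcport->rport_lock is presumed held, which is why no atomics are needed. A user-space sketch of the wrap logic, with WQE_SIZE standing in for sizeof(struct fcoe_wqe):

#include <stdint.h>
#include <stdio.h>

#define WQE_SIZE 64u /* assumed stand-in for sizeof(struct fcoe_wqe) */

/* Hypothetical stand-in for the send-queue fields of struct qedf_rport. */
struct sq {
	uint32_t sq_mem_size;    /* bytes backing the send queue */
	uint16_t sq_prod_idx;    /* driver-local producer index, wraps */
	uint16_t fw_sq_prod_idx; /* firmware-visible producer, monotonic */
};

/* Model of qedf_get_sqe_idx: hand out the current producer slot, advance
 * both indices, and wrap only the local copy at the queue depth; the
 * firmware copy keeps counting so the device can spot new work. */
static uint16_t get_sqe_idx(struct sq *q)
{
	uint16_t total_sqe = q->sq_mem_size / WQE_SIZE;
	uint16_t rval = q->sq_prod_idx;

	q->sq_prod_idx++;
	q->fw_sq_prod_idx++;
	if (q->sq_prod_idx == total_sqe)
		q->sq_prod_idx = 0;
	return rval;
}

int main(void)
{
	struct sq q = { .sq_mem_size = 3 * WQE_SIZE }; /* depth of 3 */

	for (int i = 0; i < 5; i++) {
		uint16_t slot = get_sqe_idx(&q);

		printf("slot=%u fw=%u\n", slot, q.fw_sq_prod_idx);
	}
	return 0;
}
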
774 void qedf_ring_doorbell(struct qedf_rport *fcport)
785 dbell.sq_prod = fcport->fw_sq_prod_idx;
791 writel(*(u32 *)&dbell, fcport->p_doorbell);
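
qedf_ring_doorbell publishes fw_sq_prod_idx to the device as a single 32-bit MMIO store; the driver type-puns with *(u32 *)&dbell before writel(). A sketch of the same handoff, where db_data is a hypothetical 4-byte layout (the real struct fcoe_db_data has more fields) and memcpy replaces the type-pun to stay strict-aliasing-safe:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical 4-byte doorbell; the real struct fcoe_db_data carries more
 * fields, this only models the sq_prod handoff. */
struct db_data {
	uint16_t params;
	uint16_t sq_prod;  /* mirrors fcport->fw_sq_prod_idx */
};

/* Model of qedf_ring_doorbell: the whole doorbell goes out as one 32-bit
 * store (writel() in the driver) so the device sees an atomic update. */
static void ring_doorbell(volatile uint32_t *p_doorbell, uint16_t fw_sq_prod_idx)
{
	struct db_data dbell = { .params = 0, .sq_prod = fw_sq_prod_idx };
	uint32_t raw;

	memcpy(&raw, &dbell, sizeof(raw));
	*p_doorbell = raw;                 /* stands in for writel() */
}

int main(void)
{
	uint32_t fake_mmio = 0;

	ring_doorbell(&fake_mmio, 7);
	printf("0x%08x\n", (unsigned)fake_mmio);
	return 0;
}
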
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
803 struct qedf_ctx *qedf = fcport->qedf;
813 io_log->port_id = fcport->rdata->ids.port_id;
846 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
887 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
888 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
900 sqe_idx = qedf_get_sqe_idx(fcport);
901 sqe = &fcport->sq[sqe_idx];
915 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
918 qedf_ring_doorbell(fcport);
924 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
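
qedf_post_io_req re-checks the session state even though qedf_queuecommand already did, because the rport can start uploading between the two. A sketch of that gate, with hypothetical flag masks (the real QEDF_RPORT_* flags are bit numbers used with test_bit() on fcport->flags):

#include <stdio.h>

#define RPORT_SESSION_READY        (1u << 0) /* illustrative masks */
#define RPORT_UPLOADING_CONNECTION (1u << 1)

/* Gate modeled on the top of qedf_post_io_req: post only while the
 * session is fully offloaded and not being torn down; the state can
 * change between queuecommand and the actual post, so it is re-checked. */
static int may_post(unsigned long flags)
{
	if (!(flags & RPORT_SESSION_READY) ||
	    (flags & RPORT_UPLOADING_CONNECTION))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", may_post(RPORT_SESSION_READY));                              /* 1 */
	printf("%d\n", may_post(RPORT_SESSION_READY | RPORT_UPLOADING_CONNECTION)); /* 0 */
	printf("%d\n", may_post(0));                                                /* 0 */
	return 0;
}
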
936 struct qedf_rport *fcport;
997 fcport = (struct qedf_rport *)&rp[1];
999 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1000 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1009 atomic_inc(&fcport->ios_to_queue);
1011 if (fcport->retry_delay_timestamp) {
1012 /* Take fcport->rport_lock for resetting the delay_timestamp */
1013 spin_lock_irqsave(&fcport->rport_lock, flags);
1014 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1015 fcport->retry_delay_timestamp = 0;
1017 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1020 atomic_dec(&fcport->ios_to_queue);
1023 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1026 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1029 atomic_dec(&fcport->ios_to_queue);
1035 /* Take fcport->rport_lock for posting to fcport send queue */
1036 spin_lock_irqsave(&fcport->rport_lock, flags);
1037 if (qedf_post_io_req(fcport, io_req)) {
1040 atomic_inc(&fcport->free_sqes);
1043 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1044 atomic_dec(&fcport->ios_to_queue);
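
The queuecommand matches show two things: the rport private area is recovered from the libfc rport with the &rp[1] idiom, and a target-imposed retry delay is honored by bouncing commands until jiffies passes retry_delay_timestamp, with the stamp reset under fcport->rport_lock. A sketch of the delay check, where time_after_u32 reproduces the kernel's wrap-safe time_after() comparison and the caller is assumed to hold the lock:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has a passed b?" test, the trick behind the kernel's
 * time_after(): signed subtraction survives counter wrap-around. */
static int time_after_u32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

/* Model of the retry_delay_timestamp handling: while the target-imposed
 * delay runs, the command is bounced for requeue; once jiffies passes
 * the stamp it is reset to 0. Caller holds the lock, as in the driver. */
static int check_retry_delay(uint32_t *retry_delay_timestamp, uint32_t jiffies)
{
	if (*retry_delay_timestamp) {
		if (time_after_u32(jiffies, *retry_delay_timestamp))
			*retry_delay_timestamp = 0; /* delay elapsed */
		else
			return -1;                  /* still delayed: requeue */
	}
	return 0;                                   /* OK to issue the I/O */
}

int main(void)
{
	uint32_t stamp = 100;

	printf("%d\n", check_retry_delay(&stamp, 50));  /* -1: still waiting */
	printf("%d\n", check_retry_delay(&stamp, 150)); /*  0: delay cleared */
	printf("%u\n", stamp);                          /*  0 */
	return 0;
}
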
1054 struct qedf_ctx *qedf = io_req->fcport->qedf;
1122 struct qedf_rport *fcport;
1169 fcport = io_req->fcport;
1175 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1176 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1177 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1179 "Dropping good completion xid=0x%x as fcport is flushing",
1280 /* Take fcport->rport_lock to
1283 spin_lock_irqsave(&fcport->rport_lock, flags);
1284 fcport->retry_delay_timestamp =
1286 spin_unlock_irqrestore(&fcport->rport_lock,
1304 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1407 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1432 struct qedf_rport *fcport = io_req->fcport;
1443 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1445 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1449 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1467 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1508 if (io_req->fcport == NULL) {
1509 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1519 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1521 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1525 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1532 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1533 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1534 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1536 "Dropping EQE for xid=0x%x as fcport is flushing",
1583 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1594 if (!fcport) {
1595 QEDF_ERR(NULL, "fcport is NULL\n");
1599 /* Check that fcport is still offloaded */
1600 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1601 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1605 qedf = fcport->qedf;
1613 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1615 while (atomic_read(&fcport->ios_to_queue)) {
1618 atomic_read(&fcport->ios_to_queue));
1622 atomic_read(&fcport->ios_to_queue));
1632 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1633 atomic_read(&fcport->num_active_ios), fcport,
1634 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1639 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1641 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1642 fcport->lun_reset_lun = lun;
1650 if (!io_req->fcport)
1668 if (io_req->fcport != fcport)
1673 * Workaround: Within qedf_send_rrq, we check if the fcport is
1801 flush_cnt, atomic_read(&fcport->num_active_ios));
1803 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1805 while (atomic_read(&fcport->num_active_ios)) {
1809 atomic_read(&fcport->num_active_ios),
1815 atomic_read(&fcport->num_active_ios));
1818 if (io_req->fcport &&
1819 io_req->fcport == fcport) {
1841 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1842 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
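
qedf_flush_active_ios brackets the walk with state flags: lun == -1 selects a full target-reset flush, any other value restricts the flush to one LUN via lun_reset_lun, and both bits are cleared once outstanding I/O has drained. A sketch of that choreography (rport_model and the bit masks are illustrative; the elided middle is where the driver walks and cleans up io_reqs, then polls num_active_ios to zero):

#include <stdio.h>

#define RPORT_IN_TARGET_RESET (1u << 2) /* illustrative masks */
#define RPORT_IN_LUN_RESET    (1u << 3)

struct rport_model {
	unsigned long flags;
	int lun_reset_lun;
};

/* Flag choreography modeled on qedf_flush_active_ios: lun == -1 means
 * "flush everything" (target reset); any other value restricts the
 * flush to one LUN. */
static void flush_active_ios(struct rport_model *rp, int lun)
{
	if (lun == -1) {
		rp->flags |= RPORT_IN_TARGET_RESET;
	} else {
		rp->flags |= RPORT_IN_LUN_RESET;
		rp->lun_reset_lun = lun;
	}

	/* ... walk outstanding io_reqs, skip those on other rports,
	 * abort or clean up the rest, wait for num_active_ios to hit 0 ... */

	rp->flags &= ~(RPORT_IN_LUN_RESET | RPORT_IN_TARGET_RESET);
}

int main(void)
{
	struct rport_model rp = { 0, -1 };

	flush_active_ios(&rp, 4);
	printf("flags=%lu lun=%d\n", rp.flags, rp.lun_reset_lun);
	return 0;
}
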
1854 struct qedf_rport *fcport = io_req->fcport;
1865 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1871 qedf = fcport->qedf;
1872 rdata = fcport->rdata;
1895 if (!atomic_read(&fcport->free_sqes)) {
1901 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1902 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1907 spin_lock_irqsave(&fcport->rport_lock, flags);
1915 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1921 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1939 spin_lock_irqsave(&fcport->rport_lock, flags);
1941 sqe_idx = qedf_get_sqe_idx(fcport);
1942 sqe = &fcport->sq[sqe_idx];
1947 qedf_ring_doorbell(fcport);
1949 spin_unlock_irqrestore(&fcport->rport_lock, flags);
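
The ABTS path, like the cleanup and TMF paths below, performs the producer-index grab, the SQE fill and the doorbell inside a single fcport->rport_lock critical section. A user-space sketch of that pattern with a pthread mutex standing in for spin_lock_irqsave (build with -pthread; sq_model and the modulo wrap are simplifications of qedf_get_sqe_idx):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Toy send queue; real SQEs are struct fcoe_wqe and the lock is
 * fcport->rport_lock taken with spin_lock_irqsave(). */
struct sq_model {
	pthread_mutex_t rport_lock;
	uint16_t sq_prod_idx;
	uint32_t sq[8];
};

/* One posting critical section, as in the ABTS/cleanup/TMF paths: the
 * producer-index grab, the SQE fill and the doorbell all happen under
 * a single lock hold so concurrent posters cannot interleave. */
static void post_one(struct sq_model *q, uint32_t wqe)
{
	uint16_t idx;

	pthread_mutex_lock(&q->rport_lock);
	idx = q->sq_prod_idx++ % 8;  /* crude qedf_get_sqe_idx() stand-in */
	q->sq[idx] = wqe;            /* fill the SQE */
	/* qedf_ring_doorbell() would fire here, still under the lock */
	pthread_mutex_unlock(&q->rport_lock);
}

int main(void)
{
	struct sq_model q = { PTHREAD_MUTEX_INITIALIZER, 0, {0} };

	post_one(&q, 0xABCD);
	printf("sq[0]=0x%x\n", (unsigned)q.sq[0]);
	return 0;
}
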
1962 struct qedf_rport *fcport = io_req->fcport;
1973 if (!fcport) {
1975 "Dropping ABTS completion xid=0x%x as fcport is NULL",
1984 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1985 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1987 "Dropping ABTS completion xid=0x%x as fcport is flushing",
2050 struct qedf_ctx *qedf = io_req->fcport->qedf;
2152 struct qedf_rport *fcport;
2161 fcport = io_req->fcport;
2162 if (!fcport) {
2163 QEDF_ERR(NULL, "fcport is NULL.\n");
2168 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2173 qedf = fcport->qedf;
2194 if (!atomic_read(&fcport->free_sqes)) {
2212 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2214 refcount, fcport, fcport->rdata->ids.port_id);
2217 spin_lock_irqsave(&fcport->rport_lock, flags);
2219 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2224 spin_lock_irqsave(&fcport->rport_lock, flags);
2226 sqe_idx = qedf_get_sqe_idx(fcport);
2227 sqe = &fcport->sq[sqe_idx];
2232 qedf_ring_doorbell(fcport);
2234 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2290 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2295 struct qedf_ctx *qedf = fcport->qedf;
2311 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2312 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2317 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2331 io_req->fcport = fcport;
2356 spin_lock_irqsave(&fcport->rport_lock, flags);
2358 sqe_idx = qedf_get_sqe_idx(fcport);
2359 sqe = &fcport->sq[sqe_idx];
2362 qedf_init_task(fcport, lport, io_req, task, sqe);
2363 qedf_ring_doorbell(fcport);
2365 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2385 * Double check that fcport has not gone into an uploading state before
2388 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2390 "fcport is uploading, not executing flush.\n");
2398 qedf_flush_active_ios(fcport, lun);
2400 qedf_flush_active_ios(fcport, -1);
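
The tail of qedf_execute_tmf re-checks QEDF_RPORT_UPLOADING_CONNECTION after the TMF completes, since the connection may have started tearing down while the command waited, and only then flushes, per-LUN for a LUN reset or with lun == -1 for a target reset. A sketch of that decision (the flag and TMF masks are stand-ins for the kernel's definitions):

#include <stdio.h>

#define RPORT_UPLOADING_CONNECTION (1u << 1) /* illustrative mask */
#define TMF_LUN_RESET 0x10                   /* stand-ins for the FCP_TMF_* values */
#define TMF_TGT_RESET 0x20

/* Tail of qedf_execute_tmf, modeled: once the TMF completes, re-check
 * the connection state (it may have started uploading while we waited)
 * and only then flush outstanding I/Os, per-LUN or whole-target. */
static void tmf_post_flush(unsigned long flags, unsigned int tm_flags, int lun)
{
	if (flags & RPORT_UPLOADING_CONNECTION) {
		printf("fcport is uploading, not executing flush.\n");
		return;
	}
	if (tm_flags == TMF_LUN_RESET)
		printf("qedf_flush_active_ios(fcport, %d)\n", lun);
	else
		printf("qedf_flush_active_ios(fcport, -1)\n");
}

int main(void)
{
	tmf_post_flush(0, TMF_LUN_RESET, 3);
	tmf_post_flush(0, TMF_TGT_RESET, 0);
	tmf_post_flush(RPORT_UPLOADING_CONNECTION, TMF_TGT_RESET, 0);
	return 0;
}
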
2417 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2424 struct fc_rport_priv *rdata = fcport->rdata;
2459 if (!fcport) {
2465 qedf = fcport->qedf;
2473 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2491 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2492 if (!fcport->rdata)
2493 QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2494 fcport);
2497 "fcport %p port_id=%06x is uploading.\n",
2498 fcport, fcport->rdata->ids.port_id);
2503 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
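
Finally, qedf_initiate_tmf validates the session before dispatching to qedf_execute_tmf; note how the matches test fcport->rdata before dereferencing it for the log message, because an uploading fcport may already have dropped its rdata. A sketch of those entry checks with hypothetical model types (rdata_model mirrors struct fc_rport_priv):

#include <stdio.h>

#define RPORT_UPLOADING_CONNECTION (1u << 1) /* illustrative mask */

struct rdata_model { unsigned int port_id; };
struct rport_entry {
	unsigned long flags;
	struct rdata_model *rdata;
};

/* Entry checks modeled on qedf_initiate_tmf: the rdata pointer is
 * tested before it is dereferenced for the log message. */
static int tmf_entry_checks(struct rport_entry *rp)
{
	if (!rp)
		return -1;                     /* no offloaded session */
	if (rp->flags & RPORT_UPLOADING_CONNECTION) {
		if (!rp->rdata)
			printf("fcport %p is uploading.\n", (void *)rp);
		else
			printf("fcport %p port_id=%06x is uploading.\n",
			       (void *)rp, rp->rdata->port_id);
		return -1;
	}
	return 0;                              /* proceed to qedf_execute_tmf */
}

int main(void)
{
	struct rport_entry rp = { RPORT_UPLOADING_CONNECTION, NULL };

	tmf_entry_checks(&rp);
	return 0;
}
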