Lines Matching refs:fcport
(Cross-reference listing: each entry below is <source line number> <matching line>. The functions shown suggest the qedf FCoE driver's I/O path, apparently drivers/scsi/qedf/qedf_io.c.)

24 struct qedf_rport *fcport;
31 fcport = io_req->fcport;
32 if (io_req->fcport == NULL) {
33 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
37 qedf = fcport->qedf;
68 qedf_restart_rport(fcport);
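
The cluster above (lines 31-37) validates the io_req->fcport back-pointer before touching fcport->qedf, since a request can race with a flush that severs the link. A minimal sketch of that guard, with stub types standing in for the driver's real ones:

    #include <stdio.h>
    #include <stddef.h>

    struct qedf_ctx   { int dummy; };
    struct qedf_rport { struct qedf_ctx *qedf; };
    struct qedf_ioreq { struct qedf_rport *fcport; };

    /* Returns the owning context, or NULL if the request was already unlinked. */
    static struct qedf_ctx *ioreq_ctx(struct qedf_ioreq *io_req)
    {
        struct qedf_rport *fcport = io_req->fcport;

        if (fcport == NULL) {          /* request raced with a flush/cleanup */
            fprintf(stderr, "fcport is NULL.\n");
            return NULL;
        }
        return fcport->qedf;           /* safe: back-pointer validated first */
    }
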
303 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
305 struct qedf_ctx *qedf = fcport->qedf;
314 free_sqes = atomic_read(&fcport->free_sqes);
324 if ((atomic_read(&fcport->num_active_ios) >=
328 atomic_read(&fcport->num_active_ios));
367 atomic_inc(&fcport->num_active_ios);
368 atomic_dec(&fcport->free_sqes);
373 io_req->fcport = fcport;
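
Lines 303-373 suggest qedf_alloc_cmd admits a new command only while the rport still has free send-queue entries and is under its active-I/O cap, then charges both counters and links the request back to the rport. A userspace model with C11 atomics; the cap value and all names here are illustrative, not the driver's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_ACTIVE_IOS 64   /* illustrative cap, not the driver's value */

    struct rport {
        atomic_int free_sqes;       /* send-queue entries still available   */
        atomic_int num_active_ios;  /* commands currently owned by firmware */
    };

    struct ioreq { struct rport *fcport; };

    /* Admit a command only if both resources are available, then charge them. */
    static bool alloc_cmd(struct rport *fcport, struct ioreq *io_req)
    {
        if (atomic_load(&fcport->free_sqes) <= 0)
            return false;                   /* no room on the send queue */
        if (atomic_load(&fcport->num_active_ios) >= MAX_ACTIVE_IOS)
            return false;                   /* per-rport outstanding cap hit */

        atomic_fetch_add(&fcport->num_active_ios, 1);
        atomic_fetch_sub(&fcport->free_sqes, 1);
        io_req->fcport = fcport;            /* back-pointer used at completion */
        return true;
    }
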
411 struct qedf_ctx *qedf = io_req->fcport->qedf;
442 struct qedf_rport *fcport = io_req->fcport;
446 QEDF_WARN(&fcport->qedf->dbg_ctx,
457 atomic_dec(&fcport->num_active_ios);
459 if (atomic_read(&fcport->num_active_ios) < 0) {
460 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
466 io_req->fcport = NULL;
471 io_req->fcport = NULL;
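
The release path (lines 442-471) undoes that accounting: it drops num_active_ios, warns if the counter has gone negative, and severs the back-pointer so a stale completion cannot reach a freed rport. Sketched with the same kind of stubs:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stddef.h>

    struct rport { atomic_int num_active_ios; };
    struct ioreq { struct rport *fcport; };

    static void release_cmd(struct ioreq *io_req)
    {
        struct rport *fcport = io_req->fcport;

        /* atomic_fetch_sub returns the old value; old - 1 is the new count. */
        if (atomic_fetch_sub(&fcport->num_active_ios, 1) - 1 < 0)
            fprintf(stderr, "active_ios < 0.\n");  /* accounting-bug tripwire */

        io_req->fcport = NULL;    /* request no longer owned by this rport */
    }
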
591 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
601 struct qedf_ctx *qedf = fcport->qedf;
633 io_req->task_params->conn_cid = fcport->fw_cid;
636 io_req->task_params->is_tape_device = fcport->dev_type;
685 struct qedf_rport *fcport = io_req->fcport;
686 struct qedf_ctx *qedf = io_req->fcport->qedf;
713 io_req->task_params->conn_cid = fcport->fw_cid;
717 io_req->task_params->is_tape_device = fcport->dev_type;
762 /* Presumed that fcport->rport_lock is held */
763 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
765 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
768 rval = fcport->sq_prod_idx;
771 fcport->sq_prod_idx++;
772 fcport->fw_sq_prod_idx++;
773 if (fcport->sq_prod_idx == total_sqe)
774 fcport->sq_prod_idx = 0;
779 void qedf_ring_doorbell(struct qedf_rport *fcport)
790 dbell.sq_prod = fcport->fw_sq_prod_idx;
796 writel(*(u32 *)&dbell, fcport->p_doorbell);
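
Lines 762-796 outline the send-queue producer. qedf_get_sqe_idx hands out the current slot and advances two counters: sq_prod_idx wraps at the queue size and indexes the SQ array, while fw_sq_prod_idx is left to wrap only at its type width and is the value written to the doorbell. A userspace model of the wraparound; the queue size and the doorbell write are stand-ins for the real sq_mem_size arithmetic and writel():

    #include <stdint.h>

    #define TOTAL_SQE 128   /* stand-in for sq_mem_size / sizeof(struct fcoe_wqe) */

    struct rport {
        uint16_t sq_prod_idx;     /* wraps at TOTAL_SQE: index into the SQ array */
        uint16_t fw_sq_prod_idx;  /* wraps only at 2^16: value given to the HW   */
    };

    /* Caller is presumed to hold the rport lock, as the source comment notes. */
    static uint16_t get_sqe_idx(struct rport *fcport)
    {
        uint16_t rval = fcport->sq_prod_idx;

        fcport->sq_prod_idx++;
        fcport->fw_sq_prod_idx++;
        if (fcport->sq_prod_idx == TOTAL_SQE)
            fcport->sq_prod_idx = 0;   /* wrap the array index only */

        return rval;
    }

    /* Stand-in for the doorbell: the driver writes fw_sq_prod_idx via writel(). */
    static void ring_doorbell(struct rport *fcport, volatile uint32_t *db_reg)
    {
        *db_reg = fcport->fw_sq_prod_idx;
    }
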
805 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
808 struct qedf_ctx *qedf = fcport->qedf;
819 io_log->port_id = fcport->rdata->ids.port_id;
852 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
893 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
894 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
906 sqe_idx = qedf_get_sqe_idx(fcport);
907 sqe = &fcport->sq[sqe_idx];
921 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
924 qedf_ring_doorbell(fcport);
930 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
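
Before anything is posted (lines 893-894, and again at 1005-1006), the rport must be fully offloaded and not mid-teardown. The test compresses to one predicate over the flag bits, sketched here with invented bit values in place of the QEDF_RPORT_* constants:

    #include <stdbool.h>

    #define RPORT_SESSION_READY        (1UL << 0)   /* illustrative bit values */
    #define RPORT_UPLOADING_CONNECTION (1UL << 1)

    struct rport { unsigned long flags; };

    /* I/O may be posted only on a ready session that is not being uploaded. */
    static bool session_usable(const struct rport *fcport)
    {
        return  (fcport->flags & RPORT_SESSION_READY) &&
               !(fcport->flags & RPORT_UPLOADING_CONNECTION);
    }
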
942 struct qedf_rport *fcport;
1003 fcport = (struct qedf_rport *)&rp[1];
1005 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1006 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1015 atomic_inc(&fcport->ios_to_queue);
1017 if (fcport->retry_delay_timestamp) {
1018 /* Take fcport->rport_lock for resetting the delay_timestamp */
1019 spin_lock_irqsave(&fcport->rport_lock, flags);
1020 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1021 fcport->retry_delay_timestamp = 0;
1023 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1026 atomic_dec(&fcport->ios_to_queue);
1029 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1032 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1035 atomic_dec(&fcport->ios_to_queue);
1041 /* Take fcport->rport_lock for posting to fcport send queue */
1042 spin_lock_irqsave(&fcport->rport_lock, flags);
1043 if (qedf_post_io_req(fcport, io_req)) {
1046 atomic_inc(&fcport->free_sqes);
1049 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1050 atomic_dec(&fcport->ios_to_queue);
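
The queuecommand cluster (lines 942-1050) adds two pieces of bookkeeping around the post: ios_to_queue is raised before the attempt and dropped on every exit path, and retry_delay_timestamp, stamped on the completion side (lines 1293-1299) when the target asks for new commands to be held off, blocks the fast path until it expires, with the reset done under rport_lock. A userspace model using a pthread mutex and a monotonic tick in place of jiffies (all names stubbed):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the kernel's time_after(a, b). */
    static bool time_after(uint64_t a, uint64_t b) { return (int64_t)(a - b) > 0; }

    struct rport {
        pthread_mutex_t rport_lock;
        uint64_t retry_delay_timestamp;   /* 0 means no delay in force */
        atomic_int ios_to_queue;
    };

    /* True if the command may proceed now, false if it must be requeued. */
    static bool check_retry_delay(struct rport *fcport, uint64_t now)
    {
        bool ok = true;

        if (fcport->retry_delay_timestamp) {
            pthread_mutex_lock(&fcport->rport_lock);
            if (time_after(now, fcport->retry_delay_timestamp))
                fcport->retry_delay_timestamp = 0;   /* expired: clear it   */
            else
                ok = false;                          /* still delayed: back off */
            pthread_mutex_unlock(&fcport->rport_lock);
        }
        return ok;
    }

    static bool try_queue(struct rport *fcport, uint64_t now)
    {
        atomic_fetch_add(&fcport->ios_to_queue, 1);
        if (!check_retry_delay(fcport, now)) {
            atomic_fetch_sub(&fcport->ios_to_queue, 1); /* undo on every exit */
            return false;
        }
        /* ... allocate and post the request here ... */
        atomic_fetch_sub(&fcport->ios_to_queue, 1);
        return true;
    }
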
1060 struct qedf_ctx *qedf = io_req->fcport->qedf;
1129 struct qedf_rport *fcport;
1182 fcport = io_req->fcport;
1188 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1189 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1190 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1192 "Dropping good completion xid=0x%x as fcport is flushing",
1293 /* Take fcport->rport_lock to
1296 spin_lock_irqsave(&fcport->rport_lock, flags);
1297 fcport->retry_delay_timestamp =
1299 spin_unlock_irqrestore(&fcport->rport_lock,
1317 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
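
On the completion side (lines 1182-1190, and again at 1552-1554 and 2004-2005), a CQE is discarded when the rport is mid-flush: always during a target reset, and during a LUN reset only when the command's LUN matches the one being reset. That predicate, with illustrative flag bits:

    #include <stdbool.h>
    #include <stdint.h>

    #define RPORT_IN_TARGET_RESET (1UL << 2)   /* illustrative bit values */
    #define RPORT_IN_LUN_RESET    (1UL << 3)

    struct rport {
        unsigned long flags;
        uint64_t lun_reset_lun;  /* LUN being flushed while IN_LUN_RESET is set */
    };

    /* True if a completion for cmd_lun must be dropped because a flush owns it. */
    static bool completion_flushing(const struct rport *fcport, uint64_t cmd_lun)
    {
        if (fcport->flags & RPORT_IN_TARGET_RESET)
            return true;                            /* whole target is flushing */
        if ((fcport->flags & RPORT_IN_LUN_RESET) &&
            cmd_lun == fcport->lun_reset_lun)
            return true;                            /* this LUN is flushing */
        return false;
    }
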
1427 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1452 struct qedf_rport *fcport = io_req->fcport;
1463 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1465 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1469 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1487 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1528 if (io_req->fcport == NULL) {
1529 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1539 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1541 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1545 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1552 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1553 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1554 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1556 "Dropping EQE for xid=0x%x as fcport is flushing",
1603 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1614 if (!fcport) {
1615 QEDF_ERR(NULL, "fcport is NULL\n");
1619 /* Check that fcport is still offloaded */
1620 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1621 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1625 qedf = fcport->qedf;
1633 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1635 while (atomic_read(&fcport->ios_to_queue)) {
1638 atomic_read(&fcport->ios_to_queue));
1642 atomic_read(&fcport->ios_to_queue));
1652 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1653 atomic_read(&fcport->num_active_ios), fcport,
1654 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1659 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1661 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1662 fcport->lun_reset_lun = lun;
1670 if (!io_req->fcport)
1688 if (io_req->fcport != fcport)
1693 * Workaround: Within qedf_send_rrq, we check if the fcport is
1821 flush_cnt, atomic_read(&fcport->num_active_ios));
1823 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1825 while (atomic_read(&fcport->num_active_ios)) {
1829 atomic_read(&fcport->num_active_ios),
1835 atomic_read(&fcport->num_active_ios));
1838 if (io_req->fcport &&
1839 io_req->fcport == fcport) {
1861 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1862 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
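
qedf_flush_active_ios (lines 1603-1862) brackets the flush with the reset flags above and, when the connection is also uploading, waits in bounded poll loops for ios_to_queue and then num_active_ios to drain to zero. A userspace model of one such drain loop; the poll interval and retry budget are invented, standing in for the driver's msleep()-based waits:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Wait for a counter to drain to zero; give up after max_polls sleeps. */
    static bool drain_counter(atomic_int *counter, const char *name, int max_polls)
    {
        while (atomic_load(counter)) {
            if (max_polls-- <= 0) {
                fprintf(stderr, "timed out waiting for %s=%d\n",
                        name, atomic_load(counter));
                return false;      /* leaked requests: log and carry on */
            }
            usleep(20000);         /* ~20 ms poll, a stand-in for msleep() */
        }
        return true;
    }

    /* Flush order mirrors the listing: queued-but-unposted first, then active. */
    static void flush_active_ios(atomic_int *ios_to_queue, atomic_int *num_active_ios)
    {
        drain_counter(ios_to_queue, "ios_to_queue", 50);
        /* ... walk and abort/complete each outstanding request here ... */
        drain_counter(num_active_ios, "num_active_ios", 50);
    }
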
1874 struct qedf_rport *fcport = io_req->fcport;
1885 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1891 qedf = fcport->qedf;
1892 rdata = fcport->rdata;
1915 if (!atomic_read(&fcport->free_sqes)) {
1921 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1922 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1927 spin_lock_irqsave(&fcport->rport_lock, flags);
1935 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1941 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1959 spin_lock_irqsave(&fcport->rport_lock, flags);
1961 sqe_idx = qedf_get_sqe_idx(fcport);
1962 sqe = &fcport->sq[sqe_idx];
1967 qedf_ring_doorbell(fcport);
1969 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1982 struct qedf_rport *fcport = io_req->fcport;
1993 if (!fcport) {
1995 "Dropping ABTS completion xid=0x%x as fcport is NULL",
2004 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
2005 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
2007 "Dropping ABTS completion xid=0x%x as fcport is flushing",
2070 struct qedf_ctx *qedf = io_req->fcport->qedf;
2172 struct qedf_rport *fcport;
2181 fcport = io_req->fcport;
2182 if (!fcport) {
2183 QEDF_ERR(NULL, "fcport is NULL.\n");
2188 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2193 qedf = fcport->qedf;
2214 if (!atomic_read(&fcport->free_sqes)) {
2232 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2234 refcount, fcport, fcport->rdata->ids.port_id);
2237 spin_lock_irqsave(&fcport->rport_lock, flags);
2239 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2244 spin_lock_irqsave(&fcport->rport_lock, flags);
2246 sqe_idx = qedf_get_sqe_idx(fcport);
2247 sqe = &fcport->sq[sqe_idx];
2252 qedf_ring_doorbell(fcport);
2254 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2310 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2315 struct qedf_ctx *qedf = fcport->qedf;
2331 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2332 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2337 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2351 io_req->fcport = fcport;
2376 spin_lock_irqsave(&fcport->rport_lock, flags);
2378 sqe_idx = qedf_get_sqe_idx(fcport);
2379 sqe = &fcport->sq[sqe_idx];
2382 qedf_init_task(fcport, lport, io_req, task, sqe);
2383 qedf_ring_doorbell(fcport);
2385 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2405 * Double check that fcport has not gone into an uploading state before
2408 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2410 "fcport is uploading, not executing flush.\n");
2418 qedf_flush_active_ios(fcport, lun);
2420 qedf_flush_active_ios(fcport, -1);
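
qedf_execute_tmf (lines 2310-2420) allocates a task-management request, posts it under rport_lock like any other SQE, re-checks that the connection has not started uploading, and then flushes either one LUN or, for a target reset, everything (lun == -1). A sketch of that scoping convention; the TMF flag values are illustrative stand-ins for the FCP codes:

    #include <stdio.h>

    /* lun >= 0 flushes one LUN; lun == -1 flushes every outstanding command. */
    static void flush_scope(int lun)
    {
        if (lun < 0)
            printf("flushing all LUNs\n");
        else
            printf("flushing LUN %d only\n", lun);
        /* ... walk the active list, skipping commands outside the scope ... */
    }

    #define TMF_LUN_RESET 0x10   /* illustrative, standing in for FCP_TMF_* */
    #define TMF_TGT_RESET 0x20

    static void execute_tmf(int tm_flags, int lun)
    {
        /* ... allocate the TMF request, post it, wait for completion ... */
        if (tm_flags & TMF_LUN_RESET)
            flush_scope(lun);   /* scope the flush to the reset LUN */
        else
            flush_scope(-1);    /* target reset: flush everything   */
    }
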
2437 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2444 struct fc_rport_priv *rdata = fcport->rdata;
2479 if (!fcport) {
2485 qedf = fcport->qedf;
2493 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2511 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2512 if (!fcport->rdata)
2513 QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2514 fcport);
2517 "fcport %p port_id=%06x is uploading.\n",
2518 fcport, fcport->rdata->ids.port_id);
2523 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
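
The handler entry at line 2437 recovers the driver's per-rport state with (struct qedf_rport *)&rp[1]: the fc_rport private data is laid out as the libfc structure immediately followed by the LLD's area, so one element past the libfc pointer lands on the driver data. A standalone illustration of that layout trick, with stub structures in place of fc_rport_libfc_priv and qedf_rport:

    #include <stdio.h>
    #include <stdlib.h>

    struct libfc_priv { int libfc_state;  };  /* stand-in for fc_rport_libfc_priv */
    struct lld_rport  { int driver_state; };  /* stand-in for qedf_rport          */

    int main(void)
    {
        /* One allocation holds the midlayer part followed by the LLD part. */
        struct libfc_priv *rp =
            calloc(1, sizeof(struct libfc_priv) + sizeof(struct lld_rport));
        if (!rp)
            return 1;

        /* &rp[1] is the first byte past the libfc struct: the LLD's area. */
        struct lld_rport *fcport = (struct lld_rport *)&rp[1];
        fcport->driver_state = 42;

        printf("lld data lives at offset %zu\n", (size_t)((char *)fcport - (char *)rp));
        free(rp);
        return 0;
    }
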