Lines Matching refs:rport (drivers/scsi/scsi_transport_fc.c)

350 	struct fc_rport *rport = starget_to_rport(starget);
357 if (rport) {
358 fc_starget_node_name(starget) = rport->node_name;
359 fc_starget_port_name(starget) = rport->port_name;
360 fc_starget_port_id(starget) = rport->port_id;
695 struct fc_rport *rport = transport_class_to_rport(dev); \
696 struct Scsi_Host *shost = rport_to_shost(rport); \
699 !((rport->port_state == FC_PORTSTATE_BLOCKED) || \
700 (rport->port_state == FC_PORTSTATE_DELETED) || \
701 (rport->port_state == FC_PORTSTATE_NOTPRESENT))) \
702 i->f->get_rport_##field(rport); \
703 return snprintf(buf, sz, format_string, cast rport->field); \
713 struct fc_rport *rport = transport_class_to_rport(dev); \
714 struct Scsi_Host *shost = rport_to_shost(rport); \
717 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
718 (rport->port_state == FC_PORTSTATE_DELETED) || \
719 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
724 i->f->set_rport_##field(rport, val); \
730 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
735 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
741 static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \
751 struct fc_rport *rport = transport_class_to_rport(dev); \
752 return snprintf(buf, sz, format_string, cast rport->field); \
757 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
762 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
771 struct fc_rport *rport = transport_class_to_rport(dev); \
773 name = get_fc_##title##_name(rport->title); \
778 static FC_DEVICE_ATTR(rport, title, S_IRUGO, \
825 struct fc_rport *rport = transport_class_to_rport(dev);
826 if (rport->supported_classes == FC_COS_UNSPECIFIED)
828 return get_fc_cos_names(rport->supported_classes, buf);
830 static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
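The macro fragments above (source lines 695-830) generate the per-rport sysfs attributes that end up under /sys/class/fc_remote_ports/rport-H:C-N/ (the name format set at source line 2637). As a rough user-space sketch of that result; the rport name rport-0:0-1 is only an example and not every attribute is exposed for every driver or port:

    /* Dump a few attributes of one FC remote port.  Each file corresponds
     * to a show_fc_rport_* function produced by the macros above. */
    #include <stdio.h>

    int main(void)
    {
        const char *attrs[] = { "node_name", "port_name", "port_id", "roles",
                                "port_state", "dev_loss_tmo",
                                "fast_io_fail_tmo" };
        char path[128], line[128];

        for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
            snprintf(path, sizeof(path),
                     "/sys/class/fc_remote_ports/rport-0:0-1/%s", attrs[i]);
            FILE *f = fopen(path, "r");
            if (!f)
                continue;   /* attribute not present for this rport */
            if (fgets(line, sizeof(line), f))
                printf("%s: %s", attrs[i], line);
            fclose(f);
        }
        return 0;
    }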
854 static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
857 struct Scsi_Host *shost = rport_to_shost(rport);
860 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
861 (rport->port_state == FC_PORTSTATE_DELETED) ||
862 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
874 if (rport->fast_io_fail_tmo == -1 &&
878 i->f->set_rport_dev_loss_tmo(rport, val);
887 struct fc_rport *rport = transport_class_to_rport(dev);
895 rc = fc_rport_set_dev_loss_tmo(rport, val);
900 static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
914 struct fc_rport *rport = transport_class_to_rport(dev);
917 if ((rport->port_id != -1) &&
918 (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
920 switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
935 if (rport->roles == FC_PORT_ROLE_UNKNOWN)
937 return get_fc_port_roles_names(rport->roles, buf);
940 static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
953 struct fc_rport *rport = transport_class_to_rport(dev);
955 if (rport->fast_io_fail_tmo == -1)
957 return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
967 struct fc_rport *rport = transport_class_to_rport(dev);
969 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
970 (rport->port_state == FC_PORTSTATE_DELETED) ||
971 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
974 rport->fast_io_fail_tmo = -1;
983 if ((val >= rport->dev_loss_tmo) ||
987 rport->fast_io_fail_tmo = val;
991 static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
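The dev_loss_tmo store path above (source lines 854-900) validates the new value and then hands it to the LLDD; updating rport->dev_loss_tmo itself is left to the driver's callback. A minimal sketch of the driver side, using placeholder my_* names; the template fields are the ones the transport consults here:

    #include <scsi/scsi_transport_fc.h>

    /* Called from fc_rport_set_dev_loss_tmo() after validation; the LLDD
     * records the new value (clamping zero to 1 here) and could also push
     * it to firmware. */
    static void my_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
    {
        rport->dev_loss_tmo = timeout ? timeout : 1;
    }

    static struct fc_function_template my_fc_transport_functions = {
        .show_rport_maxframe_size      = 1,
        .show_rport_supported_classes  = 1,
        .show_rport_dev_loss_tmo       = 1,
        .set_rport_dev_loss_tmo        = my_set_rport_dev_loss_tmo,
    };

fast_io_fail_tmo, by contrast, has no driver callback in the fragments above: its store path (source lines 967-991) only rejects values at or above dev_loss_tmo and writes rport->fast_io_fail_tmo directly (line 987), so the fast-fail window always ends before the rport itself is given up on.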
1013 struct fc_rport *rport = starget_to_rport(starget); \
1014 if (rport) \
1015 fc_starget_##field(starget) = rport->field; \
1602 struct fc_rport *rport;
1613 get_list_head_entry(rport,
1615 list_del(&rport->peers);
1616 rport->port_state = FC_PORTSTATE_DELETED;
1617 fc_queue_work(shost, &rport->rport_delete_work);
1657 struct fc_rport *rport;
1667 list_for_each_entry(rport, &fc_host->rports, peers)
1668 fc_rport_set_dev_loss_tmo(rport, val);
1983 struct fc_rport *rport = dev_to_rport(dev);
1985 kfree(rport);
2052 	 * rport is in a blocked state, typically due to a temporary loss of
2060 * the underlying rport. If the rport is blocked, it returns
2064 * If the rport is not blocked, normal error handling continues.
2072 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
2074 if (rport->port_state == FC_PORTSTATE_BLOCKED)
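The check at source line 2074 is the heart of fc_eh_timed_out: while the rport is blocked, a timed-out command simply gets its timer re-armed instead of escalating into error handling. Assuming a kernel that exports fc_eh_timed_out (recent ones do), an FC LLDD typically wires it straight into its SCSI host template; everything else below is placeholder:

    #include <linux/module.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    static struct scsi_host_template my_fc_sht = {
        .module        = THIS_MODULE,
        .name          = "my_fc_hba",
        .eh_timed_out  = fc_eh_timed_out,  /* defer timeouts while blocked */
        /* .queuecommand, .eh_*_handler, .can_queue, ... */
    };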
2082 * Called by fc_user_scan to locate an rport on the shost that
2084 * on the rport.
2089 struct fc_rport *rport;
2094 list_for_each_entry(rport, &fc_host_rports(shost), peers) {
2095 if (rport->scsi_target_id == -1)
2098 if (rport->port_state != FC_PORTSTATE_ONLINE)
2101 if ((channel == rport->channel) &&
2102 (id == rport->scsi_target_id)) {
2104 scsi_scan_target(&rport->dev, channel, id, lun,
2115 * wants to place all target objects below the rport object. So this
2116 * routine must invoke the scsi_scan_target() routine with the rport
2414 struct fc_rport *rport = NULL, *next_rport = NULL;
2428 list_for_each_entry_safe(rport, next_rport,
2430 list_del(&rport->peers);
2431 rport->port_state = FC_PORTSTATE_DELETED;
2432 fc_queue_work(shost, &rport->rport_delete_work);
2435 list_for_each_entry_safe(rport, next_rport,
2437 list_del(&rport->peers);
2438 rport->port_state = FC_PORTSTATE_DELETED;
2439 fc_queue_work(shost, &rport->rport_delete_work);
2447 /* flush all stgt delete, and rport delete work items, then kill it */
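The fragments at source lines 2414-2447 are from fc_remove_host(), which walks both rport lists, marks every rport deleted and queues its delete work. A minimal sketch of how an LLDD's teardown path is expected to use it (driver-specific cleanup omitted):

    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    static void my_host_teardown(struct Scsi_Host *shost)
    {
        fc_remove_host(shost);     /* queue deletion of all rports */
        scsi_remove_host(shost);   /* then remove the SCSI host itself */
        /* driver-specific cleanup, final scsi_host_put(shost) */
    }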
2463 static void fc_terminate_rport_io(struct fc_rport *rport)
2465 struct Scsi_Host *shost = rport_to_shost(rport);
2468 /* Involve the LLDD if possible to terminate all io on the rport. */
2470 i->f->terminate_rport_io(rport);
2475 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
2479 * fc_starget_delete - called to delete the scsi descendants of an rport
2487 struct fc_rport *rport =
2490 fc_terminate_rport_io(rport);
2491 scsi_remove_target(&rport->dev);
2496 * fc_rport_final_delete - finish rport termination and delete it.
2502 struct fc_rport *rport =
2504 struct device *dev = &rport->dev;
2505 struct Scsi_Host *shost = rport_to_shost(rport);
2510 fc_terminate_rport_io(rport);
2514 * that we can reclaim the rport scan work element.
2516 if (rport->flags & FC_RPORT_SCAN_PENDING)
2525 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
2527 if (!cancel_delayed_work(&rport->fail_io_work))
2529 if (!cancel_delayed_work(&rport->dev_loss_work))
2531 cancel_work_sync(&rport->scan_work);
2533 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2538 if (rport->scsi_target_id != -1)
2539 fc_starget_delete(&rport->stgt_delete_work);
2542 * Notify the driver that the rport is now dead. The LLDD will
2543 * also guarantee that any communication to the rport is terminated
2546 * rport for the binding.
2549 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2551 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
2557 i->f->dev_loss_tmo_callbk(rport);
2559 fc_bsg_remove(rport->rqst_q);
2564 scsi_host_put(shost); /* for fc_host->rport list */
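fc_terminate_rport_io() (source line 2470) and the DEVLOSS_CALLBK_DONE handling above (source lines 2549-2557) invoke two optional hooks in the driver's fc_function_template. A sketch of what an LLDD might provide, with placeholder bodies:

    #include <scsi/scsi_transport_fc.h>

    /* Abort or flush any commands the HBA still has outstanding to this
     * remote port; the transport unblocks the SCSI target afterwards. */
    static void my_terminate_rport_io(struct fc_rport *rport)
    {
    }

    /* Final notification for the rport: release driver state tied to
     * rport->dd_data, log the fabric port out, etc. */
    static void my_dev_loss_tmo_callbk(struct fc_rport *rport)
    {
    }

    /* Wired up as .terminate_rport_io and .dev_loss_tmo_callbk in the
     * driver's struct fc_function_template (see the earlier template sketch). */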
2588 struct fc_rport *rport;
2595 rport = kzalloc(size, GFP_KERNEL);
2596 if (unlikely(!rport)) {
2601 rport->maxframe_size = -1;
2602 rport->supported_classes = FC_COS_UNSPECIFIED;
2603 rport->dev_loss_tmo = fc_host->dev_loss_tmo;
2604 memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
2605 memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
2606 rport->port_id = ids->port_id;
2607 rport->roles = ids->roles;
2608 rport->port_state = FC_PORTSTATE_ONLINE;
2610 rport->dd_data = &rport[1];
2611 rport->channel = channel;
2612 rport->fast_io_fail_tmo = -1;
2614 INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
2615 INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
2616 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
2617 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
2618 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
2622 rport->number = fc_host->next_rport_number++;
2623 if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) ||
2624 (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR))
2625 rport->scsi_target_id = fc_host->next_target_id++;
2627 rport->scsi_target_id = -1;
2628 list_add_tail(&rport->peers, &fc_host->rports);
2629 scsi_host_get(shost); /* for fc_host->rport list */
2633 dev = &rport->dev;
2637 dev_set_name(dev, "rport-%d:%d-%d",
2638 shost->host_no, channel, rport->number);
2649 fc_bsg_rportadd(shost, rport);
2652 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2654 rport->flags |= FC_RPORT_SCAN_PENDING;
2655 scsi_queue_work(shost, &rport->scan_work);
2658 return rport;
2663 list_del(&rport->peers);
2664 scsi_host_put(shost); /* for fc_host->rport list */
2667 kfree(rport);
2715 struct fc_rport *rport;
2723 	 * Search the list of "active" rports for an rport that has been
2729 list_for_each_entry(rport, &fc_host->rports, peers) {
2731 if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
2732 rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
2733 (rport->channel == channel)) {
2738 if (rport->port_name == ids->port_name)
2742 if (rport->node_name == ids->node_name)
2746 if (rport->port_id == ids->port_id)
2753 memcpy(&rport->node_name, &ids->node_name,
2754 sizeof(rport->node_name));
2755 memcpy(&rport->port_name, &ids->port_name,
2756 sizeof(rport->port_name));
2757 rport->port_id = ids->port_id;
2759 rport->port_state = FC_PORTSTATE_ONLINE;
2760 rport->roles = ids->roles;
2765 memset(rport->dd_data, 0,
2770 * io terminate and rport timers, and
2785 if ((rport->scsi_target_id != -1) &&
2787 return rport;
2794 if (!cancel_delayed_work(&rport->fail_io_work))
2796 if (!cancel_delayed_work(&rport->dev_loss_work))
2801 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2808 if (rport->scsi_target_id != -1) {
2809 scsi_target_unblock(&rport->dev,
2813 rport->flags |= FC_RPORT_SCAN_PENDING;
2815 &rport->scan_work);
2820 fc_bsg_goose_queue(rport);
2822 return rport;
2835 list_for_each_entry(rport, &fc_host->rport_bindings,
2837 if (rport->channel != channel)
2842 if (rport->port_name == ids->port_name)
2846 if (rport->node_name == ids->node_name)
2850 if (rport->port_id == ids->port_id)
2858 list_move_tail(&rport->peers, &fc_host->rports);
2864 memcpy(&rport->node_name, &ids->node_name,
2865 sizeof(rport->node_name));
2866 memcpy(&rport->port_name, &ids->port_name,
2867 sizeof(rport->port_name));
2868 rport->port_id = ids->port_id;
2869 rport->port_state = FC_PORTSTATE_ONLINE;
2870 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
2873 memset(rport->dd_data, 0,
2877 fc_remote_port_rolechg(rport, ids->roles);
2878 return rport;
2885 rport = fc_remote_port_create(shost, channel, ids);
2887 return rport;
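fc_remote_port_add(), whose fragments appear at source lines 2715-2887, first tries to reuse a blocked rport, then a consistent binding, and only then allocates a new one via fc_remote_port_create(). A sketch of how an LLDD's discovery code typically calls it; the WWNN/WWPN/port_id would come from the driver's name-server or PLOGI handling, and channel 0 is assumed:

    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    static void my_report_target_port(struct Scsi_Host *shost, u64 wwnn,
                                      u64 wwpn, u32 port_id)
    {
        struct fc_rport_identifiers ids = {
            .node_name = wwnn,
            .port_name = wwpn,
            .port_id   = port_id,
            .roles     = FC_PORT_ROLE_FCP_TARGET,
        };
        struct fc_rport *rport;

        /* A target-role rport gets a scsi_target_id and its scan_work
         * queued automatically, as the fragments above show. */
        rport = fc_remote_port_add(shost, 0, &ids);
        if (!rport)
            shost_printk(KERN_WARNING, shost, "failed to add rport\n");
    }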
2894 * @rport: The remote port that no longer exists
2917 * temporary blocked state. From the LLDD's perspective, the rport no
2945 fc_remote_port_delete(struct fc_rport *rport)
2947 struct Scsi_Host *shost = rport_to_shost(rport);
2948 unsigned long timeout = rport->dev_loss_tmo;
2954 * We do need to reclaim the rport scan work element, so eventually
2961 if (rport->port_state != FC_PORTSTATE_ONLINE) {
2968 * unconditionally just jump to deleting the rport.
2970 	 * and it's not appropriate to just terminate the rport at the
2972 * send ELS traffic to re-validate the login. If the rport is
2976 * destroying an rport.
2979 rport->port_state = FC_PORTSTATE_BLOCKED;
2981 rport->flags |= FC_RPORT_DEVLOSS_PENDING;
2985 scsi_target_block(&rport->dev);
2988 if ((rport->fast_io_fail_tmo != -1) &&
2989 (rport->fast_io_fail_tmo < timeout))
2990 fc_queue_devloss_work(shost, &rport->fail_io_work,
2991 rport->fast_io_fail_tmo * HZ);
2994 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
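fc_remote_port_delete() (source lines 2945-2994) blocks the target and arms the fast_io_fail/dev_loss timers rather than tearing the rport down immediately. A minimal sketch of the driver side; if the same port is reported again via fc_remote_port_add() before dev_loss_tmo expires, the existing rport is unblocked and reused:

    #include <scsi/scsi_transport_fc.h>

    static void my_handle_port_logout(struct fc_rport *rport)
    {
        /* The transport sets FC_PORTSTATE_BLOCKED, blocks the SCSI target
         * and queues fail_io_work/dev_loss_work as shown above. */
        fc_remote_port_delete(rport);
    }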
3000 * @rport: The remote port that changed.
3019 fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
3021 struct Scsi_Host *shost = rport_to_shost(rport);
3028 if (rport->scsi_target_id == -1) {
3029 rport->scsi_target_id = fc_host->next_target_id++;
3031 } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
3035 rport->roles = roles;
3044 * Note: we know the rport exists and is in an online
3045 * state as the LLDD would not have had an rport
3052 if (!cancel_delayed_work(&rport->fail_io_work))
3054 if (!cancel_delayed_work(&rport->dev_loss_work))
3058 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3066 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
3069 rport->flags |= FC_RPORT_SCAN_PENDING;
3070 scsi_queue_work(shost, &rport->scan_work);
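fc_remote_port_rolechg() (source lines 3019-3070) is how a driver reports that an existing rport's roles changed; gaining FC_PORT_ROLE_FCP_TARGET gets it a scsi_target_id (if it lacked one) and a scan. A short usage sketch, assuming a placeholder PRLI-completion handler:

    #include <scsi/scsi_transport_fc.h>

    static void my_prli_complete(struct fc_rport *rport)
    {
        fc_remote_port_rolechg(rport,
                               rport->roles | FC_PORT_ROLE_FCP_TARGET);
    }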
3078 * @work: rport target that failed to reappear in the allotted time.
3086 struct fc_rport *rport =
3088 struct Scsi_Host *shost = rport_to_shost(rport);
3096 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3103 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3104 (rport->scsi_target_id != -1) &&
3105 !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
3106 dev_printk(KERN_ERR, &rport->dev,
3110 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
3111 fc_queue_work(shost, &rport->stgt_delete_work);
3116 if (rport->port_state != FC_PORTSTATE_BLOCKED) {
3118 dev_printk(KERN_ERR, &rport->dev,
3120 " rport%s alone\n",
3121 (rport->scsi_target_id != -1) ? " and starget" : "");
3126 (rport->scsi_target_id == -1)) {
3127 list_del(&rport->peers);
3128 rport->port_state = FC_PORTSTATE_DELETED;
3129 dev_printk(KERN_ERR, &rport->dev,
3131 " rport%s\n",
3132 (rport->scsi_target_id != -1) ? " and starget" : "");
3133 fc_queue_work(shost, &rport->rport_delete_work);
3138 dev_printk(KERN_ERR, &rport->dev,
3142 list_move_tail(&rport->peers, &fc_host->rport_bindings);
3153 rport->maxframe_size = -1;
3154 rport->supported_classes = FC_COS_UNSPECIFIED;
3155 rport->roles = FC_PORT_ROLE_UNKNOWN;
3156 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3157 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3165 fc_terminate_rport_io(rport);
3169 if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */
3174 rport->node_name = -1;
3175 rport->port_id = -1;
3178 rport->port_name = -1;
3179 rport->port_id = -1;
3182 rport->node_name = -1;
3183 rport->port_name = -1;
3194 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3195 fc_queue_work(shost, &rport->stgt_delete_work);
3203 * Notify the driver that the rport is now dead. The LLDD will
3204 * also guarantee that any communication to the rport is terminated
3209 i->f->dev_loss_tmo_callbk(rport);
3215 * @work: rport to terminate io on.
3223 struct fc_rport *rport =
3226 if (rport->port_state != FC_PORTSTATE_BLOCKED)
3229 rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3230 fc_terminate_rport_io(rport);
3240 struct fc_rport *rport =
3242 struct Scsi_Host *shost = rport_to_shost(rport);
3246 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3247 (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3249 scsi_scan_target(&rport->dev, rport->channel,
3250 rport->scsi_target_id, SCAN_WILD_CARD,
3255 rport->flags &= ~FC_RPORT_SCAN_PENDING;
3261 * @rport: Remote port that scsi_eh is trying to recover.
3273 int fc_block_rport(struct fc_rport *rport)
3275 struct Scsi_Host *shost = rport_to_shost(rport);
3279 while (rport->port_state == FC_PORTSTATE_BLOCKED &&
3280 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
3287 if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
3310 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3312 if (WARN_ON_ONCE(!rport))
3315 return fc_block_rport(rport);
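fc_block_rport()/fc_block_scsi_eh() (source lines 3273-3315) sleep while the rport is blocked and return FAST_IO_FAIL once fast_io_fail_tmo has fired. The usual pattern in an LLDD's error handlers looks roughly like this; my_do_device_reset() stands in for the driver's real reset logic:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_transport_fc.h>

    static int my_do_device_reset(struct scsi_cmnd *cmnd)
    {
        return SUCCESS;    /* placeholder for driver-specific reset */
    }

    static int my_eh_device_reset_handler(struct scsi_cmnd *cmnd)
    {
        int ret = fc_block_scsi_eh(cmnd);

        if (ret)
            return ret;    /* FAST_IO_FAIL: nothing left to recover */

        return my_do_device_reset(cmnd);
    }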
3560 struct fc_rport *rport = fc_bsg_to_rport(job);
3564 if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
3588 * @shost: scsi host rport attached to
3667 * fc_bsg_goose_queue - restart rport queue in case it was stopped
3668 * @rport: rport to be restarted
3671 fc_bsg_goose_queue(struct fc_rport *rport)
3673 struct request_queue *q = rport->rqst_q;
3680 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3681 * @shost: scsi host rport attached to
3698 /* Validate the rport command */
3744 static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
3746 if (rport->port_state == FC_PORTSTATE_BLOCKED &&
3747 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3750 if (rport->port_state != FC_PORTSTATE_ONLINE)
3759 struct fc_rport *rport = fc_bsg_to_rport(job);
3762 ret = fc_bsg_rport_prep(rport);
3812 * @shost: shost that rport is attached to
3813 * @rport: rport that the bsg hooks are being attached to
3816 fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
3818 struct device *dev = &rport->dev;
3822 rport->rqst_q = NULL;
3835 rport->rqst_q = q;