Lines matching defs:res in drivers/scsi/ipr.c
1094 * @res: resource entry struct
1100 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1104 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1107 res->needs_sync_complete = 0;
1108 res->in_erp = 0;
1109 res->add_to_ml = 0;
1110 res->del_from_ml = 0;
1111 res->resetting_device = 0;
1112 res->reset_occurred = 0;
1113 res->sdev = NULL;
1116 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1117 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1118 res->qmodel = IPR_QUEUEING_MODEL64(res);
1119 res->type = cfgtew->u.cfgte64->res_type;
1121 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1122 sizeof(res->res_path));
1124 res->bus = 0;
1125 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1126 sizeof(res->dev_lun.scsi_lun));
1127 res->lun = scsilun_to_int(&res->dev_lun);
1129 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1133 res->target = gscsi_res->target;
1138 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1140 set_bit(res->target, ioa_cfg->target_ids);
1142 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1143 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1144 res->target = 0;
1145 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1146 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1147 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1149 set_bit(res->target, ioa_cfg->array_ids);
1150 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1151 res->bus = IPR_VSET_VIRTUAL_BUS;
1152 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1154 set_bit(res->target, ioa_cfg->vset_ids);
1156 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1158 set_bit(res->target, ioa_cfg->target_ids);
1161 res->qmodel = IPR_QUEUEING_MODEL(res);
1162 res->flags = cfgtew->u.cfgte->flags;
1163 if (res->flags & IPR_IS_IOA_RESOURCE)
1164 res->type = IPR_RES_TYPE_IOAFP;
1166 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1168 res->bus = cfgtew->u.cfgte->res_addr.bus;
1169 res->target = cfgtew->u.cfgte->res_addr.target;
1170 res->lun = cfgtew->u.cfgte->res_addr.lun;
1171 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1177 * @res: resource entry struct
1183 static int ipr_is_same_device(struct ipr_resource_entry *res,
1186 if (res->ioa_cfg->sis64) {
1187 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1189 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1194 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1195 res->target == cfgtew->u.cfgte->res_addr.target &&
1196 res->lun == cfgtew->u.cfgte->res_addr.lun)
1248 * @res: resource entry struct
1254 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1260 if (res->ioa_cfg->sis64) {
1261 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1262 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1263 res->type = cfgtew->u.cfgte64->res_type;
1265 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1268 res->qmodel = IPR_QUEUEING_MODEL64(res);
1269 res->res_handle = cfgtew->u.cfgte64->res_handle;
1270 res->dev_id = cfgtew->u.cfgte64->dev_id;
1272 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1273 sizeof(res->dev_lun.scsi_lun));
1275 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1276 sizeof(res->res_path))) {
1277 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1278 sizeof(res->res_path));
1282 if (res->sdev && new_path)
1283 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1284 ipr_format_res_path(res->ioa_cfg,
1285 res->res_path, buffer, sizeof(buffer)));
1287 res->flags = cfgtew->u.cfgte->flags;
1288 if (res->flags & IPR_IS_IOA_RESOURCE)
1289 res->type = IPR_RES_TYPE_IOAFP;
1291 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1293 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1296 res->qmodel = IPR_QUEUEING_MODEL(res);
1297 res->res_handle = cfgtew->u.cfgte->res_handle;
1304 * @res: resource entry struct
1309 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1312 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1317 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1318 clear_bit(res->target, ioa_cfg->array_ids);
1319 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1320 clear_bit(res->target, ioa_cfg->vset_ids);
1321 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1323 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1325 clear_bit(res->target, ioa_cfg->target_ids);
1327 } else if (res->bus == 0)
1328 clear_bit(res->target, ioa_cfg->target_ids);
1342 struct ipr_resource_entry *res = NULL;
1356 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1357 if (res->res_handle == cc_res_handle) {
1371 res = list_entry(ioa_cfg->free_res_q.next,
1374 list_del(&res->queue);
1375 ipr_init_res_entry(res, &cfgtew);
1376 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1379 ipr_update_res_entry(res, &cfgtew);
1382 if (res->sdev) {
1383 res->del_from_ml = 1;
1384 res->res_handle = IPR_INVALID_RES_HANDLE;
1387 ipr_clear_res_target(res);
1388 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1390 } else if (!res->sdev || res->del_from_ml) {
1391 res->add_to_ml = 1;
2650 * @res: resource entry struct of SES
2656 ipr_find_ses_entry(struct ipr_resource_entry *res)
2665 vpids = &res->std_inq_data.vpids;
2695 struct ipr_resource_entry *res;
2700 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2701 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2704 if (bus != res->bus)
2707 if (!(ste = ipr_find_ses_entry(res)))
3238 struct ipr_resource_entry *res;
3256 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3257 if (res->del_from_ml && res->sdev) {
3259 sdev = res->sdev;
3261 if (!res->add_to_ml)
3262 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3264 res->del_from_ml = 0;
3275 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3276 if (res->add_to_ml) {
3277 bus = res->bus;
3278 target = res->target;
3279 lun = res->lun;
3280 res->add_to_ml = 0;
4427 struct ipr_resource_entry *res;
4432 res = (struct ipr_resource_entry *)sdev->hostdata;
4433 if (res)
4434 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4461 struct ipr_resource_entry *res;
4467 res = (struct ipr_resource_entry *)sdev->hostdata;
4468 if (res && ioa_cfg->sis64)
4470 __ipr_format_res_path(res->res_path, buffer,
4472 else if (res)
4474 res->bus, res->target, res->lun);
4501 struct ipr_resource_entry *res;
4506 res = (struct ipr_resource_entry *)sdev->hostdata;
4507 if (res && ioa_cfg->sis64)
4508 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4509 else if (res)
4510 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4537 struct ipr_resource_entry *res;
4542 res = (struct ipr_resource_entry *)sdev->hostdata;
4544 if (res)
4545 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4573 struct ipr_resource_entry *res;
4578 res = (struct ipr_resource_entry *)sdev->hostdata;
4579 if (res)
4580 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4603 struct ipr_resource_entry *res;
4608 res = (struct ipr_resource_entry *)sdev->hostdata;
4609 if (res) {
4610 if (ipr_is_af_dasd_device(res)) {
4611 res->raw_mode = simple_strtoul(buf, NULL, 10);
4613 if (res->sdev)
4614 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4615 res->raw_mode ? "enabled" : "disabled");
4690 struct ipr_resource_entry *res;
4692 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4693 if ((res->bus == starget->channel) &&
4694 (res->target == starget->id)) {
4695 return res;
4734 struct ipr_resource_entry *res;
4736 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4737 if ((res->bus == sdev->channel) &&
4738 (res->target == sdev->id) &&
4739 (res->lun == sdev->lun))
4740 return res;
4755 struct ipr_resource_entry *res;
4762 res = (struct ipr_resource_entry *) sdev->hostdata;
4763 if (res) {
4765 res->sdev = NULL;
4782 struct ipr_resource_entry *res;
4787 res = sdev->hostdata;
4788 if (res) {
4789 if (ipr_is_af_dasd_device(res))
4791 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4795 if (ipr_is_vset_device(res)) {
4807 res->res_path, buffer, sizeof(buffer)));
4829 struct ipr_resource_entry *res;
4837 res = ipr_find_sdev(sdev);
4838 if (res) {
4839 res->sdev = sdev;
4840 res->add_to_ml = 0;
4841 res->in_erp = 0;
4842 sdev->hostdata = res;
4843 if (!ipr_is_naca_model(res))
4844 res->needs_sync_complete = 1;
4846 if (ipr_is_gata(res)) {
5000 * @res: resource entry struct
5011 struct ipr_resource_entry *res)
5026 ioarcb->res_handle = res->res_handle;
5052 struct ipr_resource_entry *res;
5057 res = scsi_cmd->device->hostdata;
5069 res->resetting_device = 1;
5072 rc = ipr_device_reset(ioa_cfg, res);
5073 res->resetting_device = 0;
5074 res->reset_occurred = 1;
5084 struct ipr_resource_entry *res;
5087 res = cmd->device->hostdata;
5089 if (!res)
5114 struct ipr_resource_entry *res;
5118 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5119 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5120 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5192 struct ipr_resource_entry *res;
5200 res = scsi_cmd->device->hostdata;
5209 if (!res)
5219 if (!ipr_is_gscsi(res))
5239 ipr_cmd->ioarcb.res_handle = res->res_handle;
5260 if (!ipr_is_naca_model(res))
5261 res->needs_sync_complete = 1;
5732 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5744 if (res) {
5745 if (!ipr_is_naca_model(res))
5746 res->needs_sync_complete = 1;
5747 res->in_erp = 0;
5878 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5881 res->in_erp = 1;
5902 * @res: resource entry struct
5912 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5940 if (!ipr_is_gscsi(res))
5947 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5977 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5988 if (ipr_is_vset_device(res) &&
6039 if (ipr_is_vset_device(res))
6100 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6104 if (!res) {
6109 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6112 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6116 if (ipr_is_naca_model(res))
6127 if (!ipr_is_naca_model(res))
6128 res->needs_sync_complete = 1;
6131 if (!res->in_erp)
6132 res->needs_sync_complete = 1;
6150 if (!res->resetting_device)
6153 if (!ipr_is_naca_model(res))
6154 res->needs_sync_complete = 1;
6160 if (!ipr_is_naca_model(res)) {
6166 if (!ipr_is_naca_model(res))
6167 res->needs_sync_complete = 1;
6172 if (res->raw_mode) {
6173 res->raw_mode = 0;
6181 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6182 res->needs_sync_complete = 1;
6246 struct ipr_resource_entry *res;
6257 res = scsi_cmd->device->hostdata;
6277 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6296 if (ipr_is_gscsi(res)) {
6300 if (res->reset_occurred) {
6301 res->reset_occurred = 0;
6306 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6317 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6320 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6348 ioarcb->res_handle = res->res_handle;
6349 if (res->needs_sync_complete) {
6351 res->needs_sync_complete = 0;
6354 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6506 struct ipr_resource_entry *res;
6520 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6521 if (res->add_to_ml || res->del_from_ml) {
6586 struct ipr_resource_entry *res = ipr_cmd->u.res;
6590 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6591 if (!ipr_is_scsi_disk(res))
6594 ipr_cmd->u.res = res;
6595 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6826 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6900 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7040 struct ipr_resource_entry *res, *temp;
7054 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7055 list_move_tail(&res->queue, &old_res);
7069 list_for_each_entry_safe(res, temp, &old_res, queue) {
7070 if (ipr_is_same_device(res, &cfgtew)) {
7071 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7084 res = list_entry(ioa_cfg->free_res_q.next,
7086 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7087 ipr_init_res_entry(res, &cfgtew);
7088 res->add_to_ml = 1;
7089 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7090 res->sdev->allow_restart = 1;
7093 ipr_update_res_entry(res, &cfgtew);
7096 list_for_each_entry_safe(res, temp, &old_res, queue) {
7097 if (res->sdev) {
7098 res->del_from_ml = 1;
7099 res->res_handle = IPR_INVALID_RES_HANDLE;
7100 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7104 list_for_each_entry_safe(res, temp, &old_res, queue) {
7105 ipr_clear_res_target(res);
7106 list_move_tail(&res->queue, &ioa_cfg->free_res_q);