// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);

struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};

/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);
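/* data passed to the attention message check worker */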
struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}

static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

/*
 * calculate failing track from sense data depending on whether
 * it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
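	/*
	 * The linear track number is cylinder * tracks-per-cylinder plus
	 * head; e.g. on a 3390 (15 tracks per cylinder) cylinder 2, head 3
	 * is track 33.
	 */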
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}

static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}

static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

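	/* convert linear start/stop track numbers into cylinder/head pairs */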
	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}


static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/*
	 * note: the meaning of count depends on the operation;
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */

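	/* format 1 requests additionally carry a valid Locate Record Extended */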
	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}

static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
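	/* the search argument is the seek address plus the record number */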
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}

/*
 * Generate device unique id that specifies the physical device.
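 * The printable form is vendor.serial.ssid.unit_addr, with a trailing
 * .vduit token when a virtual device SNEQ is present (see the snprintf
 * users below).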
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}

static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}

/*
 * Wakeup helper for read_conf. If the cqr is not done and needs
 * some error recovery, the buffer has to be re-initialized with
 * the EBCDIC "V1.0" to show support for virtual device SNEQ.
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data;
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

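/*
 * Read Configuration Data on one channel path (lpm); on success the
 * caller owns the returned rcd_buffer and must free it with kfree().
 */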
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data;
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{

	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;

};

static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
}


static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	ccw_device_get_schid(device->cdev, &sch_id);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			/*
			 * build device UID so that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}

static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	unsigned int mdc;
	int tpm;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc == 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}

static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}

static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}

static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	unsigned long flags;
	char print_uid[60];
	int rc;

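	/*
	 * path-class masks accumulated while each path is probed:
	 * operational, non-preferred, preferred, to-be-verified-again (epm),
	 * no-HPF and miscabled paths; they are merged into the device masks
	 * under the ccwdev lock at the end of the loop
	 */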
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left; otherwise the device UID
		 * may have changed and the first working path UID
		 * will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful; rebuild the
			 * device UID with at least one known path in case
			 * a z/VM hyperswap command has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
}

static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	dasd_eckd_path_available_action(device, data);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}

static int dasd_eckd_pe_handler(struct dasd_device *device, __u8 lpm)
{
	struct pe_handler_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = lpm;
	schedule_work(&data->worker);
	return 0;
}

static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
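	/* verify the given paths, or all not-operational paths if pm is 0 */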
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
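	/*
	 * as in the feature-code read above, a PSF CCW chained with
	 * CCW_FLAG_CC selects the query and the following RSSD CCW
	 * reads the result
	 */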
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}

static int dasd_eckd_is_ese(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.vol_info.ese;
}

static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.extent_pool_id;
}

/*
 * This value represents the total amount of available space. As more space is
 * allocated by ESE volumes, this value will decrease.
 * The data for this value is therefore updated on any call.
 */
static int dasd_eckd_space_configured(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_configured;
}

/*
 * The value of space allocated by an ESE volume may have changed and is
 * therefore updated on any call.
 */
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_allocated;
}

static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.logical_capacity;
}

static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	dasd_put_device(device);
	kfree(data);
}

static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	dasd_get_device(device);
	data->device = device;

	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}

static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
					struct dasd_rssd_lcq *lcq)
{
	struct dasd_eckd_private *private = device->private;
	int pool_id = dasd_eckd_ext_pool_id(device);
	struct dasd_ext_pool_sum eps;
	int i;

	for (i = 0; i < lcq->pool_count; i++) {
		eps = lcq->ext_pool_sum[i];
		if (eps.pool_id == pool_id) {
			memcpy(&private->eps, &eps,
			       sizeof(struct dasd_ext_pool_sum));
		}
	}
}

/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}

/*
 * Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA)
 * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
 */
static int dasd_eckd_ext_size(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_ext_pool_sum eps = private->eps;

	if (!eps.flags.extent_size_valid)
		return 0;
	if (eps.extent_size.size_1G)
		return 1113;
	if (eps.extent_size.size_16M)
		return 21;

	return 0;
}

static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.warn_thrshld;
}

static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.capacity_at_warnlevel;
}

/*
 * Extent Pool out of space
 */
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.pool_oos;
}

/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags, e.g. turn on failfast to prevent blocking;
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	struct dasd_eckd_private *private = device->private;
	int enable_pav, rc;

	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* the requested feature may not be available on the server,
	 * therefore just report the error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}

/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}

static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}

/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
*/ 2017 private->init_cqr_status = -1; 2018 /* Set default cache operations. */ 2019 private->attrib.operation = DASD_NORMAL_CACHE; 2020 private->attrib.nr_cyl = 0; 2021 2022 /* Read Configuration Data */ 2023 rc = dasd_eckd_read_conf(device); 2024 if (rc) 2025 goto out_err1; 2026 2027 /* set some default values */ 2028 device->default_expires = DASD_EXPIRES; 2029 device->default_retries = DASD_RETRIES; 2030 device->path_thrhld = DASD_ECKD_PATH_THRHLD; 2031 device->path_interval = DASD_ECKD_PATH_INTERVAL; 2032 2033 if (private->gneq) { 2034 value = 1; 2035 for (i = 0; i < private->gneq->timeout.value; i++) 2036 value = 10 * value; 2037 value = value * private->gneq->timeout.number; 2038 /* do not accept useless values */ 2039 if (value != 0 && value <= DASD_EXPIRES_MAX) 2040 device->default_expires = value; 2041 } 2042 2043 dasd_eckd_get_uid(device, &temp_uid); 2044 if (temp_uid.type == UA_BASE_DEVICE) { 2045 block = dasd_alloc_block(); 2046 if (IS_ERR(block)) { 2047 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 2048 "could not allocate dasd " 2049 "block structure"); 2050 rc = PTR_ERR(block); 2051 goto out_err1; 2052 } 2053 device->block = block; 2054 block->base = device; 2055 } 2056 2057 /* register lcu with alias handling, enable PAV */ 2058 rc = dasd_alias_make_device_known_to_lcu(device); 2059 if (rc) 2060 goto out_err2; 2061 2062 dasd_eckd_validate_server(device, 0); 2063 2064 /* device may report different configuration data after LCU setup */ 2065 rc = dasd_eckd_read_conf(device); 2066 if (rc) 2067 goto out_err3; 2068 2069 /* Read Feature Codes */ 2070 dasd_eckd_read_features(device); 2071 2072 /* Read Volume Information */ 2073 dasd_eckd_read_vol_info(device); 2074 2075 /* Read Extent Pool Information */ 2076 dasd_eckd_read_ext_pool_info(device); 2077 2078 /* Read Device Characteristics */ 2079 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 2080 &private->rdc_data, 64); 2081 if (rc) { 2082 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 2083 "Read device characteristic failed, rc=%d", rc); 2084 goto out_err3; 2085 } 2086 2087 if ((device->features & DASD_FEATURE_USERAW) && 2088 !(private->rdc_data.facilities.RT_in_LR)) { 2089 dev_err(&device->cdev->dev, "The storage server does not " 2090 "support raw-track access\n"); 2091 rc = -EINVAL; 2092 goto out_err3; 2093 } 2094 2095 /* find the valid cylinder size */ 2096 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 2097 private->rdc_data.long_no_cyl) 2098 private->real_cyl = private->rdc_data.long_no_cyl; 2099 else 2100 private->real_cyl = private->rdc_data.no_cyl; 2101 2102 private->fcx_max_data = get_fcx_max_data(device); 2103 2104 readonly = dasd_device_is_ro(device); 2105 if (readonly) 2106 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 2107 2108 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 2109 "with %d cylinders, %d heads, %d sectors%s\n", 2110 private->rdc_data.dev_type, 2111 private->rdc_data.dev_model, 2112 private->rdc_data.cu_type, 2113 private->rdc_data.cu_model.model, 2114 private->real_cyl, 2115 private->rdc_data.trk_per_cyl, 2116 private->rdc_data.sec_per_trk, 2117 readonly ? 
", read-only device" : ""); 2118 return 0; 2119 2120out_err3: 2121 dasd_alias_disconnect_device_from_lcu(device); 2122out_err2: 2123 dasd_free_block(device->block); 2124 device->block = NULL; 2125out_err1: 2126 dasd_eckd_clear_conf_data(device); 2127 kfree(device->private); 2128 device->private = NULL; 2129 return rc; 2130} 2131 2132static void dasd_eckd_uncheck_device(struct dasd_device *device) 2133{ 2134 struct dasd_eckd_private *private = device->private; 2135 2136 if (!private) 2137 return; 2138 2139 dasd_alias_disconnect_device_from_lcu(device); 2140 private->ned = NULL; 2141 private->sneq = NULL; 2142 private->vdsneq = NULL; 2143 private->gneq = NULL; 2144 dasd_eckd_clear_conf_data(device); 2145} 2146 2147static struct dasd_ccw_req * 2148dasd_eckd_analysis_ccw(struct dasd_device *device) 2149{ 2150 struct dasd_eckd_private *private = device->private; 2151 struct eckd_count *count_data; 2152 struct LO_eckd_data *LO_data; 2153 struct dasd_ccw_req *cqr; 2154 struct ccw1 *ccw; 2155 int cplength, datasize; 2156 int i; 2157 2158 cplength = 8; 2159 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 2160 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device, 2161 NULL); 2162 if (IS_ERR(cqr)) 2163 return cqr; 2164 ccw = cqr->cpaddr; 2165 /* Define extent for the first 2 tracks. */ 2166 define_extent(ccw++, cqr->data, 0, 1, 2167 DASD_ECKD_CCW_READ_COUNT, device, 0); 2168 LO_data = cqr->data + sizeof(struct DE_eckd_data); 2169 /* Locate record for the first 4 records on track 0. */ 2170 ccw[-1].flags |= CCW_FLAG_CC; 2171 locate_record(ccw++, LO_data++, 0, 0, 4, 2172 DASD_ECKD_CCW_READ_COUNT, device, 0); 2173 2174 count_data = private->count_area; 2175 for (i = 0; i < 4; i++) { 2176 ccw[-1].flags |= CCW_FLAG_CC; 2177 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2178 ccw->flags = 0; 2179 ccw->count = 8; 2180 ccw->cda = (__u32)(addr_t) count_data; 2181 ccw++; 2182 count_data++; 2183 } 2184 2185 /* Locate record for the first record on track 1. */ 2186 ccw[-1].flags |= CCW_FLAG_CC; 2187 locate_record(ccw++, LO_data++, 1, 0, 1, 2188 DASD_ECKD_CCW_READ_COUNT, device, 0); 2189 /* Read count ccw. */ 2190 ccw[-1].flags |= CCW_FLAG_CC; 2191 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2192 ccw->flags = 0; 2193 ccw->count = 8; 2194 ccw->cda = (__u32)(addr_t) count_data; 2195 2196 cqr->block = NULL; 2197 cqr->startdev = device; 2198 cqr->memdev = device; 2199 cqr->retries = 255; 2200 cqr->buildclk = get_tod_clock(); 2201 cqr->status = DASD_CQR_FILLED; 2202 /* Set flags to suppress output for expected errors */ 2203 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2204 2205 return cqr; 2206} 2207 2208/* differentiate between 'no record found' and any other error */ 2209static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr) 2210{ 2211 char *sense; 2212 if (init_cqr->status == DASD_CQR_DONE) 2213 return INIT_CQR_OK; 2214 else if (init_cqr->status == DASD_CQR_NEED_ERP || 2215 init_cqr->status == DASD_CQR_FAILED) { 2216 sense = dasd_get_sense(&init_cqr->irb); 2217 if (sense && (sense[1] & SNS1_NO_REC_FOUND)) 2218 return INIT_CQR_UNFORMATTED; 2219 else 2220 return INIT_CQR_ERROR; 2221 } else 2222 return INIT_CQR_ERROR; 2223} 2224 2225/* 2226 * This is the callback function for the init_analysis cqr. It saves 2227 * the status of the initial analysis ccw before it frees it and kicks 2228 * the device to continue the startup sequence. This will call 2229 * dasd_eckd_do_analysis again (if the devices has not been marked 2230 * for deletion in the meantime). 
2231 */ 2232static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, 2233 void *data) 2234{ 2235 struct dasd_device *device = init_cqr->startdev; 2236 struct dasd_eckd_private *private = device->private; 2237 2238 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr); 2239 dasd_sfree_request(init_cqr, device); 2240 dasd_kick_device(device); 2241} 2242 2243static int dasd_eckd_start_analysis(struct dasd_block *block) 2244{ 2245 struct dasd_ccw_req *init_cqr; 2246 2247 init_cqr = dasd_eckd_analysis_ccw(block->base); 2248 if (IS_ERR(init_cqr)) 2249 return PTR_ERR(init_cqr); 2250 init_cqr->callback = dasd_eckd_analysis_callback; 2251 init_cqr->callback_data = NULL; 2252 init_cqr->expires = 5*HZ; 2253 /* first try without ERP, so we can later handle unformatted 2254 * devices as a special case 2255 */ 2256 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags); 2257 init_cqr->retries = 0; 2258 dasd_add_request_head(init_cqr); 2259 return -EAGAIN; 2260} 2261 2262static int dasd_eckd_end_analysis(struct dasd_block *block) 2263{ 2264 struct dasd_device *device = block->base; 2265 struct dasd_eckd_private *private = device->private; 2266 struct eckd_count *count_area; 2267 unsigned int sb, blk_per_trk; 2268 int status, i; 2269 struct dasd_ccw_req *init_cqr; 2270 2271 status = private->init_cqr_status; 2272 private->init_cqr_status = -1; 2273 if (status == INIT_CQR_ERROR) { 2274 /* try again, this time with full ERP */ 2275 init_cqr = dasd_eckd_analysis_ccw(device); 2276 dasd_sleep_on(init_cqr); 2277 status = dasd_eckd_analysis_evaluation(init_cqr); 2278 dasd_sfree_request(init_cqr, device); 2279 } 2280 2281 if (device->features & DASD_FEATURE_USERAW) { 2282 block->bp_block = DASD_RAW_BLOCKSIZE; 2283 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK; 2284 block->s2b_shift = 3; 2285 goto raw; 2286 } 2287 2288 if (status == INIT_CQR_UNFORMATTED) { 2289 dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); 2290 return -EMEDIUMTYPE; 2291 } else if (status == INIT_CQR_ERROR) { 2292 dev_err(&device->cdev->dev, 2293 "Detecting the DASD disk layout failed because " 2294 "of an I/O error\n"); 2295 return -EIO; 2296 } 2297 2298 private->uses_cdl = 1; 2299 /* Check Track 0 for Compatible Disk Layout */ 2300 count_area = NULL; 2301 for (i = 0; i < 3; i++) { 2302 if (private->count_area[i].kl != 4 || 2303 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 || 2304 private->count_area[i].cyl != 0 || 2305 private->count_area[i].head != count_area_head[i] || 2306 private->count_area[i].record != count_area_rec[i]) { 2307 private->uses_cdl = 0; 2308 break; 2309 } 2310 } 2311 if (i == 3) 2312 count_area = &private->count_area[3]; 2313 2314 if (private->uses_cdl == 0) { 2315 for (i = 0; i < 5; i++) { 2316 if ((private->count_area[i].kl != 0) || 2317 (private->count_area[i].dl != 2318 private->count_area[0].dl) || 2319 private->count_area[i].cyl != 0 || 2320 private->count_area[i].head != count_area_head[i] || 2321 private->count_area[i].record != count_area_rec[i]) 2322 break; 2323 } 2324 if (i == 5) 2325 count_area = &private->count_area[0]; 2326 } else { 2327 if (private->count_area[3].record == 1) 2328 dev_warn(&device->cdev->dev, 2329 "Track 0 has no records following the VTOC\n"); 2330 } 2331 2332 if (count_area != NULL && count_area->kl == 0) { 2333 /* we found nothing violating our disk layout */ 2334 if (dasd_check_blocksize(count_area->dl) == 0) 2335 block->bp_block = count_area->dl; 2336 } 2337 if (block->bp_block == 0) { 2338 dev_warn(&device->cdev->dev, 2339 "The disk layout of the
DASD is not supported\n"); 2340 return -EMEDIUMTYPE; 2341 } 2342 block->s2b_shift = 0; /* bits to shift 512 to get a block */ 2343 for (sb = 512; sb < block->bp_block; sb = sb << 1) 2344 block->s2b_shift++; 2345 2346 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 2347 2348raw: 2349 block->blocks = ((unsigned long) private->real_cyl * 2350 private->rdc_data.trk_per_cyl * 2351 blk_per_trk); 2352 2353 dev_info(&device->cdev->dev, 2354 "DASD with %u KB/block, %lu KB total size, %u KB/track, " 2355 "%s\n", (block->bp_block >> 10), 2356 (((unsigned long) private->real_cyl * 2357 private->rdc_data.trk_per_cyl * 2358 blk_per_trk * (block->bp_block >> 9)) >> 1), 2359 ((blk_per_trk * block->bp_block) >> 10), 2360 private->uses_cdl ? 2361 "compatible disk layout" : "linux disk layout"); 2362 2363 return 0; 2364} 2365 2366static int dasd_eckd_do_analysis(struct dasd_block *block) 2367{ 2368 struct dasd_eckd_private *private = block->base->private; 2369 2370 if (private->init_cqr_status < 0) 2371 return dasd_eckd_start_analysis(block); 2372 else 2373 return dasd_eckd_end_analysis(block); 2374} 2375 2376static int dasd_eckd_basic_to_ready(struct dasd_device *device) 2377{ 2378 return dasd_alias_add_device(device); 2379} 2380 2381static int dasd_eckd_online_to_ready(struct dasd_device *device) 2382{ 2383 if (cancel_work_sync(&device->reload_device)) 2384 dasd_put_device(device); 2385 if (cancel_work_sync(&device->kick_validate)) 2386 dasd_put_device(device); 2387 2388 return 0; 2389} 2390 2391static int dasd_eckd_basic_to_known(struct dasd_device *device) 2392{ 2393 return dasd_alias_remove_device(device); 2394} 2395 2396static int 2397dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) 2398{ 2399 struct dasd_eckd_private *private = block->base->private; 2400 2401 if (dasd_check_blocksize(block->bp_block) == 0) { 2402 geo->sectors = recs_per_track(&private->rdc_data, 2403 0, block->bp_block); 2404 } 2405 geo->cylinders = private->rdc_data.no_cyl; 2406 geo->heads = private->rdc_data.trk_per_cyl; 2407 return 0; 2408} 2409 2410/* 2411 * Build the TCW request for the format check 2412 */ 2413static struct dasd_ccw_req * 2414dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, 2415 int enable_pav, struct eckd_count *fmt_buffer, 2416 int rpt) 2417{ 2418 struct dasd_eckd_private *start_priv; 2419 struct dasd_device *startdev = NULL; 2420 struct tidaw *last_tidaw = NULL; 2421 struct dasd_ccw_req *cqr; 2422 struct itcw *itcw; 2423 int itcw_size; 2424 int count; 2425 int rc; 2426 int i; 2427 2428 if (enable_pav) 2429 startdev = dasd_alias_get_start_dev(base); 2430 2431 if (!startdev) 2432 startdev = base; 2433 2434 start_priv = startdev->private; 2435 2436 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2437 2438 /* 2439 * we're adding 'count' tidaws to the itcw.
2440 * calculate the corresponding itcw_size 2441 */ 2442 itcw_size = itcw_calc_size(0, count, 0); 2443 2444 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 2445 if (IS_ERR(cqr)) 2446 return cqr; 2447 2448 start_priv->count++; 2449 2450 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0); 2451 if (IS_ERR(itcw)) { 2452 rc = -EINVAL; 2453 goto out_err; 2454 } 2455 2456 cqr->cpaddr = itcw_get_tcw(itcw); 2457 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit, 2458 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count, 2459 sizeof(struct eckd_count), 2460 count * sizeof(struct eckd_count), 0, rpt); 2461 if (rc) 2462 goto out_err; 2463 2464 for (i = 0; i < count; i++) { 2465 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++, 2466 sizeof(struct eckd_count)); 2467 if (IS_ERR(last_tidaw)) { 2468 rc = -EINVAL; 2469 goto out_err; 2470 } 2471 } 2472 2473 last_tidaw->flags |= TIDAW_FLAGS_LAST; 2474 itcw_finalize(itcw); 2475 2476 cqr->cpmode = 1; 2477 cqr->startdev = startdev; 2478 cqr->memdev = startdev; 2479 cqr->basedev = base; 2480 cqr->retries = startdev->default_retries; 2481 cqr->expires = startdev->default_expires * HZ; 2482 cqr->buildclk = get_tod_clock(); 2483 cqr->status = DASD_CQR_FILLED; 2484 /* Set flags to suppress output for expected errors */ 2485 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 2486 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 2487 2488 return cqr; 2489 2490out_err: 2491 dasd_sfree_request(cqr, startdev); 2492 2493 return ERR_PTR(rc); 2494} 2495 2496/* 2497 * Build the CCW request for the format check 2498 */ 2499static struct dasd_ccw_req * 2500dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, 2501 int enable_pav, struct eckd_count *fmt_buffer, int rpt) 2502{ 2503 struct dasd_eckd_private *start_priv; 2504 struct dasd_eckd_private *base_priv; 2505 struct dasd_device *startdev = NULL; 2506 struct dasd_ccw_req *cqr; 2507 struct ccw1 *ccw; 2508 void *data; 2509 int cplength, datasize; 2510 int use_prefix; 2511 int count; 2512 int i; 2513 2514 if (enable_pav) 2515 startdev = dasd_alias_get_start_dev(base); 2516 2517 if (!startdev) 2518 startdev = base; 2519 2520 start_priv = startdev->private; 2521 base_priv = base->private; 2522 2523 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2524 2525 use_prefix = base_priv->features.feature[8] & 0x01; 2526 2527 if (use_prefix) { 2528 cplength = 1; 2529 datasize = sizeof(struct PFX_eckd_data); 2530 } else { 2531 cplength = 2; 2532 datasize = sizeof(struct DE_eckd_data) + 2533 sizeof(struct LO_eckd_data); 2534 } 2535 cplength += count; 2536 2537 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2538 if (IS_ERR(cqr)) 2539 return cqr; 2540 2541 start_priv->count++; 2542 data = cqr->data; 2543 ccw = cqr->cpaddr; 2544 2545 if (use_prefix) { 2546 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit, 2547 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0, 2548 count, 0, 0); 2549 } else { 2550 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit, 2551 DASD_ECKD_CCW_READ_COUNT, startdev, 0); 2552 2553 data += sizeof(struct DE_eckd_data); 2554 ccw[-1].flags |= CCW_FLAG_CC; 2555 2556 locate_record(ccw++, data, fdata->start_unit, 0, count, 2557 DASD_ECKD_CCW_READ_COUNT, base, 0); 2558 } 2559 2560 for (i = 0; i < count; i++) { 2561 ccw[-1].flags |= CCW_FLAG_CC; 2562 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2563 ccw->flags = CCW_FLAG_SLI; 2564 ccw->count = 8; 2565 ccw->cda = (__u32)(addr_t) fmt_buffer; 2566 ccw++; 2567 
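 /* advance to the count area entry for the next record */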
fmt_buffer++; 2568 } 2569 2570 cqr->startdev = startdev; 2571 cqr->memdev = startdev; 2572 cqr->basedev = base; 2573 cqr->retries = DASD_RETRIES; 2574 cqr->expires = startdev->default_expires * HZ; 2575 cqr->buildclk = get_tod_clock(); 2576 cqr->status = DASD_CQR_FILLED; 2577 /* Set flags to suppress output for expected errors */ 2578 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2579 2580 return cqr; 2581} 2582 2583static struct dasd_ccw_req * 2584dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, 2585 struct format_data_t *fdata, int enable_pav) 2586{ 2587 struct dasd_eckd_private *base_priv; 2588 struct dasd_eckd_private *start_priv; 2589 struct dasd_ccw_req *fcp; 2590 struct eckd_count *ect; 2591 struct ch_t address; 2592 struct ccw1 *ccw; 2593 void *data; 2594 int rpt; 2595 int cplength, datasize; 2596 int i, j; 2597 int intensity = 0; 2598 int r0_perm; 2599 int nr_tracks; 2600 int use_prefix; 2601 2602 if (enable_pav) 2603 startdev = dasd_alias_get_start_dev(base); 2604 2605 if (!startdev) 2606 startdev = base; 2607 2608 start_priv = startdev->private; 2609 base_priv = base->private; 2610 2611 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); 2612 2613 nr_tracks = fdata->stop_unit - fdata->start_unit + 1; 2614 2615 /* 2616 * fdata->intensity is a bit string that tells us what to do: 2617 * Bit 0: write record zero 2618 * Bit 1: write home address, currently not supported 2619 * Bit 2: invalidate tracks 2620 * Bit 3: use OS/390 compatible disk layout (cdl) 2621 * Bit 4: do not allow storage subsystem to modify record zero 2622 * Only some bit combinations do make sense. 2623 */ 2624 if (fdata->intensity & 0x10) { 2625 r0_perm = 0; 2626 intensity = fdata->intensity & ~0x10; 2627 } else { 2628 r0_perm = 1; 2629 intensity = fdata->intensity; 2630 } 2631 2632 use_prefix = base_priv->features.feature[8] & 0x01; 2633 2634 switch (intensity) { 2635 case 0x00: /* Normal format */ 2636 case 0x08: /* Normal format, use cdl. */ 2637 cplength = 2 + (rpt*nr_tracks); 2638 if (use_prefix) 2639 datasize = sizeof(struct PFX_eckd_data) + 2640 sizeof(struct LO_eckd_data) + 2641 rpt * nr_tracks * sizeof(struct eckd_count); 2642 else 2643 datasize = sizeof(struct DE_eckd_data) + 2644 sizeof(struct LO_eckd_data) + 2645 rpt * nr_tracks * sizeof(struct eckd_count); 2646 break; 2647 case 0x01: /* Write record zero and format track. */ 2648 case 0x09: /* Write record zero and format track, use cdl. */ 2649 cplength = 2 + rpt * nr_tracks; 2650 if (use_prefix) 2651 datasize = sizeof(struct PFX_eckd_data) + 2652 sizeof(struct LO_eckd_data) + 2653 sizeof(struct eckd_count) + 2654 rpt * nr_tracks * sizeof(struct eckd_count); 2655 else 2656 datasize = sizeof(struct DE_eckd_data) + 2657 sizeof(struct LO_eckd_data) + 2658 sizeof(struct eckd_count) + 2659 rpt * nr_tracks * sizeof(struct eckd_count); 2660 break; 2661 case 0x04: /* Invalidate track. */ 2662 case 0x0c: /* Invalidate track, use cdl. 
*/ 2663 cplength = 3; 2664 if (use_prefix) 2665 datasize = sizeof(struct PFX_eckd_data) + 2666 sizeof(struct LO_eckd_data) + 2667 sizeof(struct eckd_count); 2668 else 2669 datasize = sizeof(struct DE_eckd_data) + 2670 sizeof(struct LO_eckd_data) + 2671 sizeof(struct eckd_count); 2672 break; 2673 default: 2674 dev_warn(&startdev->cdev->dev, 2675 "An I/O control call used incorrect flags 0x%x\n", 2676 fdata->intensity); 2677 return ERR_PTR(-EINVAL); 2678 } 2679 2680 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2681 if (IS_ERR(fcp)) 2682 return fcp; 2683 2684 start_priv->count++; 2685 data = fcp->data; 2686 ccw = fcp->cpaddr; 2687 2688 switch (intensity & ~0x08) { 2689 case 0x00: /* Normal format. */ 2690 if (use_prefix) { 2691 prefix(ccw++, (struct PFX_eckd_data *) data, 2692 fdata->start_unit, fdata->stop_unit, 2693 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2694 /* grant subsystem permission to format R0 */ 2695 if (r0_perm) 2696 ((struct PFX_eckd_data *)data) 2697 ->define_extent.ga_extended |= 0x04; 2698 data += sizeof(struct PFX_eckd_data); 2699 } else { 2700 define_extent(ccw++, (struct DE_eckd_data *) data, 2701 fdata->start_unit, fdata->stop_unit, 2702 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2703 /* grant subsystem permission to format R0 */ 2704 if (r0_perm) 2705 ((struct DE_eckd_data *) data) 2706 ->ga_extended |= 0x04; 2707 data += sizeof(struct DE_eckd_data); 2708 } 2709 ccw[-1].flags |= CCW_FLAG_CC; 2710 locate_record(ccw++, (struct LO_eckd_data *) data, 2711 fdata->start_unit, 0, rpt*nr_tracks, 2712 DASD_ECKD_CCW_WRITE_CKD, base, 2713 fdata->blksize); 2714 data += sizeof(struct LO_eckd_data); 2715 break; 2716 case 0x01: /* Write record zero + format track. */ 2717 if (use_prefix) { 2718 prefix(ccw++, (struct PFX_eckd_data *) data, 2719 fdata->start_unit, fdata->stop_unit, 2720 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2721 base, startdev); 2722 data += sizeof(struct PFX_eckd_data); 2723 } else { 2724 define_extent(ccw++, (struct DE_eckd_data *) data, 2725 fdata->start_unit, fdata->stop_unit, 2726 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0); 2727 data += sizeof(struct DE_eckd_data); 2728 } 2729 ccw[-1].flags |= CCW_FLAG_CC; 2730 locate_record(ccw++, (struct LO_eckd_data *) data, 2731 fdata->start_unit, 0, rpt * nr_tracks + 1, 2732 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, 2733 base->block->bp_block); 2734 data += sizeof(struct LO_eckd_data); 2735 break; 2736 case 0x04: /* Invalidate track. 
*/ 2737 if (use_prefix) { 2738 prefix(ccw++, (struct PFX_eckd_data *) data, 2739 fdata->start_unit, fdata->stop_unit, 2740 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2741 data += sizeof(struct PFX_eckd_data); 2742 } else { 2743 define_extent(ccw++, (struct DE_eckd_data *) data, 2744 fdata->start_unit, fdata->stop_unit, 2745 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2746 data += sizeof(struct DE_eckd_data); 2747 } 2748 ccw[-1].flags |= CCW_FLAG_CC; 2749 locate_record(ccw++, (struct LO_eckd_data *) data, 2750 fdata->start_unit, 0, 1, 2751 DASD_ECKD_CCW_WRITE_CKD, base, 8); 2752 data += sizeof(struct LO_eckd_data); 2753 break; 2754 } 2755 2756 for (j = 0; j < nr_tracks; j++) { 2757 /* calculate cylinder and head for the current track */ 2758 set_ch_t(&address, 2759 (fdata->start_unit + j) / 2760 base_priv->rdc_data.trk_per_cyl, 2761 (fdata->start_unit + j) % 2762 base_priv->rdc_data.trk_per_cyl); 2763 if (intensity & 0x01) { /* write record zero */ 2764 ect = (struct eckd_count *) data; 2765 data += sizeof(struct eckd_count); 2766 ect->cyl = address.cyl; 2767 ect->head = address.head; 2768 ect->record = 0; 2769 ect->kl = 0; 2770 ect->dl = 8; 2771 ccw[-1].flags |= CCW_FLAG_CC; 2772 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 2773 ccw->flags = CCW_FLAG_SLI; 2774 ccw->count = 8; 2775 ccw->cda = (__u32)(addr_t) ect; 2776 ccw++; 2777 } 2778 if ((intensity & ~0x08) & 0x04) { /* erase track */ 2779 ect = (struct eckd_count *) data; 2780 data += sizeof(struct eckd_count); 2781 ect->cyl = address.cyl; 2782 ect->head = address.head; 2783 ect->record = 1; 2784 ect->kl = 0; 2785 ect->dl = 0; 2786 ccw[-1].flags |= CCW_FLAG_CC; 2787 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 2788 ccw->flags = CCW_FLAG_SLI; 2789 ccw->count = 8; 2790 ccw->cda = (__u32)(addr_t) ect; 2791 } else { /* write remaining records */ 2792 for (i = 0; i < rpt; i++) { 2793 ect = (struct eckd_count *) data; 2794 data += sizeof(struct eckd_count); 2795 ect->cyl = address.cyl; 2796 ect->head = address.head; 2797 ect->record = i + 1; 2798 ect->kl = 0; 2799 ect->dl = fdata->blksize; 2800 /* 2801 * Check for special tracks 0-1 2802 * when formatting CDL 2803 */ 2804 if ((intensity & 0x08) && 2805 address.cyl == 0 && address.head == 0) { 2806 if (i < 3) { 2807 ect->kl = 4; 2808 ect->dl = sizes_trk0[i] - 4; 2809 } 2810 } 2811 if ((intensity & 0x08) && 2812 address.cyl == 0 && address.head == 1) { 2813 ect->kl = 44; 2814 ect->dl = LABEL_SIZE - 44; 2815 } 2816 ccw[-1].flags |= CCW_FLAG_CC; 2817 if (i != 0 || j == 0) 2818 ccw->cmd_code = 2819 DASD_ECKD_CCW_WRITE_CKD; 2820 else 2821 ccw->cmd_code = 2822 DASD_ECKD_CCW_WRITE_CKD_MT; 2823 ccw->flags = CCW_FLAG_SLI; 2824 ccw->count = 8; 2825 ccw->cda = (__u32)(addr_t) ect; 2826 ccw++; 2827 } 2828 } 2829 } 2830 2831 fcp->startdev = startdev; 2832 fcp->memdev = startdev; 2833 fcp->basedev = base; 2834 fcp->retries = 256; 2835 fcp->expires = startdev->default_expires * HZ; 2836 fcp->buildclk = get_tod_clock(); 2837 fcp->status = DASD_CQR_FILLED; 2838 2839 return fcp; 2840} 2841 2842/* 2843 * Wrapper function to build a CCW request depending on input data 2844 */ 2845static struct dasd_ccw_req * 2846dasd_eckd_format_build_ccw_req(struct dasd_device *base, 2847 struct format_data_t *fdata, int enable_pav, 2848 int tpm, struct eckd_count *fmt_buffer, int rpt) 2849{ 2850 struct dasd_ccw_req *ccw_req; 2851 2852 if (!fmt_buffer) { 2853 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav); 2854 } else { 2855 if (tpm) 2856 ccw_req = dasd_eckd_build_check_tcw(base, fdata, 2857 enable_pav, 2858 
fmt_buffer, rpt); 2859 else 2860 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav, 2861 fmt_buffer, rpt); 2862 } 2863 2864 return ccw_req; 2865} 2866 2867/* 2868 * Sanity checks on format_data 2869 */ 2870static int dasd_eckd_format_sanity_checks(struct dasd_device *base, 2871 struct format_data_t *fdata) 2872{ 2873 struct dasd_eckd_private *private = base->private; 2874 2875 if (fdata->start_unit >= 2876 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2877 dev_warn(&base->cdev->dev, 2878 "Start track number %u used in formatting is too big\n", 2879 fdata->start_unit); 2880 return -EINVAL; 2881 } 2882 if (fdata->stop_unit >= 2883 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2884 dev_warn(&base->cdev->dev, 2885 "Stop track number %u used in formatting is too big\n", 2886 fdata->stop_unit); 2887 return -EINVAL; 2888 } 2889 if (fdata->start_unit > fdata->stop_unit) { 2890 dev_warn(&base->cdev->dev, 2891 "Start track %u used in formatting exceeds end track\n", 2892 fdata->start_unit); 2893 return -EINVAL; 2894 } 2895 if (dasd_check_blocksize(fdata->blksize) != 0) { 2896 dev_warn(&base->cdev->dev, 2897 "The DASD cannot be formatted with block size %u\n", 2898 fdata->blksize); 2899 return -EINVAL; 2900 } 2901 return 0; 2902} 2903 2904/* 2905 * This function will process format_data originally coming from an IOCTL 2906 */ 2907static int dasd_eckd_format_process_data(struct dasd_device *base, 2908 struct format_data_t *fdata, 2909 int enable_pav, int tpm, 2910 struct eckd_count *fmt_buffer, int rpt, 2911 struct irb *irb) 2912{ 2913 struct dasd_eckd_private *private = base->private; 2914 struct dasd_ccw_req *cqr, *n; 2915 struct list_head format_queue; 2916 struct dasd_device *device; 2917 char *sense = NULL; 2918 int old_start, old_stop, format_step; 2919 int step, retry; 2920 int rc; 2921 2922 rc = dasd_eckd_format_sanity_checks(base, fdata); 2923 if (rc) 2924 return rc; 2925 2926 INIT_LIST_HEAD(&format_queue); 2927 2928 old_start = fdata->start_unit; 2929 old_stop = fdata->stop_unit; 2930 2931 if (!tpm && fmt_buffer != NULL) { 2932 /* Command Mode / Format Check */ 2933 format_step = 1; 2934 } else if (tpm && fmt_buffer != NULL) { 2935 /* Transport Mode / Format Check */ 2936 format_step = DASD_CQR_MAX_CCW / rpt; 2937 } else { 2938 /* Normal Formatting */ 2939 format_step = DASD_CQR_MAX_CCW / 2940 recs_per_track(&private->rdc_data, 0, fdata->blksize); 2941 } 2942 2943 do { 2944 retry = 0; 2945 while (fdata->start_unit <= old_stop) { 2946 step = fdata->stop_unit - fdata->start_unit + 1; 2947 if (step > format_step) { 2948 fdata->stop_unit = 2949 fdata->start_unit + format_step - 1; 2950 } 2951 2952 cqr = dasd_eckd_format_build_ccw_req(base, fdata, 2953 enable_pav, tpm, 2954 fmt_buffer, rpt); 2955 if (IS_ERR(cqr)) { 2956 rc = PTR_ERR(cqr); 2957 if (rc == -ENOMEM) { 2958 if (list_empty(&format_queue)) 2959 goto out; 2960 /* 2961 * not enough memory available; start 2962 * the requests already built and retry 2963 * the rest after they have finished 2964 */ 2965 retry = 1; 2966 break; 2967 } 2968 goto out_err; 2969 } 2970 list_add_tail(&cqr->blocklist, &format_queue); 2971 2972 if (fmt_buffer) { 2973 step = fdata->stop_unit - fdata->start_unit + 1; 2974 fmt_buffer += rpt * step; 2975 } 2976 fdata->start_unit = fdata->stop_unit + 1; 2977 fdata->stop_unit = old_stop; 2978 } 2979 2980 rc = dasd_sleep_on_queue(&format_queue); 2981 2982out_err: 2983 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { 2984 device = cqr->startdev; 2985 private = device->private; 2986 2987 if (cqr->status ==
DASD_CQR_FAILED) { 2988 /* 2989 * Only get sense data if called by format 2990 * check 2991 */ 2992 if (fmt_buffer && irb) { 2993 sense = dasd_get_sense(&cqr->irb); 2994 memcpy(irb, &cqr->irb, sizeof(*irb)); 2995 } 2996 rc = -EIO; 2997 } 2998 list_del_init(&cqr->blocklist); 2999 dasd_ffree_request(cqr, device); 3000 private->count--; 3001 } 3002 3003 if (rc && rc != -EIO) 3004 goto out; 3005 if (rc == -EIO) { 3006 /* 3007 * In case fewer than the expected records are on the 3008 * track, we will most likely get a 'No Record Found' 3009 * error (in command mode) or a 'File Protected' error 3010 * (in transport mode). Those particular cases shouldn't 3011 * pass the -EIO to the IOCTL, therefore reset the rc 3012 * and continue. 3013 */ 3014 if (sense && 3015 (sense[1] & SNS1_NO_REC_FOUND || 3016 sense[1] & SNS1_FILE_PROTECTED)) 3017 retry = 1; 3018 else 3019 goto out; 3020 } 3021 3022 } while (retry); 3023 3024out: 3025 fdata->start_unit = old_start; 3026 fdata->stop_unit = old_stop; 3027 3028 return rc; 3029} 3030 3031static int dasd_eckd_format_device(struct dasd_device *base, 3032 struct format_data_t *fdata, int enable_pav) 3033{ 3034 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL, 3035 0, NULL); 3036} 3037 3038static bool test_and_set_format_track(struct dasd_format_entry *to_format, 3039 struct dasd_ccw_req *cqr) 3040{ 3041 struct dasd_block *block = cqr->block; 3042 struct dasd_format_entry *format; 3043 unsigned long flags; 3044 bool rc = false; 3045 3046 spin_lock_irqsave(&block->format_lock, flags); 3047 if (cqr->trkcount != atomic_read(&block->trkcount)) { 3048 /* 3049 * The number of formatted tracks has changed after request 3050 * start and we can not tell if the current track was involved. 3051 * To avoid data corruption treat it as if the current track is 3052 * involved 3053 */ 3054 rc = true; 3055 goto out; 3056 } 3057 list_for_each_entry(format, &block->format_list, list) { 3058 if (format->track == to_format->track) { 3059 rc = true; 3060 goto out; 3061 } 3062 } 3063 list_add_tail(&to_format->list, &block->format_list); 3064 3065out: 3066 spin_unlock_irqrestore(&block->format_lock, flags); 3067 return rc; 3068} 3069 3070static void clear_format_track(struct dasd_format_entry *format, 3071 struct dasd_block *block) 3072{ 3073 unsigned long flags; 3074 3075 spin_lock_irqsave(&block->format_lock, flags); 3076 atomic_inc(&block->trkcount); 3077 list_del_init(&format->list); 3078 spin_unlock_irqrestore(&block->format_lock, flags); 3079} 3080 3081/* 3082 * Callback function to free ESE format requests. 
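 * It removes the track from the list of tracks currently being formatted
 * and frees the request so that waiting I/O for that track can be retried.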
3083 */ 3084static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) 3085{ 3086 struct dasd_device *device = cqr->startdev; 3087 struct dasd_eckd_private *private = device->private; 3088 struct dasd_format_entry *format = data; 3089 3090 clear_format_track(format, cqr->basedev->block); 3091 private->count--; 3092 dasd_ffree_request(cqr, device); 3093} 3094 3095static struct dasd_ccw_req * 3096dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, 3097 struct irb *irb) 3098{ 3099 struct dasd_eckd_private *private; 3100 struct dasd_format_entry *format; 3101 struct format_data_t fdata; 3102 unsigned int recs_per_trk; 3103 struct dasd_ccw_req *fcqr; 3104 struct dasd_device *base; 3105 struct dasd_block *block; 3106 unsigned int blksize; 3107 struct request *req; 3108 sector_t first_trk; 3109 sector_t last_trk; 3110 sector_t curr_trk; 3111 int rc; 3112 3113 req = dasd_get_callback_data(cqr); 3114 block = cqr->block; 3115 base = block->base; 3116 private = base->private; 3117 blksize = block->bp_block; 3118 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3119 format = &startdev->format_entry; 3120 3121 first_trk = blk_rq_pos(req) >> block->s2b_shift; 3122 sector_div(first_trk, recs_per_trk); 3123 last_trk = 3124 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 3125 sector_div(last_trk, recs_per_trk); 3126 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); 3127 if (rc) 3128 return ERR_PTR(rc); 3129 3130 if (curr_trk < first_trk || curr_trk > last_trk) { 3131 DBF_DEV_EVENT(DBF_WARNING, startdev, 3132 "ESE error track %llu not within range %llu - %llu\n", 3133 curr_trk, first_trk, last_trk); 3134 return ERR_PTR(-EINVAL); 3135 } 3136 format->track = curr_trk; 3137 /* test if the track is already being formatted by another thread */ 3138 if (test_and_set_format_track(format, cqr)) { 3139 /* this is not a real error, so do not count down the retries */ 3140 cqr->retries++; 3141 return ERR_PTR(-EEXIST); 3142 } 3143 3144 fdata.start_unit = curr_trk; 3145 fdata.stop_unit = curr_trk; 3146 fdata.blksize = blksize; 3147 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; 3148 3149 rc = dasd_eckd_format_sanity_checks(base, &fdata); 3150 if (rc) 3151 return ERR_PTR(-EINVAL); 3152 3153 /* 3154 * We're building the request with PAV disabled as we're reusing 3155 * the former startdev. 3156 */ 3157 fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0); 3158 if (IS_ERR(fcqr)) 3159 return fcqr; 3160 3161 fcqr->callback = dasd_eckd_ese_format_cb; 3162 fcqr->callback_data = (void *) format; 3163 3164 return fcqr; 3165} 3166 3167/* 3168 * When data is read from an unformatted area of an ESE volume, this function 3169 * returns zeroed data and thereby mimics a read of zero data. 3170 * 3171 * The first unformatted track is the one that got the NRF error; the address is 3172 * encoded in the sense data. 3173 * 3174 * All tracks before have returned valid data and should not be touched. 3175 * All tracks after the unformatted track might or might not be formatted. Since this is 3176 * currently not known, remember the processed data and return the remainder of 3177 * the request to the block layer in __dasd_cleanup_cqr().
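 *
 * Example, assuming 12 blocks per track: a request for blocks 30-40 spans
 * tracks 2 and 3. If track 3 raised the NRF error, skip_block is 6, so
 * blocks 30-35 on track 2 keep the data that was already read, while
 * blocks 36-40 on track 3 are zeroed.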
3178 */ 3179static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) 3180{ 3181 struct dasd_eckd_private *private; 3182 sector_t first_trk, last_trk; 3183 sector_t first_blk, last_blk; 3184 unsigned int blksize, off; 3185 unsigned int recs_per_trk; 3186 struct dasd_device *base; 3187 struct req_iterator iter; 3188 struct dasd_block *block; 3189 unsigned int skip_block; 3190 unsigned int blk_count; 3191 struct request *req; 3192 struct bio_vec bv; 3193 sector_t curr_trk; 3194 sector_t end_blk; 3195 char *dst; 3196 int rc; 3197 3198 req = (struct request *) cqr->callback_data; 3199 base = cqr->block->base; 3200 blksize = base->block->bp_block; 3201 block = cqr->block; 3202 private = base->private; 3203 skip_block = 0; 3204 blk_count = 0; 3205 3206 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3207 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift; 3208 sector_div(first_trk, recs_per_trk); 3209 last_trk = last_blk = 3210 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 3211 sector_div(last_trk, recs_per_trk); 3212 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); 3213 if (rc) 3214 return rc; 3215 3216 /* sanity check if the current track from sense data is valid */ 3217 if (curr_trk < first_trk || curr_trk > last_trk) { 3218 DBF_DEV_EVENT(DBF_WARNING, base, 3219 "ESE error track %llu not within range %llu - %llu\n", 3220 curr_trk, first_trk, last_trk); 3221 return -EINVAL; 3222 } 3223 3224 /* 3225 * if a track other than the first got the NRF error, we have to skip 3226 * over the valid blocks 3227 */ 3228 if (curr_trk != first_trk) 3229 skip_block = curr_trk * recs_per_trk - first_blk; 3230 3231 /* we have no information beyond the current track */ 3232 end_blk = (curr_trk + 1) * recs_per_trk; 3233 3234 rq_for_each_segment(bv, req, iter) { 3235 dst = page_address(bv.bv_page) + bv.bv_offset; 3236 for (off = 0; off < bv.bv_len; off += blksize) { 3237 if (first_blk + blk_count >= end_blk) { 3238 cqr->proc_bytes = blk_count * blksize; 3239 return 0; 3240 } 3241 if (dst && !skip_block) 3242 memset(dst, 0, blksize); 3243 else 3244 skip_block--; 3245 dst += blksize; 3246 blk_count++; 3247 } 3248 } 3249 return 0; 3250} 3251 3252/* 3253 * Helper function to count consecutive records of a single track. 3254 */ 3255static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start, 3256 int max) 3257{ 3258 int head; 3259 int i; 3260 3261 head = fmt_buffer[start].head; 3262 3263 /* 3264 * There are 3 conditions where we stop counting: 3265 * - if data reoccurs (same head and record may reoccur), which may 3266 * happen due to the way DASD_ECKD_CCW_READ_COUNT works 3267 * - when the head changes, because we're iterating over several tracks 3268 * then (DASD_ECKD_CCW_READ_COUNT_MT) 3269 * - when we've reached the end of sensible data in the buffer (the 3270 * record will be 0 then) 3271 */ 3272 for (i = start; i < max; i++) { 3273 if (i > start) { 3274 if ((fmt_buffer[i].head == head && 3275 fmt_buffer[i].record == 1) || 3276 fmt_buffer[i].head != head || 3277 fmt_buffer[i].record == 0) 3278 break; 3279 } 3280 } 3281 3282 return i - start; 3283} 3284 3285/* 3286 * Evaluate a given range of tracks. Data like number of records, blocksize, 3287 * record ids, and key length are compared with expected data. 3288 * 3289 * If a mismatch occurs, the corresponding error bit is set, as well as 3290 * additional information, depending on the error.
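 *
 * For example, a record whose data length does not match the expected
 * blocksize sets DASD_FMT_ERR_BLKSIZE; cdata then carries the failing
 * track, record number, blocksize, and key length back to the caller.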
3291 */ 3292static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer, 3293 struct format_check_t *cdata, 3294 int rpt_max, int rpt_exp, 3295 int trk_per_cyl, int tpm) 3296{ 3297 struct ch_t geo; 3298 int max_entries; 3299 int count = 0; 3300 int trkcount; 3301 int blksize; 3302 int pos = 0; 3303 int i, j; 3304 int kl; 3305 3306 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 3307 max_entries = trkcount * rpt_max; 3308 3309 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) { 3310 /* Calculate the correct next starting position in the buffer */ 3311 if (tpm) { 3312 while (fmt_buffer[pos].record == 0 && 3313 fmt_buffer[pos].dl == 0) { 3314 if (pos++ > max_entries) 3315 break; 3316 } 3317 } else { 3318 if (i != cdata->expect.start_unit) 3319 pos += rpt_max - count; 3320 } 3321 3322 /* Calculate the expected geo values for the current track */ 3323 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl); 3324 3325 /* Count and check number of records */ 3326 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max); 3327 3328 if (count < rpt_exp) { 3329 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS; 3330 break; 3331 } 3332 if (count > rpt_exp) { 3333 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS; 3334 break; 3335 } 3336 3337 for (j = 0; j < count; j++, pos++) { 3338 blksize = cdata->expect.blksize; 3339 kl = 0; 3340 3341 /* 3342 * Set special values when checking CDL formatted 3343 * devices. 3344 */ 3345 if ((cdata->expect.intensity & 0x08) && 3346 geo.cyl == 0 && geo.head == 0) { 3347 if (j < 3) { 3348 blksize = sizes_trk0[j] - 4; 3349 kl = 4; 3350 } 3351 } 3352 if ((cdata->expect.intensity & 0x08) && 3353 geo.cyl == 0 && geo.head == 1) { 3354 blksize = LABEL_SIZE - 44; 3355 kl = 44; 3356 } 3357 3358 /* Check blocksize */ 3359 if (fmt_buffer[pos].dl != blksize) { 3360 cdata->result = DASD_FMT_ERR_BLKSIZE; 3361 goto out; 3362 } 3363 /* Check if key length is 0 */ 3364 if (fmt_buffer[pos].kl != kl) { 3365 cdata->result = DASD_FMT_ERR_KEY_LENGTH; 3366 goto out; 3367 } 3368 /* Check if record_id is correct */ 3369 if (fmt_buffer[pos].cyl != geo.cyl || 3370 fmt_buffer[pos].head != geo.head || 3371 fmt_buffer[pos].record != (j + 1)) { 3372 cdata->result = DASD_FMT_ERR_RECORD_ID; 3373 goto out; 3374 } 3375 } 3376 } 3377 3378out: 3379 /* 3380 * In case of no errors, we need to decrease by one 3381 * to get the correct positions. 3382 */ 3383 if (!cdata->result) { 3384 i--; 3385 pos--; 3386 } 3387 3388 cdata->unit = i; 3389 cdata->num_records = count; 3390 cdata->rec = fmt_buffer[pos].record; 3391 cdata->blksize = fmt_buffer[pos].dl; 3392 cdata->key_length = fmt_buffer[pos].kl; 3393} 3394 3395/* 3396 * Check the format of a range of tracks of a DASD. 
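 * Transport mode is used to check multiple tracks with a single request
 * if the feature set and fcx_max_data allow it; otherwise the check falls
 * back to command mode, processing one track per request.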
3397 */ 3398static int dasd_eckd_check_device_format(struct dasd_device *base, 3399 struct format_check_t *cdata, 3400 int enable_pav) 3401{ 3402 struct dasd_eckd_private *private = base->private; 3403 struct eckd_count *fmt_buffer; 3404 struct irb irb; 3405 int rpt_max, rpt_exp; 3406 int fmt_buffer_size; 3407 int trk_per_cyl; 3408 int trkcount; 3409 int tpm = 0; 3410 int rc; 3411 3412 trk_per_cyl = private->rdc_data.trk_per_cyl; 3413 3414 /* Get the maximum and expected number of records per track */ 3415 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1; 3416 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize); 3417 3418 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 3419 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count); 3420 3421 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA); 3422 if (!fmt_buffer) 3423 return -ENOMEM; 3424 3425 /* 3426 * A certain FICON feature subset is needed to operate in transport 3427 * mode. Additionally, the support for transport mode is implicitly 3428 * checked by comparing the buffer size with fcx_max_data. As long as 3429 * the buffer size is smaller, we can operate in transport mode and 3430 * process multiple tracks. If not, only one track at a time is 3431 * processed, using command mode. 3432 */ 3433 if ((private->features.feature[40] & 0x04) && 3434 fmt_buffer_size <= private->fcx_max_data) 3435 tpm = 1; 3436 3437 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav, 3438 tpm, fmt_buffer, rpt_max, &irb); 3439 if (rc && rc != -EIO) 3440 goto out; 3441 if (rc == -EIO) { 3442 /* 3443 * If our first attempt with transport mode enabled comes back 3444 * with an incorrect length error, we're going to retry the 3445 * check with command mode.
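 * (a subchannel status of 0x40 indicates such an incorrect length
 * condition)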
3446 */ 3447 if (tpm && scsw_cstat(&irb.scsw) == 0x40) { 3448 tpm = 0; 3449 rc = dasd_eckd_format_process_data(base, &cdata->expect, 3450 enable_pav, tpm, 3451 fmt_buffer, rpt_max, 3452 &irb); 3453 if (rc) 3454 goto out; 3455 } else { 3456 goto out; 3457 } 3458 } 3459 3460 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp, 3461 trk_per_cyl, tpm); 3462 3463out: 3464 kfree(fmt_buffer); 3465 3466 return rc; 3467} 3468 3469static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) 3470{ 3471 if (cqr->retries < 0) { 3472 cqr->status = DASD_CQR_FAILED; 3473 return; 3474 } 3475 cqr->status = DASD_CQR_FILLED; 3476 if (cqr->block && (cqr->startdev != cqr->block->base)) { 3477 dasd_eckd_reset_ccw_to_base_io(cqr); 3478 cqr->startdev = cqr->block->base; 3479 cqr->lpm = dasd_path_get_opm(cqr->block->base); 3480 } 3481}; 3482 3483static dasd_erp_fn_t 3484dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 3485{ 3486 struct dasd_device *device = (struct dasd_device *) cqr->startdev; 3487 struct ccw_device *cdev = device->cdev; 3488 3489 switch (cdev->id.cu_type) { 3490 case 0x3990: 3491 case 0x2105: 3492 case 0x2107: 3493 case 0x1750: 3494 return dasd_3990_erp_action; 3495 case 0x9343: 3496 case 0x3880: 3497 default: 3498 return dasd_default_erp_action; 3499 } 3500} 3501 3502static dasd_erp_fn_t 3503dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) 3504{ 3505 return dasd_default_erp_postaction; 3506} 3507 3508static void dasd_eckd_check_for_device_change(struct dasd_device *device, 3509 struct dasd_ccw_req *cqr, 3510 struct irb *irb) 3511{ 3512 char mask; 3513 char *sense = NULL; 3514 struct dasd_eckd_private *private = device->private; 3515 3516 /* first of all check for state change pending interrupt */ 3517 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 3518 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 3519 /* 3520 * for alias only, not in offline processing 3521 * and only if not suspended 3522 */ 3523 if (!device->block && private->lcu && 3524 device->state == DASD_STATE_ONLINE && 3525 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 3526 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 3527 /* schedule worker to reload device */ 3528 dasd_reload_device(device); 3529 } 3530 dasd_generic_handle_state_change(device); 3531 return; 3532 } 3533 3534 sense = dasd_get_sense(irb); 3535 if (!sense) 3536 return; 3537 3538 /* summary unit check */ 3539 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 3540 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 3541 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) { 3542 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3543 "eckd suc: device already notified"); 3544 return; 3545 } 3546 sense = dasd_get_sense(irb); 3547 if (!sense) { 3548 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3549 "eckd suc: no reason code available"); 3550 clear_bit(DASD_FLAG_SUC, &device->flags); 3551 return; 3552 3553 } 3554 private->suc_reason = sense[8]; 3555 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", 3556 "eckd handle summary unit check: reason", 3557 private->suc_reason); 3558 dasd_get_device(device); 3559 if (!schedule_work(&device->suc_work)) 3560 dasd_put_device(device); 3561 3562 return; 3563 } 3564 3565 /* service information message SIM */ 3566 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) && 3567 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 3568 dasd_3990_erp_handle_sim(device, sense); 3569 return; 3570 } 3571 3572 /* loss of device reservation is handled via base devices only 3573 * as alias devices may be used with 
several bases 3574 */ 3575 if (device->block && (sense[27] & DASD_SENSE_BIT_0) && 3576 (sense[7] == 0x3F) && 3577 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 3578 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { 3579 if (device->features & DASD_FEATURE_FAILONSLCK) 3580 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); 3581 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 3582 dev_err(&device->cdev->dev, 3583 "The device reservation was lost\n"); 3584 } 3585} 3586 3587static int dasd_eckd_ras_sanity_checks(struct dasd_device *device, 3588 unsigned int first_trk, 3589 unsigned int last_trk) 3590{ 3591 struct dasd_eckd_private *private = device->private; 3592 unsigned int trks_per_vol; 3593 int rc = 0; 3594 3595 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl; 3596 3597 if (first_trk >= trks_per_vol) { 3598 dev_warn(&device->cdev->dev, 3599 "Start track number %u used in the space release command is too big\n", 3600 first_trk); 3601 rc = -EINVAL; 3602 } else if (last_trk >= trks_per_vol) { 3603 dev_warn(&device->cdev->dev, 3604 "Stop track number %u used in the space release command is too big\n", 3605 last_trk); 3606 rc = -EINVAL; 3607 } else if (first_trk > last_trk) { 3608 dev_warn(&device->cdev->dev, 3609 "Start track %u used in the space release command exceeds the end track\n", 3610 first_trk); 3611 rc = -EINVAL; 3612 } 3613 return rc; 3614} 3615 3616/* 3617 * Helper function to count the number of extents involved in a given 3618 * range, with extent alignment in mind. 3619 */ 3620static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) 3621{ 3622 int cur_pos = 0; 3623 int count = 0; 3624 int tmp; 3625 3626 if (from == to) 3627 return 1; 3628 3629 /* Count first partial extent */ 3630 if (from % trks_per_ext != 0) { 3631 tmp = from + trks_per_ext - (from % trks_per_ext) - 1; 3632 if (tmp > to) 3633 tmp = to; 3634 cur_pos = tmp - from + 1; 3635 count++; 3636 } 3637 /* Count full extents */ 3638 if (to - (from + cur_pos) + 1 >= trks_per_ext) { 3639 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext); 3640 count += (tmp - (from + cur_pos) + 1) / trks_per_ext; 3641 cur_pos = tmp; 3642 } 3643 /* Count last partial extent */ 3644 if (cur_pos < to) 3645 count++; 3646 3647 return count; 3648} 3649 3650/* 3651 * Release allocated space for a given range or an entire volume. 3652 */ 3653static struct dasd_ccw_req * 3654dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, 3655 struct request *req, unsigned int first_trk, 3656 unsigned int last_trk, int by_extent) 3657{ 3658 struct dasd_eckd_private *private = device->private; 3659 struct dasd_dso_ras_ext_range *ras_range; 3660 struct dasd_rssd_features *features; 3661 struct dasd_dso_ras_data *ras_data; 3662 u16 heads, beg_head, end_head; 3663 int cur_to_trk, cur_from_trk; 3664 struct dasd_ccw_req *cqr; 3665 u32 beg_cyl, end_cyl; 3666 struct ccw1 *ccw; 3667 int trks_per_ext; 3668 size_t ras_size; 3669 size_t size; 3670 int nr_exts; 3671 void *rq; 3672 int i; 3673 3674 if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk)) 3675 return ERR_PTR(-EINVAL); 3676 3677 rq = req ?
blk_mq_rq_to_pdu(req) : NULL; 3678 3679 features = &private->features; 3680 3681 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3682 nr_exts = 0; 3683 if (by_extent) 3684 nr_exts = count_exts(first_trk, last_trk, trks_per_ext); 3685 ras_size = sizeof(*ras_data); 3686 size = ras_size + (nr_exts * sizeof(*ras_range)); 3687 3688 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq); 3689 if (IS_ERR(cqr)) { 3690 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 3691 "Could not allocate RAS request"); 3692 return cqr; 3693 } 3694 3695 ras_data = cqr->data; 3696 memset(ras_data, 0, size); 3697 3698 ras_data->order = DSO_ORDER_RAS; 3699 ras_data->flags.vol_type = 0; /* CKD volume */ 3700 /* Release specified extents or entire volume */ 3701 ras_data->op_flags.by_extent = by_extent; 3702 /* 3703 * This bit guarantees initialisation of tracks within an extent that is 3704 * not fully specified, but is only supported with a certain feature 3705 * subset. 3706 */ 3707 ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01); 3708 ras_data->lss = private->ned->ID; 3709 ras_data->dev_addr = private->ned->unit_addr; 3710 ras_data->nr_exts = nr_exts; 3711 3712 if (by_extent) { 3713 heads = private->rdc_data.trk_per_cyl; 3714 cur_from_trk = first_trk; 3715 cur_to_trk = first_trk + trks_per_ext - 3716 (first_trk % trks_per_ext) - 1; 3717 if (cur_to_trk > last_trk) 3718 cur_to_trk = last_trk; 3719 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size); 3720 3721 for (i = 0; i < nr_exts; i++) { 3722 beg_cyl = cur_from_trk / heads; 3723 beg_head = cur_from_trk % heads; 3724 end_cyl = cur_to_trk / heads; 3725 end_head = cur_to_trk % heads; 3726 3727 set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head); 3728 set_ch_t(&ras_range->end_ext, end_cyl, end_head); 3729 3730 cur_from_trk = cur_to_trk + 1; 3731 cur_to_trk = cur_from_trk + trks_per_ext - 1; 3732 if (cur_to_trk > last_trk) 3733 cur_to_trk = last_trk; 3734 ras_range++; 3735 } 3736 } 3737 3738 ccw = cqr->cpaddr; 3739 ccw->cda = (__u32)(addr_t)cqr->data; 3740 ccw->cmd_code = DASD_ECKD_CCW_DSO; 3741 ccw->count = size; 3742 3743 cqr->startdev = device; 3744 cqr->memdev = device; 3745 cqr->block = block; 3746 cqr->retries = 256; 3747 cqr->expires = device->default_expires * HZ; 3748 cqr->buildclk = get_tod_clock(); 3749 cqr->status = DASD_CQR_FILLED; 3750 3751 return cqr; 3752} 3753 3754static int dasd_eckd_release_space_full(struct dasd_device *device) 3755{ 3756 struct dasd_ccw_req *cqr; 3757 int rc; 3758 3759 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0); 3760 if (IS_ERR(cqr)) 3761 return PTR_ERR(cqr); 3762 3763 rc = dasd_sleep_on_interruptible(cqr); 3764 3765 dasd_sfree_request(cqr, cqr->memdev); 3766 3767 return rc; 3768} 3769 3770static int dasd_eckd_release_space_trks(struct dasd_device *device, 3771 unsigned int from, unsigned int to) 3772{ 3773 struct dasd_eckd_private *private = device->private; 3774 struct dasd_block *block = device->block; 3775 struct dasd_ccw_req *cqr, *n; 3776 struct list_head ras_queue; 3777 unsigned int device_exts; 3778 int trks_per_ext; 3779 int stop, step; 3780 int cur_pos; 3781 int rc = 0; 3782 int retry; 3783 3784 INIT_LIST_HEAD(&ras_queue); 3785 3786 device_exts = private->real_cyl / dasd_eckd_ext_size(device); 3787 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3788 3789 /* Make sure device limits are not exceeded */ 3790 step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX); 3791 cur_pos = from; 3792 3793 do { 3794 
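 /*
 * Build as many extent-aligned requests as memory allows; if an
 * allocation fails, process the queued requests first and retry
 * the remainder afterwards.
 */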
retry = 0; 3795 while (cur_pos < to) { 3796 stop = cur_pos + step - 3797 ((cur_pos + step) % trks_per_ext) - 1; 3798 if (stop > to) 3799 stop = to; 3800 3801 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1); 3802 if (IS_ERR(cqr)) { 3803 rc = PTR_ERR(cqr); 3804 if (rc == -ENOMEM) { 3805 if (list_empty(&ras_queue)) 3806 goto out; 3807 retry = 1; 3808 break; 3809 } 3810 goto err_out; 3811 } 3812 3813 spin_lock_irq(&block->queue_lock); 3814 list_add_tail(&cqr->blocklist, &ras_queue); 3815 spin_unlock_irq(&block->queue_lock); 3816 cur_pos = stop + 1; 3817 } 3818 3819 rc = dasd_sleep_on_queue_interruptible(&ras_queue); 3820 3821err_out: 3822 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) { 3823 device = cqr->startdev; 3824 private = device->private; 3825 3826 spin_lock_irq(&block->queue_lock); 3827 list_del_init(&cqr->blocklist); 3828 spin_unlock_irq(&block->queue_lock); 3829 dasd_sfree_request(cqr, device); 3830 private->count--; 3831 } 3832 } while (retry); 3833 3834out: 3835 return rc; 3836} 3837 3838static int dasd_eckd_release_space(struct dasd_device *device, 3839 struct format_data_t *rdata) 3840{ 3841 if (rdata->intensity & DASD_FMT_INT_ESE_FULL) 3842 return dasd_eckd_release_space_full(device); 3843 else if (rdata->intensity == 0) 3844 return dasd_eckd_release_space_trks(device, rdata->start_unit, 3845 rdata->stop_unit); 3846 else 3847 return -EINVAL; 3848} 3849 3850static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 3851 struct dasd_device *startdev, 3852 struct dasd_block *block, 3853 struct request *req, 3854 sector_t first_rec, 3855 sector_t last_rec, 3856 sector_t first_trk, 3857 sector_t last_trk, 3858 unsigned int first_offs, 3859 unsigned int last_offs, 3860 unsigned int blk_per_trk, 3861 unsigned int blksize) 3862{ 3863 struct dasd_eckd_private *private; 3864 unsigned long *idaws; 3865 struct LO_eckd_data *LO_data; 3866 struct dasd_ccw_req *cqr; 3867 struct ccw1 *ccw; 3868 struct req_iterator iter; 3869 struct bio_vec bv; 3870 char *dst; 3871 unsigned int off; 3872 int count, cidaw, cplength, datasize; 3873 sector_t recid; 3874 unsigned char cmd, rcmd; 3875 int use_prefix; 3876 struct dasd_device *basedev; 3877 3878 basedev = block->base; 3879 private = basedev->private; 3880 if (rq_data_dir(req) == READ) 3881 cmd = DASD_ECKD_CCW_READ_MT; 3882 else if (rq_data_dir(req) == WRITE) 3883 cmd = DASD_ECKD_CCW_WRITE_MT; 3884 else 3885 return ERR_PTR(-EINVAL); 3886 3887 /* Check struct bio and count the number of blocks for the request. */ 3888 count = 0; 3889 cidaw = 0; 3890 rq_for_each_segment(bv, req, iter) { 3891 if (bv.bv_len & (blksize - 1)) 3892 /* Eckd can only do full blocks. */ 3893 return ERR_PTR(-EINVAL); 3894 count += bv.bv_len >> (block->s2b_shift + 9); 3895 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 3896 cidaw += bv.bv_len >> (block->s2b_shift + 9); 3897 } 3898 /* Paranoia. 
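 * The block count derived from the bio segments must match the record
 * range computed by the caller.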
*/ 3899 if (count != last_rec - first_rec + 1) 3900 return ERR_PTR(-EINVAL); 3901 3902 /* use the prefix command if available */ 3903 use_prefix = private->features.feature[8] & 0x01; 3904 if (use_prefix) { 3905 /* 1x prefix + number of blocks */ 3906 cplength = 2 + count; 3907 /* 1x prefix + cidaws*sizeof(long) */ 3908 datasize = sizeof(struct PFX_eckd_data) + 3909 sizeof(struct LO_eckd_data) + 3910 cidaw * sizeof(unsigned long); 3911 } else { 3912 /* 1x define extent + 1x locate record + number of blocks */ 3913 cplength = 2 + count; 3914 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 3915 datasize = sizeof(struct DE_eckd_data) + 3916 sizeof(struct LO_eckd_data) + 3917 cidaw * sizeof(unsigned long); 3918 } 3919 /* Find out the number of additional locate record ccws for cdl. */ 3920 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 3921 if (last_rec >= 2*blk_per_trk) 3922 count = 2*blk_per_trk - first_rec; 3923 cplength += count; 3924 datasize += count*sizeof(struct LO_eckd_data); 3925 } 3926 /* Allocate the ccw request. */ 3927 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 3928 startdev, blk_mq_rq_to_pdu(req)); 3929 if (IS_ERR(cqr)) 3930 return cqr; 3931 ccw = cqr->cpaddr; 3932 /* First ccw is define extent or prefix. */ 3933 if (use_prefix) { 3934 if (prefix(ccw++, cqr->data, first_trk, 3935 last_trk, cmd, basedev, startdev) == -EAGAIN) { 3936 /* Clock not in sync and XRC is enabled. 3937 * Try again later. 3938 */ 3939 dasd_sfree_request(cqr, startdev); 3940 return ERR_PTR(-EAGAIN); 3941 } 3942 idaws = (unsigned long *) (cqr->data + 3943 sizeof(struct PFX_eckd_data)); 3944 } else { 3945 if (define_extent(ccw++, cqr->data, first_trk, 3946 last_trk, cmd, basedev, 0) == -EAGAIN) { 3947 /* Clock not in sync and XRC is enabled. 3948 * Try again later. 3949 */ 3950 dasd_sfree_request(cqr, startdev); 3951 return ERR_PTR(-EAGAIN); 3952 } 3953 idaws = (unsigned long *) (cqr->data + 3954 sizeof(struct DE_eckd_data)); 3955 } 3956 /* Build locate_record+read/write/ccws. */ 3957 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 3958 recid = first_rec; 3959 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 3960 /* Only standard blocks so there is just one locate record. */ 3961 ccw[-1].flags |= CCW_FLAG_CC; 3962 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 3963 last_rec - recid + 1, cmd, basedev, blksize); 3964 } 3965 rq_for_each_segment(bv, req, iter) { 3966 dst = page_address(bv.bv_page) + bv.bv_offset; 3967 if (dasd_page_cache) { 3968 char *copy = kmem_cache_alloc(dasd_page_cache, 3969 GFP_DMA | __GFP_NOWARN); 3970 if (copy && rq_data_dir(req) == WRITE) 3971 memcpy(copy + bv.bv_offset, dst, bv.bv_len); 3972 if (copy) 3973 dst = copy + bv.bv_offset; 3974 } 3975 for (off = 0; off < bv.bv_len; off += blksize) { 3976 sector_t trkid = recid; 3977 unsigned int recoffs = sector_div(trkid, blk_per_trk); 3978 rcmd = cmd; 3979 count = blksize; 3980 /* Locate record for cdl special block ? */ 3981 if (private->uses_cdl && recid < 2*blk_per_trk) { 3982 if (dasd_eckd_cdl_special(blk_per_trk, recid)){ 3983 rcmd |= 0x8; 3984 count = dasd_eckd_cdl_reclen(recid); 3985 if (count < blksize && 3986 rq_data_dir(req) == READ) 3987 memset(dst + count, 0xe5, 3988 blksize - count); 3989 } 3990 ccw[-1].flags |= CCW_FLAG_CC; 3991 locate_record(ccw++, LO_data++, 3992 trkid, recoffs + 1, 3993 1, rcmd, basedev, count); 3994 } 3995 /* Locate record for standard blocks ? 
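 * (after the cdl special area on the first two tracks, a single locate
 * record covers all remaining standard blocks)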
*/ 3996 if (private->uses_cdl && recid == 2*blk_per_trk) { 3997 ccw[-1].flags |= CCW_FLAG_CC; 3998 locate_record(ccw++, LO_data++, 3999 trkid, recoffs + 1, 4000 last_rec - recid + 1, 4001 cmd, basedev, count); 4002 } 4003 /* Read/write ccw. */ 4004 ccw[-1].flags |= CCW_FLAG_CC; 4005 ccw->cmd_code = rcmd; 4006 ccw->count = count; 4007 if (idal_is_needed(dst, blksize)) { 4008 ccw->cda = (__u32)(addr_t) idaws; 4009 ccw->flags = CCW_FLAG_IDA; 4010 idaws = idal_create_words(idaws, dst, blksize); 4011 } else { 4012 ccw->cda = (__u32)(addr_t) dst; 4013 ccw->flags = 0; 4014 } 4015 ccw++; 4016 dst += blksize; 4017 recid++; 4018 } 4019 } 4020 if (blk_noretry_request(req) || 4021 block->base->features & DASD_FEATURE_FAILFAST) 4022 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4023 cqr->startdev = startdev; 4024 cqr->memdev = startdev; 4025 cqr->block = block; 4026 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4027 cqr->lpm = dasd_path_get_ppm(startdev); 4028 cqr->retries = startdev->default_retries; 4029 cqr->buildclk = get_tod_clock(); 4030 cqr->status = DASD_CQR_FILLED; 4031 4032 /* Set flags to suppress output for expected errors */ 4033 if (dasd_eckd_is_ese(basedev)) { 4034 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4035 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4036 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4037 } 4038 4039 return cqr; 4040} 4041 4042static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 4043 struct dasd_device *startdev, 4044 struct dasd_block *block, 4045 struct request *req, 4046 sector_t first_rec, 4047 sector_t last_rec, 4048 sector_t first_trk, 4049 sector_t last_trk, 4050 unsigned int first_offs, 4051 unsigned int last_offs, 4052 unsigned int blk_per_trk, 4053 unsigned int blksize) 4054{ 4055 unsigned long *idaws; 4056 struct dasd_ccw_req *cqr; 4057 struct ccw1 *ccw; 4058 struct req_iterator iter; 4059 struct bio_vec bv; 4060 char *dst, *idaw_dst; 4061 unsigned int cidaw, cplength, datasize; 4062 unsigned int tlf; 4063 sector_t recid; 4064 unsigned char cmd; 4065 struct dasd_device *basedev; 4066 unsigned int trkcount, count, count_to_trk_end; 4067 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 4068 unsigned char new_track, end_idaw; 4069 sector_t trkid; 4070 unsigned int recoffs; 4071 4072 basedev = block->base; 4073 if (rq_data_dir(req) == READ) 4074 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 4075 else if (rq_data_dir(req) == WRITE) 4076 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 4077 else 4078 return ERR_PTR(-EINVAL); 4079 4080 /* Track based I/O needs IDAWs for each page, and not just for 4081 * 64 bit addresses. We need additional idals for pages 4082 * that get filled from two tracks, so we use the number 4083 * of records as upper limit. 4084 */ 4085 cidaw = last_rec - first_rec + 1; 4086 trkcount = last_trk - first_trk + 1; 4087 4088 /* 1x prefix + one read/write ccw per track */ 4089 cplength = 1 + trkcount; 4090 4091 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long); 4092 4093 /* Allocate the ccw request. 
*/ 4094 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 4095 startdev, blk_mq_rq_to_pdu(req)); 4096 if (IS_ERR(cqr)) 4097 return cqr; 4098 ccw = cqr->cpaddr; 4099 /* transfer length factor: how many bytes to read from the last track */ 4100 if (first_trk == last_trk) 4101 tlf = last_offs - first_offs + 1; 4102 else 4103 tlf = last_offs + 1; 4104 tlf *= blksize; 4105 4106 if (prefix_LRE(ccw++, cqr->data, first_trk, 4107 last_trk, cmd, basedev, startdev, 4108 1 /* format */, first_offs + 1, 4109 trkcount, blksize, 4110 tlf) == -EAGAIN) { 4111 /* Clock not in sync and XRC is enabled. 4112 * Try again later. 4113 */ 4114 dasd_sfree_request(cqr, startdev); 4115 return ERR_PTR(-EAGAIN); 4116 } 4117 4118 /* 4119 * The translation of a request into ccw programs must meet the 4120 * following conditions: 4121 * - all idaws but the first and the last must address full pages 4122 * (or 2K blocks on 31-bit) 4123 * - the scope of a ccw and its idal ends with the track boundaries 4124 */ 4125 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); 4126 recid = first_rec; 4127 new_track = 1; 4128 end_idaw = 0; 4129 len_to_track_end = 0; 4130 idaw_dst = NULL; 4131 idaw_len = 0; 4132 rq_for_each_segment(bv, req, iter) { 4133 dst = page_address(bv.bv_page) + bv.bv_offset; 4134 seg_len = bv.bv_len; 4135 while (seg_len) { 4136 if (new_track) { 4137 trkid = recid; 4138 recoffs = sector_div(trkid, blk_per_trk); 4139 count_to_trk_end = blk_per_trk - recoffs; 4140 count = min((last_rec - recid + 1), 4141 (sector_t)count_to_trk_end); 4142 len_to_track_end = count * blksize; 4143 ccw[-1].flags |= CCW_FLAG_CC; 4144 ccw->cmd_code = cmd; 4145 ccw->count = len_to_track_end; 4146 ccw->cda = (__u32)(addr_t)idaws; 4147 ccw->flags = CCW_FLAG_IDA; 4148 ccw++; 4149 recid += count; 4150 new_track = 0; 4151 /* first idaw for a ccw may start anywhere */ 4152 if (!idaw_dst) 4153 idaw_dst = dst; 4154 } 4155 /* If we start a new idaw, we must make sure that it 4156 * starts on an IDA_BLOCK_SIZE boundary.
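* (e.g. with IDA_BLOCK_SIZE = 4096 in the 64-bit case, a new idaw may only start at a page aligned physical address, which is what the __pa(dst) & (IDA_BLOCK_SIZE-1) test below enforces).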
4157 * If we continue an idaw, we must make sure that the 4158 * current segment begins where the so far accumulated 4159 * idaw ends. 4160 */ 4161 if (!idaw_dst) { 4162 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) { 4163 dasd_sfree_request(cqr, startdev); 4164 return ERR_PTR(-ERANGE); 4165 } else 4166 idaw_dst = dst; 4167 } 4168 if ((idaw_dst + idaw_len) != dst) { 4169 dasd_sfree_request(cqr, startdev); 4170 return ERR_PTR(-ERANGE); 4171 } 4172 part_len = min(seg_len, len_to_track_end); 4173 seg_len -= part_len; 4174 dst += part_len; 4175 idaw_len += part_len; 4176 len_to_track_end -= part_len; 4177 /* collected memory area ends on an IDA_BLOCK border, 4178 * -> create an idaw 4179 * idal_create_words will handle cases where idaw_len 4180 * is larger than IDA_BLOCK_SIZE 4181 */ 4182 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1))) 4183 end_idaw = 1; 4184 /* We also need to end the idaw at track end */ 4185 if (!len_to_track_end) { 4186 new_track = 1; 4187 end_idaw = 1; 4188 } 4189 if (end_idaw) { 4190 idaws = idal_create_words(idaws, idaw_dst, 4191 idaw_len); 4192 idaw_dst = NULL; 4193 idaw_len = 0; 4194 end_idaw = 0; 4195 } 4196 } 4197 } 4198 4199 if (blk_noretry_request(req) || 4200 block->base->features & DASD_FEATURE_FAILFAST) 4201 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4202 cqr->startdev = startdev; 4203 cqr->memdev = startdev; 4204 cqr->block = block; 4205 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4206 cqr->lpm = dasd_path_get_ppm(startdev); 4207 cqr->retries = startdev->default_retries; 4208 cqr->buildclk = get_tod_clock(); 4209 cqr->status = DASD_CQR_FILLED; 4210 4211 /* Set flags to suppress output for expected errors */ 4212 if (dasd_eckd_is_ese(basedev)) 4213 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4214 4215 return cqr; 4216} 4217 4218static int prepare_itcw(struct itcw *itcw, 4219 unsigned int trk, unsigned int totrk, int cmd, 4220 struct dasd_device *basedev, 4221 struct dasd_device *startdev, 4222 unsigned int rec_on_trk, int count, 4223 unsigned int blksize, 4224 unsigned int total_data_size, 4225 unsigned int tlf, 4226 unsigned int blk_per_trk) 4227{ 4228 struct PFX_eckd_data pfxdata; 4229 struct dasd_eckd_private *basepriv, *startpriv; 4230 struct DE_eckd_data *dedata; 4231 struct LRE_eckd_data *lredata; 4232 struct dcw *dcw; 4233 4234 u32 begcyl, endcyl; 4235 u16 heads, beghead, endhead; 4236 u8 pfx_cmd; 4237 4238 int rc = 0; 4239 int sector = 0; 4240 int dn, d; 4241 4242 4243 /* setup prefix data */ 4244 basepriv = basedev->private; 4245 startpriv = startdev->private; 4246 dedata = &pfxdata.define_extent; 4247 lredata = &pfxdata.locate_record; 4248 4249 memset(&pfxdata, 0, sizeof(pfxdata)); 4250 pfxdata.format = 1; /* PFX with LRE */ 4251 pfxdata.base_address = basepriv->ned->unit_addr; 4252 pfxdata.base_lss = basepriv->ned->ID; 4253 pfxdata.validity.define_extent = 1; 4254 4255 /* private uid is kept up to date, conf_data may be outdated */ 4256 if (startpriv->uid.type == UA_BASE_PAV_ALIAS) 4257 pfxdata.validity.verify_base = 1; 4258 4259 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { 4260 pfxdata.validity.verify_base = 1; 4261 pfxdata.validity.hyper_pav = 1; 4262 } 4263 4264 switch (cmd) { 4265 case DASD_ECKD_CCW_READ_TRACK_DATA: 4266 dedata->mask.perm = 0x1; 4267 dedata->attributes.operation = basepriv->attrib.operation; 4268 dedata->blk_size = blksize; 4269 dedata->ga_extended |= 0x42; 4270 lredata->operation.orientation = 0x0; 4271 lredata->operation.operation = 0x0C; 4272 lredata->auxiliary.check_bytes = 0x01; 4273 pfx_cmd =
DASD_ECKD_CCW_PFX_READ; 4274 break; 4275 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 4276 dedata->mask.perm = 0x02; 4277 dedata->attributes.operation = basepriv->attrib.operation; 4278 dedata->blk_size = blksize; 4279 rc = set_timestamp(NULL, dedata, basedev); 4280 dedata->ga_extended |= 0x42; 4281 lredata->operation.orientation = 0x0; 4282 lredata->operation.operation = 0x3F; 4283 lredata->extended_operation = 0x23; 4284 lredata->auxiliary.check_bytes = 0x2; 4285 /* 4286 * If XRC is supported the System Time Stamp is set. The 4287 * validity of the time stamp must be reflected in the prefix 4288 * data as well. 4289 */ 4290 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02) 4291 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */ 4292 pfx_cmd = DASD_ECKD_CCW_PFX; 4293 break; 4294 case DASD_ECKD_CCW_READ_COUNT_MT: 4295 dedata->mask.perm = 0x1; 4296 dedata->attributes.operation = DASD_BYPASS_CACHE; 4297 dedata->ga_extended |= 0x42; 4298 dedata->blk_size = blksize; 4299 lredata->operation.orientation = 0x2; 4300 lredata->operation.operation = 0x16; 4301 lredata->auxiliary.check_bytes = 0x01; 4302 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 4303 break; 4304 default: 4305 DBF_DEV_EVENT(DBF_ERR, basedev, 4306 "prepare itcw, unknown opcode 0x%x", cmd); 4307 BUG(); 4308 break; 4309 } 4310 if (rc) 4311 return rc; 4312 4313 dedata->attributes.mode = 0x3; /* ECKD */ 4314 4315 heads = basepriv->rdc_data.trk_per_cyl; 4316 begcyl = trk / heads; 4317 beghead = trk % heads; 4318 endcyl = totrk / heads; 4319 endhead = totrk % heads; 4320 4321 /* check for sequential prestage - enhance cylinder range */ 4322 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 4323 dedata->attributes.operation == DASD_SEQ_ACCESS) { 4324 4325 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 4326 endcyl += basepriv->attrib.nr_cyl; 4327 else 4328 endcyl = (basepriv->real_cyl - 1); 4329 } 4330 4331 set_ch_t(&dedata->beg_ext, begcyl, beghead); 4332 set_ch_t(&dedata->end_ext, endcyl, endhead); 4333 4334 dedata->ep_format = 0x20; /* records per track is valid */ 4335 dedata->ep_rec_per_track = blk_per_trk; 4336 4337 if (rec_on_trk) { 4338 switch (basepriv->rdc_data.dev_type) { 4339 case 0x3390: 4340 dn = ceil_quot(blksize + 6, 232); 4341 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 4342 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 4343 break; 4344 case 0x3380: 4345 d = 7 + ceil_quot(blksize + 12, 32); 4346 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 4347 break; 4348 } 4349 } 4350 4351 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) { 4352 lredata->auxiliary.length_valid = 0; 4353 lredata->auxiliary.length_scope = 0; 4354 lredata->sector = 0xff; 4355 } else { 4356 lredata->auxiliary.length_valid = 1; 4357 lredata->auxiliary.length_scope = 1; 4358 lredata->sector = sector; 4359 } 4360 lredata->auxiliary.imbedded_ccw_valid = 1; 4361 lredata->length = tlf; 4362 lredata->imbedded_ccw = cmd; 4363 lredata->count = count; 4364 set_ch_t(&lredata->seek_addr, begcyl, beghead); 4365 lredata->search_arg.cyl = lredata->seek_addr.cyl; 4366 lredata->search_arg.head = lredata->seek_addr.head; 4367 lredata->search_arg.record = rec_on_trk; 4368 4369 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 4370 &pfxdata, sizeof(pfxdata), total_data_size); 4371 return PTR_ERR_OR_ZERO(dcw); 4372} 4373 4374static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 4375 struct dasd_device *startdev, 4376 struct dasd_block *block, 4377 struct request *req, 4378 sector_t first_rec, 4379 sector_t last_rec, 4380 sector_t first_trk, 4381 sector_t last_trk, 4382 
unsigned int first_offs, 4383 unsigned int last_offs, 4384 unsigned int blk_per_trk, 4385 unsigned int blksize) 4386{ 4387 struct dasd_ccw_req *cqr; 4388 struct req_iterator iter; 4389 struct bio_vec bv; 4390 char *dst; 4391 unsigned int trkcount, ctidaw; 4392 unsigned char cmd; 4393 struct dasd_device *basedev; 4394 unsigned int tlf; 4395 struct itcw *itcw; 4396 struct tidaw *last_tidaw = NULL; 4397 int itcw_op; 4398 size_t itcw_size; 4399 u8 tidaw_flags; 4400 unsigned int seg_len, part_len, len_to_track_end; 4401 unsigned char new_track; 4402 sector_t recid, trkid; 4403 unsigned int offs; 4404 unsigned int count, count_to_trk_end; 4405 int ret; 4406 4407 basedev = block->base; 4408 if (rq_data_dir(req) == READ) { 4409 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 4410 itcw_op = ITCW_OP_READ; 4411 } else if (rq_data_dir(req) == WRITE) { 4412 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 4413 itcw_op = ITCW_OP_WRITE; 4414 } else 4415 return ERR_PTR(-EINVAL); 4416 4417 /* track based I/O needs to address all memory via TIDAWs, 4418 * not just for 64 bit addresses. This allows us to map 4419 * each segment directly to one tidaw. 4420 * In the case of write requests, additional tidaws may 4421 * be needed when a segment crosses a track boundary. 4422 */ 4423 trkcount = last_trk - first_trk + 1; 4424 ctidaw = 0; 4425 rq_for_each_segment(bv, req, iter) { 4426 ++ctidaw; 4427 } 4428 if (rq_data_dir(req) == WRITE) 4429 ctidaw += (last_trk - first_trk); 4430 4431 /* Allocate the ccw request. */ 4432 itcw_size = itcw_calc_size(0, ctidaw, 0); 4433 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, 4434 blk_mq_rq_to_pdu(req)); 4435 if (IS_ERR(cqr)) 4436 return cqr; 4437 4438 /* transfer length factor: how many bytes to read from the last track */ 4439 if (first_trk == last_trk) 4440 tlf = last_offs - first_offs + 1; 4441 else 4442 tlf = last_offs + 1; 4443 tlf *= blksize; 4444 4445 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 4446 if (IS_ERR(itcw)) { 4447 ret = -EINVAL; 4448 goto out_error; 4449 } 4450 cqr->cpaddr = itcw_get_tcw(itcw); 4451 if (prepare_itcw(itcw, first_trk, last_trk, 4452 cmd, basedev, startdev, 4453 first_offs + 1, 4454 trkcount, blksize, 4455 (last_rec - first_rec + 1) * blksize, 4456 tlf, blk_per_trk) == -EAGAIN) { 4457 /* Clock not in sync and XRC is enabled. 4458 * Try again later. 4459 */ 4460 ret = -EAGAIN; 4461 goto out_error; 4462 } 4463 len_to_track_end = 0; 4464 /* 4465 * A tidaw can address 4k of memory, but must not cross page boundaries. 4466 * We can let the block layer handle this by setting 4467 * blk_queue_segment_boundary to page boundaries and 4468 * blk_max_segment_size to page size when setting up the request queue. 4469 * For write requests, a TIDAW must not cross track boundaries, because 4470 * we have to set the CBC flag on the last tidaw for each track.
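* For example, a write covering tracks 10 to 12 with five bio segments was counted above as ctidaw = 5 + 2, since each of the two track crossings may split one segment into two tidaws.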
4471 */ 4472 if (rq_data_dir(req) == WRITE) { 4473 new_track = 1; 4474 recid = first_rec; 4475 rq_for_each_segment(bv, req, iter) { 4476 dst = page_address(bv.bv_page) + bv.bv_offset; 4477 seg_len = bv.bv_len; 4478 while (seg_len) { 4479 if (new_track) { 4480 trkid = recid; 4481 offs = sector_div(trkid, blk_per_trk); 4482 count_to_trk_end = blk_per_trk - offs; 4483 count = min((last_rec - recid + 1), 4484 (sector_t)count_to_trk_end); 4485 len_to_track_end = count * blksize; 4486 recid += count; 4487 new_track = 0; 4488 } 4489 part_len = min(seg_len, len_to_track_end); 4490 seg_len -= part_len; 4491 len_to_track_end -= part_len; 4492 /* We need to end the tidaw at track end */ 4493 if (!len_to_track_end) { 4494 new_track = 1; 4495 tidaw_flags = TIDAW_FLAGS_INSERT_CBC; 4496 } else 4497 tidaw_flags = 0; 4498 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 4499 dst, part_len); 4500 if (IS_ERR(last_tidaw)) { 4501 ret = -EINVAL; 4502 goto out_error; 4503 } 4504 dst += part_len; 4505 } 4506 } 4507 } else { 4508 rq_for_each_segment(bv, req, iter) { 4509 dst = page_address(bv.bv_page) + bv.bv_offset; 4510 last_tidaw = itcw_add_tidaw(itcw, 0x00, 4511 dst, bv.bv_len); 4512 if (IS_ERR(last_tidaw)) { 4513 ret = -EINVAL; 4514 goto out_error; 4515 } 4516 } 4517 } 4518 last_tidaw->flags |= TIDAW_FLAGS_LAST; 4519 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC; 4520 itcw_finalize(itcw); 4521 4522 if (blk_noretry_request(req) || 4523 block->base->features & DASD_FEATURE_FAILFAST) 4524 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4525 cqr->cpmode = 1; 4526 cqr->startdev = startdev; 4527 cqr->memdev = startdev; 4528 cqr->block = block; 4529 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4530 cqr->lpm = dasd_path_get_ppm(startdev); 4531 cqr->retries = startdev->default_retries; 4532 cqr->buildclk = get_tod_clock(); 4533 cqr->status = DASD_CQR_FILLED; 4534 4535 /* Set flags to suppress output for expected errors */ 4536 if (dasd_eckd_is_ese(basedev)) { 4537 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4538 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4539 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4540 } 4541 4542 return cqr; 4543out_error: 4544 dasd_sfree_request(cqr, startdev); 4545 return ERR_PTR(ret); 4546} 4547 4548static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 4549 struct dasd_block *block, 4550 struct request *req) 4551{ 4552 int cmdrtd, cmdwtd; 4553 int use_prefix; 4554 int fcx_multitrack; 4555 struct dasd_eckd_private *private; 4556 struct dasd_device *basedev; 4557 sector_t first_rec, last_rec; 4558 sector_t first_trk, last_trk; 4559 unsigned int first_offs, last_offs; 4560 unsigned int blk_per_trk, blksize; 4561 int cdlspecial; 4562 unsigned int data_size; 4563 struct dasd_ccw_req *cqr; 4564 4565 basedev = block->base; 4566 private = basedev->private; 4567 4568 /* Calculate number of blocks/records per track. */ 4569 blksize = block->bp_block; 4570 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4571 if (blk_per_trk == 0) 4572 return ERR_PTR(-EINVAL); 4573 /* Calculate record id of first and last block. 
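* For example, assuming blksize = 4096 (s2b_shift = 3) and blk_per_trk = 12: blk_rq_pos(req) = 1000 yields first_rec = 1000 >> 3 = 125, first_trk = 10 and first_offs = 5.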
*/ 4574 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; 4575 first_offs = sector_div(first_trk, blk_per_trk); 4576 last_rec = last_trk = 4577 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 4578 last_offs = sector_div(last_trk, blk_per_trk); 4579 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 4580 4581 fcx_multitrack = private->features.feature[40] & 0x20; 4582 data_size = blk_rq_bytes(req); 4583 if (data_size % blksize) 4584 return ERR_PTR(-EINVAL); 4585 /* tpm write requests add CBC data on each track boundary */ 4586 if (rq_data_dir(req) == WRITE) 4587 data_size += (last_trk - first_trk) * 4; 4588 4589 /* is read track data and write track data in command mode supported? */ 4590 cmdrtd = private->features.feature[9] & 0x20; 4591 cmdwtd = private->features.feature[12] & 0x40; 4592 use_prefix = private->features.feature[8] & 0x01; 4593 4594 cqr = NULL; 4595 if (cdlspecial || dasd_page_cache) { 4596 /* do nothing, just fall through to the cmd mode single case */ 4597 } else if ((data_size <= private->fcx_max_data) 4598 && (fcx_multitrack || (first_trk == last_trk))) { 4599 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 4600 first_rec, last_rec, 4601 first_trk, last_trk, 4602 first_offs, last_offs, 4603 blk_per_trk, blksize); 4604 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 4605 (PTR_ERR(cqr) != -ENOMEM)) 4606 cqr = NULL; 4607 } else if (use_prefix && 4608 (((rq_data_dir(req) == READ) && cmdrtd) || 4609 ((rq_data_dir(req) == WRITE) && cmdwtd))) { 4610 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req, 4611 first_rec, last_rec, 4612 first_trk, last_trk, 4613 first_offs, last_offs, 4614 blk_per_trk, blksize); 4615 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 4616 (PTR_ERR(cqr) != -ENOMEM)) 4617 cqr = NULL; 4618 } 4619 if (!cqr) 4620 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req, 4621 first_rec, last_rec, 4622 first_trk, last_trk, 4623 first_offs, last_offs, 4624 blk_per_trk, blksize); 4625 return cqr; 4626} 4627 4628static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, 4629 struct dasd_block *block, 4630 struct request *req) 4631{ 4632 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors; 4633 unsigned int seg_len, len_to_track_end; 4634 unsigned int cidaw, cplength, datasize; 4635 sector_t first_trk, last_trk, sectors; 4636 struct dasd_eckd_private *base_priv; 4637 struct dasd_device *basedev; 4638 struct req_iterator iter; 4639 struct dasd_ccw_req *cqr; 4640 unsigned int trkcount; 4641 unsigned long *idaws; 4642 unsigned int size; 4643 unsigned char cmd; 4644 struct bio_vec bv; 4645 struct ccw1 *ccw; 4646 int use_prefix; 4647 void *data; 4648 char *dst; 4649 4650 /* 4651 * raw track access needs to be a multiple of 64k and on a 64k boundary. 4652 * For read requests we can fix an incorrect alignment by padding 4653 * the request with dummy pages.
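* For example, blk_rq_pos = 130 and blk_rq_sectors = 120 give start_padding_sectors = 130 % 128 = 2 and end_padding_sectors = (128 - 250 % 128) % 128 = 6; a read is padded out to full tracks with dummy pages, a write with that alignment is rejected below.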
4654 */ 4655 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; 4656 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % 4657 DASD_RAW_SECTORS_PER_TRACK; 4658 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) % 4659 DASD_RAW_SECTORS_PER_TRACK; 4660 basedev = block->base; 4661 if ((start_padding_sectors || end_padding_sectors) && 4662 (rq_data_dir(req) == WRITE)) { 4663 DBF_DEV_EVENT(DBF_ERR, basedev, 4664 "raw write not track aligned (%llu,%llu) req %p", 4665 start_padding_sectors, end_padding_sectors, req); 4666 return ERR_PTR(-EINVAL); 4667 } 4668 4669 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; 4670 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / 4671 DASD_RAW_SECTORS_PER_TRACK; 4672 trkcount = last_trk - first_trk + 1; 4673 4674 if (rq_data_dir(req) == READ) 4675 cmd = DASD_ECKD_CCW_READ_TRACK; 4676 else if (rq_data_dir(req) == WRITE) 4677 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK; 4678 else 4679 return ERR_PTR(-EINVAL); 4680 4681 /* 4682 * Raw track based I/O needs IDAWs for each page, 4683 * and not just for 64 bit addresses. 4684 */ 4685 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK; 4686 4687 /* 4688 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes 4689 * of extended parameter. This is needed for write full track. 4690 */ 4691 base_priv = basedev->private; 4692 use_prefix = base_priv->features.feature[8] & 0x01; 4693 if (use_prefix) { 4694 cplength = 1 + trkcount; 4695 size = sizeof(struct PFX_eckd_data) + 2; 4696 } else { 4697 cplength = 2 + trkcount; 4698 size = sizeof(struct DE_eckd_data) + 4699 sizeof(struct LRE_eckd_data) + 2; 4700 } 4701 size = ALIGN(size, 8); 4702 4703 datasize = size + cidaw * sizeof(unsigned long); 4704 4705 /* Allocate the ccw request. 
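* The idaw area sized above holds cidaw = trkcount * 16 entries, one per 4 KiB page, i.e. the full 64 KiB raw image of each track.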
*/ 4706 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 4707 datasize, startdev, blk_mq_rq_to_pdu(req)); 4708 if (IS_ERR(cqr)) 4709 return cqr; 4710 4711 ccw = cqr->cpaddr; 4712 data = cqr->data; 4713 4714 if (use_prefix) { 4715 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev, 4716 startdev, 1, 0, trkcount, 0, 0); 4717 } else { 4718 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0); 4719 ccw[-1].flags |= CCW_FLAG_CC; 4720 4721 data += sizeof(struct DE_eckd_data); 4722 locate_record_ext(ccw++, data, first_trk, 0, 4723 trkcount, cmd, basedev, 0, 0); 4724 } 4725 4726 idaws = (unsigned long *)(cqr->data + size); 4727 len_to_track_end = 0; 4728 if (start_padding_sectors) { 4729 ccw[-1].flags |= CCW_FLAG_CC; 4730 ccw->cmd_code = cmd; 4731 /* maximum 3390 track size */ 4732 ccw->count = 57326; 4733 /* 64k map to one track */ 4734 len_to_track_end = 65536 - start_padding_sectors * 512; 4735 ccw->cda = (__u32)(addr_t)idaws; 4736 ccw->flags |= CCW_FLAG_IDA; 4737 ccw->flags |= CCW_FLAG_SLI; 4738 ccw++; 4739 for (sectors = 0; sectors < start_padding_sectors; sectors += 8) 4740 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4741 } 4742 rq_for_each_segment(bv, req, iter) { 4743 dst = page_address(bv.bv_page) + bv.bv_offset; 4744 seg_len = bv.bv_len; 4745 if (cmd == DASD_ECKD_CCW_READ_TRACK) 4746 memset(dst, 0, seg_len); 4747 if (!len_to_track_end) { 4748 ccw[-1].flags |= CCW_FLAG_CC; 4749 ccw->cmd_code = cmd; 4750 /* maximum 3390 track size */ 4751 ccw->count = 57326; 4752 /* 64k map to one track */ 4753 len_to_track_end = 65536; 4754 ccw->cda = (__u32)(addr_t)idaws; 4755 ccw->flags |= CCW_FLAG_IDA; 4756 ccw->flags |= CCW_FLAG_SLI; 4757 ccw++; 4758 } 4759 len_to_track_end -= seg_len; 4760 idaws = idal_create_words(idaws, dst, seg_len); 4761 } 4762 for (sectors = 0; sectors < end_padding_sectors; sectors += 8) 4763 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4764 if (blk_noretry_request(req) || 4765 block->base->features & DASD_FEATURE_FAILFAST) 4766 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4767 cqr->startdev = startdev; 4768 cqr->memdev = startdev; 4769 cqr->block = block; 4770 cqr->expires = startdev->default_expires * HZ; 4771 cqr->lpm = dasd_path_get_ppm(startdev); 4772 cqr->retries = startdev->default_retries; 4773 cqr->buildclk = get_tod_clock(); 4774 cqr->status = DASD_CQR_FILLED; 4775 4776 return cqr; 4777} 4778 4779 4780static int 4781dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 4782{ 4783 struct dasd_eckd_private *private; 4784 struct ccw1 *ccw; 4785 struct req_iterator iter; 4786 struct bio_vec bv; 4787 char *dst, *cda; 4788 unsigned int blksize, blk_per_trk, off; 4789 sector_t recid; 4790 int status; 4791 4792 if (!dasd_page_cache) 4793 goto out; 4794 private = cqr->block->base->private; 4795 blksize = cqr->block->bp_block; 4796 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4797 recid = blk_rq_pos(req) >> cqr->block->s2b_shift; 4798 ccw = cqr->cpaddr; 4799 /* Skip over define extent & locate record. */ 4800 ccw++; 4801 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 4802 ccw++; 4803 rq_for_each_segment(bv, req, iter) { 4804 dst = page_address(bv.bv_page) + bv.bv_offset; 4805 for (off = 0; off < bv.bv_len; off += blksize) { 4806 /* Skip locate record. 
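* With CDL each block up to 2*blk_per_trk was built with a locate record CCW in front of its read/write CCW, so one extra CCW has to be skipped per block here.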
*/ 4807 if (private->uses_cdl && recid <= 2*blk_per_trk) 4808 ccw++; 4809 if (dst) { 4810 if (ccw->flags & CCW_FLAG_IDA) 4811 cda = *((char **)((addr_t) ccw->cda)); 4812 else 4813 cda = (char *)((addr_t) ccw->cda); 4814 if (dst != cda) { 4815 if (rq_data_dir(req) == READ) 4816 memcpy(dst, cda, bv.bv_len); 4817 kmem_cache_free(dasd_page_cache, 4818 (void *)((addr_t)cda & PAGE_MASK)); 4819 } 4820 dst = NULL; 4821 } 4822 ccw++; 4823 recid++; 4824 } 4825 } 4826out: 4827 status = cqr->status == DASD_CQR_DONE; 4828 dasd_sfree_request(cqr, cqr->memdev); 4829 return status; 4830} 4831 4832/* 4833 * Modify ccw/tcw in cqr so it can be started on a base device. 4834 * 4835 * Note that this is not enough to restart the cqr! 4836 * Either reset cqr->startdev as well (summary unit check handling) 4837 * or restart via separate cqr (as in ERP handling). 4838 */ 4839void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 4840{ 4841 struct ccw1 *ccw; 4842 struct PFX_eckd_data *pfxdata; 4843 struct tcw *tcw; 4844 struct tccb *tccb; 4845 struct dcw *dcw; 4846 4847 if (cqr->cpmode == 1) { 4848 tcw = cqr->cpaddr; 4849 tccb = tcw_get_tccb(tcw); 4850 dcw = (struct dcw *)&tccb->tca[0]; 4851 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 4852 pfxdata->validity.verify_base = 0; 4853 pfxdata->validity.hyper_pav = 0; 4854 } else { 4855 ccw = cqr->cpaddr; 4856 pfxdata = cqr->data; 4857 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 4858 pfxdata->validity.verify_base = 0; 4859 pfxdata->validity.hyper_pav = 0; 4860 } 4861 } 4862} 4863 4864#define DASD_ECKD_CHANQ_MAX_SIZE 4 4865 4866static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 4867 struct dasd_block *block, 4868 struct request *req) 4869{ 4870 struct dasd_eckd_private *private; 4871 struct dasd_device *startdev; 4872 unsigned long flags; 4873 struct dasd_ccw_req *cqr; 4874 4875 startdev = dasd_alias_get_start_dev(base); 4876 if (!startdev) 4877 startdev = base; 4878 private = startdev->private; 4879 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 4880 return ERR_PTR(-EBUSY); 4881 4882 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 4883 private->count++; 4884 if ((base->features & DASD_FEATURE_USERAW)) 4885 cqr = dasd_eckd_build_cp_raw(startdev, block, req); 4886 else 4887 cqr = dasd_eckd_build_cp(startdev, block, req); 4888 if (IS_ERR(cqr)) 4889 private->count--; 4890 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 4891 return cqr; 4892} 4893 4894static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 4895 struct request *req) 4896{ 4897 struct dasd_eckd_private *private; 4898 unsigned long flags; 4899 4900 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 4901 private = cqr->memdev->private; 4902 private->count--; 4903 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 4904 return dasd_eckd_free_cp(cqr, req); 4905} 4906 4907static int 4908dasd_eckd_fill_info(struct dasd_device * device, 4909 struct dasd_information2_t * info) 4910{ 4911 struct dasd_eckd_private *private = device->private; 4912 4913 info->label_block = 2; 4914 info->FBA_layout = private->uses_cdl ? 0 : 1; 4915 info->format = private->uses_cdl ? 
DASD_FORMAT_CDL : DASD_FORMAT_LDL; 4916 info->characteristics_size = sizeof(private->rdc_data); 4917 memcpy(info->characteristics, &private->rdc_data, 4918 sizeof(private->rdc_data)); 4919 info->confdata_size = min((unsigned long)private->conf_len, 4920 sizeof(info->configuration_data)); 4921 memcpy(info->configuration_data, private->conf_data, 4922 info->confdata_size); 4923 return 0; 4924} 4925 4926/* 4927 * SECTION: ioctl functions for eckd devices. 4928 */ 4929 4930/* 4931 * Release device ioctl. 4932 * Builds a channel program to release a previously reserved 4933 * (see dasd_eckd_reserve) device. 4934 */ 4935static int 4936dasd_eckd_release(struct dasd_device *device) 4937{ 4938 struct dasd_ccw_req *cqr; 4939 int rc; 4940 struct ccw1 *ccw; 4941 int useglobal; 4942 4943 if (!capable(CAP_SYS_ADMIN)) 4944 return -EACCES; 4945 4946 useglobal = 0; 4947 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 4948 if (IS_ERR(cqr)) { 4949 mutex_lock(&dasd_reserve_mutex); 4950 useglobal = 1; 4951 cqr = &dasd_reserve_req->cqr; 4952 memset(cqr, 0, sizeof(*cqr)); 4953 memset(&dasd_reserve_req->ccw, 0, 4954 sizeof(dasd_reserve_req->ccw)); 4955 cqr->cpaddr = &dasd_reserve_req->ccw; 4956 cqr->data = &dasd_reserve_req->data; 4957 cqr->magic = DASD_ECKD_MAGIC; 4958 } 4959 ccw = cqr->cpaddr; 4960 ccw->cmd_code = DASD_ECKD_CCW_RELEASE; 4961 ccw->flags |= CCW_FLAG_SLI; 4962 ccw->count = 32; 4963 ccw->cda = (__u32)(addr_t) cqr->data; 4964 cqr->startdev = device; 4965 cqr->memdev = device; 4966 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 4967 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4968 cqr->retries = 2; /* set retry counter to enable basic ERP */ 4969 cqr->expires = 2 * HZ; 4970 cqr->buildclk = get_tod_clock(); 4971 cqr->status = DASD_CQR_FILLED; 4972 4973 rc = dasd_sleep_on_immediatly(cqr); 4974 if (!rc) 4975 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 4976 4977 if (useglobal) 4978 mutex_unlock(&dasd_reserve_mutex); 4979 else 4980 dasd_sfree_request(cqr, cqr->memdev); 4981 return rc; 4982} 4983 4984/* 4985 * Reserve device ioctl. 4986 * Options are set to 'synchronous wait for interrupt' and 4987 * 'timeout the request'. This leads to a terminate IO if 4988 * the interrupt is outstanding for a certain time.
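* Concretely, cqr->expires is set to 2 * HZ below and the request is started via dasd_sleep_on_immediatly(), so an unanswered reserve is terminated after about two seconds instead of blocking indefinitely.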
4989 */ 4990static int 4991dasd_eckd_reserve(struct dasd_device *device) 4992{ 4993 struct dasd_ccw_req *cqr; 4994 int rc; 4995 struct ccw1 *ccw; 4996 int useglobal; 4997 4998 if (!capable(CAP_SYS_ADMIN)) 4999 return -EACCES; 5000 5001 useglobal = 0; 5002 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 5003 if (IS_ERR(cqr)) { 5004 mutex_lock(&dasd_reserve_mutex); 5005 useglobal = 1; 5006 cqr = &dasd_reserve_req->cqr; 5007 memset(cqr, 0, sizeof(*cqr)); 5008 memset(&dasd_reserve_req->ccw, 0, 5009 sizeof(dasd_reserve_req->ccw)); 5010 cqr->cpaddr = &dasd_reserve_req->ccw; 5011 cqr->data = &dasd_reserve_req->data; 5012 cqr->magic = DASD_ECKD_MAGIC; 5013 } 5014 ccw = cqr->cpaddr; 5015 ccw->cmd_code = DASD_ECKD_CCW_RESERVE; 5016 ccw->flags |= CCW_FLAG_SLI; 5017 ccw->count = 32; 5018 ccw->cda = (__u32)(addr_t) cqr->data; 5019 cqr->startdev = device; 5020 cqr->memdev = device; 5021 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5022 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5023 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5024 cqr->expires = 2 * HZ; 5025 cqr->buildclk = get_tod_clock(); 5026 cqr->status = DASD_CQR_FILLED; 5027 5028 rc = dasd_sleep_on_immediatly(cqr); 5029 if (!rc) 5030 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5031 5032 if (useglobal) 5033 mutex_unlock(&dasd_reserve_mutex); 5034 else 5035 dasd_sfree_request(cqr, cqr->memdev); 5036 return rc; 5037} 5038 5039/* 5040 * Steal lock ioctl - unconditional reserve device. 5041 * Builds a channel program to break a device's reservation 5042 * (unconditional reserve). 5043 */ 5044static int 5045dasd_eckd_steal_lock(struct dasd_device *device) 5046{ 5047 struct dasd_ccw_req *cqr; 5048 int rc; 5049 struct ccw1 *ccw; 5050 int useglobal; 5051 5052 if (!capable(CAP_SYS_ADMIN)) 5053 return -EACCES; 5054 5055 useglobal = 0; 5056 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 5057 if (IS_ERR(cqr)) { 5058 mutex_lock(&dasd_reserve_mutex); 5059 useglobal = 1; 5060 cqr = &dasd_reserve_req->cqr; 5061 memset(cqr, 0, sizeof(*cqr)); 5062 memset(&dasd_reserve_req->ccw, 0, 5063 sizeof(dasd_reserve_req->ccw)); 5064 cqr->cpaddr = &dasd_reserve_req->ccw; 5065 cqr->data = &dasd_reserve_req->data; 5066 cqr->magic = DASD_ECKD_MAGIC; 5067 } 5068 ccw = cqr->cpaddr; 5069 ccw->cmd_code = DASD_ECKD_CCW_SLCK; 5070 ccw->flags |= CCW_FLAG_SLI; 5071 ccw->count = 32; 5072 ccw->cda = (__u32)(addr_t) cqr->data; 5073 cqr->startdev = device; 5074 cqr->memdev = device; 5075 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5076 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5077 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5078 cqr->expires = 2 * HZ; 5079 cqr->buildclk = get_tod_clock(); 5080 cqr->status = DASD_CQR_FILLED; 5081 5082 rc = dasd_sleep_on_immediatly(cqr); 5083 if (!rc) 5084 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5085 5086 if (useglobal) 5087 mutex_unlock(&dasd_reserve_mutex); 5088 else 5089 dasd_sfree_request(cqr, cqr->memdev); 5090 return rc; 5091} 5092 5093/* 5094 * SNID - Sense Path Group ID 5095 * This ioctl may be used in situations where I/O is stalled due to 5096 * a reserve, so if the normal dasd_smalloc_request fails, we use the 5097 * preallocated dasd_reserve_req.
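* The same fallback to the static, mutex protected dasd_reserve_req is used by the reserve, release and steal lock ioctls above.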
5098 */ 5099static int dasd_eckd_snid(struct dasd_device *device, 5100 void __user *argp) 5101{ 5102 struct dasd_ccw_req *cqr; 5103 int rc; 5104 struct ccw1 *ccw; 5105 int useglobal; 5106 struct dasd_snid_ioctl_data usrparm; 5107 5108 if (!capable(CAP_SYS_ADMIN)) 5109 return -EACCES; 5110 5111 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5112 return -EFAULT; 5113 5114 useglobal = 0; 5115 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 5116 sizeof(struct dasd_snid_data), device, 5117 NULL); 5118 if (IS_ERR(cqr)) { 5119 mutex_lock(&dasd_reserve_mutex); 5120 useglobal = 1; 5121 cqr = &dasd_reserve_req->cqr; 5122 memset(cqr, 0, sizeof(*cqr)); 5123 memset(&dasd_reserve_req->ccw, 0, 5124 sizeof(dasd_reserve_req->ccw)); 5125 cqr->cpaddr = &dasd_reserve_req->ccw; 5126 cqr->data = &dasd_reserve_req->data; 5127 cqr->magic = DASD_ECKD_MAGIC; 5128 } 5129 ccw = cqr->cpaddr; 5130 ccw->cmd_code = DASD_ECKD_CCW_SNID; 5131 ccw->flags |= CCW_FLAG_SLI; 5132 ccw->count = 12; 5133 ccw->cda = (__u32)(addr_t) cqr->data; 5134 cqr->startdev = device; 5135 cqr->memdev = device; 5136 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5137 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5138 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 5139 cqr->retries = 5; 5140 cqr->expires = 10 * HZ; 5141 cqr->buildclk = get_tod_clock(); 5142 cqr->status = DASD_CQR_FILLED; 5143 cqr->lpm = usrparm.path_mask; 5144 5145 rc = dasd_sleep_on_immediatly(cqr); 5146 /* verify that I/O processing didn't modify the path mask */ 5147 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask)) 5148 rc = -EIO; 5149 if (!rc) { 5150 usrparm.data = *((struct dasd_snid_data *)cqr->data); 5151 if (copy_to_user(argp, &usrparm, sizeof(usrparm))) 5152 rc = -EFAULT; 5153 } 5154 5155 if (useglobal) 5156 mutex_unlock(&dasd_reserve_mutex); 5157 else 5158 dasd_sfree_request(cqr, cqr->memdev); 5159 return rc; 5160} 5161 5162/* 5163 * Read performance statistics 5164 */ 5165static int 5166dasd_eckd_performance(struct dasd_device *device, void __user *argp) 5167{ 5168 struct dasd_psf_prssd_data *prssdp; 5169 struct dasd_rssd_perf_stats_t *stats; 5170 struct dasd_ccw_req *cqr; 5171 struct ccw1 *ccw; 5172 int rc; 5173 5174 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5175 (sizeof(struct dasd_psf_prssd_data) + 5176 sizeof(struct dasd_rssd_perf_stats_t)), 5177 device, NULL); 5178 if (IS_ERR(cqr)) { 5179 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5180 "Could not allocate initialization request"); 5181 return PTR_ERR(cqr); 5182 } 5183 cqr->startdev = device; 5184 cqr->memdev = device; 5185 cqr->retries = 0; 5186 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5187 cqr->expires = 10 * HZ; 5188 5189 /* Prepare for Read Subsystem Data */ 5190 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5191 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 5192 prssdp->order = PSF_ORDER_PRSSD; 5193 prssdp->suborder = 0x01; /* Performance Statistics */ 5194 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 5195 5196 ccw = cqr->cpaddr; 5197 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5198 ccw->count = sizeof(struct dasd_psf_prssd_data); 5199 ccw->flags |= CCW_FLAG_CC; 5200 ccw->cda = (__u32)(addr_t) prssdp; 5201 5202 /* Read Subsystem Data - Performance Statistics */ 5203 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 5204 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 5205 5206 ccw++; 5207 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5208 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 5209 ccw->cda = (__u32)(addr_t) 
stats; 5210 5211 cqr->buildclk = get_tod_clock(); 5212 cqr->status = DASD_CQR_FILLED; 5213 rc = dasd_sleep_on(cqr); 5214 if (rc == 0) { 5215 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5216 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 5217 if (copy_to_user(argp, stats, 5218 sizeof(struct dasd_rssd_perf_stats_t))) 5219 rc = -EFAULT; 5220 } 5221 dasd_sfree_request(cqr, cqr->memdev); 5222 return rc; 5223} 5224 5225/* 5226 * Get attributes (cache operations) 5227 * Returns the cache attributes used in Define Extent (DE). 5228 */ 5229static int 5230dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) 5231{ 5232 struct dasd_eckd_private *private = device->private; 5233 struct attrib_data_t attrib = private->attrib; 5234 int rc; 5235 5236 if (!capable(CAP_SYS_ADMIN)) 5237 return -EACCES; 5238 if (!argp) 5239 return -EINVAL; 5240 5241 rc = 0; 5242 if (copy_to_user(argp, (long *) &attrib, 5243 sizeof(struct attrib_data_t))) 5244 rc = -EFAULT; 5245 5246 return rc; 5247} 5248 5249/* 5250 * Set attributes (cache operations) 5251 * Stores the attributes for cache operation to be used in Define Extent (DE). 5252 */ 5253static int 5254dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) 5255{ 5256 struct dasd_eckd_private *private = device->private; 5257 struct attrib_data_t attrib; 5258 5259 if (!capable(CAP_SYS_ADMIN)) 5260 return -EACCES; 5261 if (!argp) 5262 return -EINVAL; 5263 5264 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) 5265 return -EFAULT; 5266 private->attrib = attrib; 5267 5268 dev_info(&device->cdev->dev, 5269 "The DASD cache mode was set to %x (%i cylinder prestage)\n", 5270 private->attrib.operation, private->attrib.nr_cyl); 5271 return 0; 5272} 5273 5274/* 5275 * Issue syscall I/O to EMC Symmetrix array. 5276 * CCWs are PSF and RSSD 5277 */ 5278static int dasd_symm_io(struct dasd_device *device, void __user *argp) 5279{ 5280 struct dasd_symmio_parms usrparm; 5281 char *psf_data, *rssd_result; 5282 struct dasd_ccw_req *cqr; 5283 struct ccw1 *ccw; 5284 char psf0, psf1; 5285 int rc; 5286 5287 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) 5288 return -EACCES; 5289 psf0 = psf1 = 0; 5290 5291 /* Copy parms from caller */ 5292 rc = -EFAULT; 5293 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5294 goto out; 5295 if (is_compat_task()) { 5296 /* Make sure pointers are sane even on 31 bit.
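* The checks below reject any address that does not fit into 32 bits and then clear bit 31, since a compat (31 bit) task can only address the low 2 GiB.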
*/ 5297 rc = -EINVAL; 5298 if ((usrparm.psf_data >> 32) != 0) 5299 goto out; 5300 if ((usrparm.rssd_result >> 32) != 0) 5301 goto out; 5302 usrparm.psf_data &= 0x7fffffffULL; 5303 usrparm.rssd_result &= 0x7fffffffULL; 5304 } 5305 /* at least 2 bytes are accessed and should be allocated */ 5306 if (usrparm.psf_data_len < 2) { 5307 DBF_DEV_EVENT(DBF_WARNING, device, 5308 "Symmetrix ioctl invalid data length %d", 5309 usrparm.psf_data_len); 5310 rc = -EINVAL; 5311 goto out; 5312 } 5313 /* alloc I/O data area */ 5314 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 5315 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 5316 if (!psf_data || !rssd_result) { 5317 rc = -ENOMEM; 5318 goto out_free; 5319 } 5320 5321 /* get syscall header from user space */ 5322 rc = -EFAULT; 5323 if (copy_from_user(psf_data, 5324 (void __user *)(unsigned long) usrparm.psf_data, 5325 usrparm.psf_data_len)) 5326 goto out_free; 5327 psf0 = psf_data[0]; 5328 psf1 = psf_data[1]; 5329 5330 /* setup CCWs for PSF + RSSD */ 5331 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); 5332 if (IS_ERR(cqr)) { 5333 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5334 "Could not allocate initialization request"); 5335 rc = PTR_ERR(cqr); 5336 goto out_free; 5337 } 5338 5339 cqr->startdev = device; 5340 cqr->memdev = device; 5341 cqr->retries = 3; 5342 cqr->expires = 10 * HZ; 5343 cqr->buildclk = get_tod_clock(); 5344 cqr->status = DASD_CQR_FILLED; 5345 5346 /* Build the ccws */ 5347 ccw = cqr->cpaddr; 5348 5349 /* PSF ccw */ 5350 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5351 ccw->count = usrparm.psf_data_len; 5352 ccw->flags |= CCW_FLAG_CC; 5353 ccw->cda = (__u32)(addr_t) psf_data; 5354 5355 ccw++; 5356 5357 /* RSSD ccw */ 5358 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5359 ccw->count = usrparm.rssd_result_len; 5360 ccw->flags = CCW_FLAG_SLI ; 5361 ccw->cda = (__u32)(addr_t) rssd_result; 5362 5363 rc = dasd_sleep_on(cqr); 5364 if (rc) 5365 goto out_sfree; 5366 5367 rc = -EFAULT; 5368 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 5369 rssd_result, usrparm.rssd_result_len)) 5370 goto out_sfree; 5371 rc = 0; 5372 5373out_sfree: 5374 dasd_sfree_request(cqr, cqr->memdev); 5375out_free: 5376 kfree(rssd_result); 5377 kfree(psf_data); 5378out: 5379 DBF_DEV_EVENT(DBF_WARNING, device, 5380 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d", 5381 (int) psf0, (int) psf1, rc); 5382 return rc; 5383} 5384 5385static int 5386dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 5387{ 5388 struct dasd_device *device = block->base; 5389 5390 switch (cmd) { 5391 case BIODASDGATTR: 5392 return dasd_eckd_get_attrib(device, argp); 5393 case BIODASDSATTR: 5394 return dasd_eckd_set_attrib(device, argp); 5395 case BIODASDPSRD: 5396 return dasd_eckd_performance(device, argp); 5397 case BIODASDRLSE: 5398 return dasd_eckd_release(device); 5399 case BIODASDRSRV: 5400 return dasd_eckd_reserve(device); 5401 case BIODASDSLCK: 5402 return dasd_eckd_steal_lock(device); 5403 case BIODASDSNID: 5404 return dasd_eckd_snid(device, argp); 5405 case BIODASDSYMMIO: 5406 return dasd_symm_io(device, argp); 5407 default: 5408 return -ENOTTY; 5409 } 5410} 5411 5412/* 5413 * Dump the range of CCWs into 'page' buffer 5414 * and return number of printed chars. 
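* Each CCW is printed as its two raw words followed by at most 32 bytes of data, resolved through the IDAL when CCW_FLAG_IDA is set.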
5415 */ 5416static int 5417dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 5418{ 5419 int len, count; 5420 char *datap; 5421 5422 len = 0; 5423 while (from <= to) { 5424 len += sprintf(page + len, PRINTK_HEADER 5425 " CCW %p: %08X %08X DAT:", 5426 from, ((int *) from)[0], ((int *) from)[1]); 5427 5428 /* get pointer to data (consider IDALs) */ 5429 if (from->flags & CCW_FLAG_IDA) 5430 datap = (char *) *((addr_t *) (addr_t) from->cda); 5431 else 5432 datap = (char *) ((addr_t) from->cda); 5433 5434 /* dump data (max 32 bytes) */ 5435 for (count = 0; count < from->count && count < 32; count++) { 5436 if (count % 8 == 0) len += sprintf(page + len, " "); 5437 if (count % 4 == 0) len += sprintf(page + len, " "); 5438 len += sprintf(page + len, "%02x", datap[count]); 5439 } 5440 len += sprintf(page + len, "\n"); 5441 from++; 5442 } 5443 return len; 5444} 5445 5446static void 5447dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, 5448 char *reason) 5449{ 5450 u64 *sense; 5451 u64 *stat; 5452 5453 sense = (u64 *) dasd_get_sense(irb); 5454 stat = (u64 *) &irb->scsw; 5455 if (sense) { 5456 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " 5457 "%016llx %016llx %016llx %016llx", 5458 reason, *stat, *((u32 *) (stat + 1)), 5459 sense[0], sense[1], sense[2], sense[3]); 5460 } else { 5461 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", 5462 reason, *stat, *((u32 *) (stat + 1)), 5463 "NO VALID SENSE"); 5464 } 5465} 5466 5467/* 5468 * Print sense data and related channel program. 5469 * Parts are printed because printk buffer is only 1024 bytes. 5470 */ 5471static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 5472 struct dasd_ccw_req *req, struct irb *irb) 5473{ 5474 char *page; 5475 struct ccw1 *first, *last, *fail, *from, *to; 5476 int len, sl, sct; 5477 5478 page = (char *) get_zeroed_page(GFP_ATOMIC); 5479 if (page == NULL) { 5480 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5481 "No memory to dump sense data\n"); 5482 return; 5483 } 5484 /* dump the sense data */ 5485 len = sprintf(page, PRINTK_HEADER 5486 " I/O status report for device %s:\n", 5487 dev_name(&device->cdev->dev)); 5488 len += sprintf(page + len, PRINTK_HEADER 5489 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 5490 "CS:%02X RC:%d\n", 5491 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5492 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5493 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5494 req ? req->intrc : 0); 5495 len += sprintf(page + len, PRINTK_HEADER 5496 " device %s: Failing CCW: %p\n", 5497 dev_name(&device->cdev->dev), 5498 (void *) (addr_t) irb->scsw.cmd.cpa); 5499 if (irb->esw.esw0.erw.cons) { 5500 for (sl = 0; sl < 4; sl++) { 5501 len += sprintf(page + len, PRINTK_HEADER 5502 " Sense(hex) %2d-%2d:", 5503 (8 * sl), ((8 * sl) + 7)); 5504 5505 for (sct = 0; sct < 8; sct++) { 5506 len += sprintf(page + len, " %02x", 5507 irb->ecw[8 * sl + sct]); 5508 } 5509 len += sprintf(page + len, "\n"); 5510 } 5511 5512 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 5513 /* 24 Byte Sense Data */ 5514 sprintf(page + len, PRINTK_HEADER 5515 " 24 Byte: %x MSG %x, " 5516 "%s MSGb to SYSOP\n", 5517 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 5518 irb->ecw[1] & 0x10 ? 
"" : "no"); 5519 } else { 5520 /* 32 Byte Sense Data */ 5521 sprintf(page + len, PRINTK_HEADER 5522 " 32 Byte: Format: %x " 5523 "Exception class %x\n", 5524 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 5525 } 5526 } else { 5527 sprintf(page + len, PRINTK_HEADER 5528 " SORRY - NO VALID SENSE AVAILABLE\n"); 5529 } 5530 printk(KERN_ERR "%s", page); 5531 5532 if (req) { 5533 /* req == NULL for unsolicited interrupts */ 5534 /* dump the Channel Program (max 140 Bytes per line) */ 5535 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 5536 first = req->cpaddr; 5537 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 5538 to = min(first + 6, last); 5539 len = sprintf(page, PRINTK_HEADER 5540 " Related CP in req: %p\n", req); 5541 dasd_eckd_dump_ccw_range(first, to, page + len); 5542 printk(KERN_ERR "%s", page); 5543 5544 /* print failing CCW area (maximum 4) */ 5545 /* scsw->cda is either valid or zero */ 5546 len = 0; 5547 from = ++to; 5548 fail = (struct ccw1 *)(addr_t) 5549 irb->scsw.cmd.cpa; /* failing CCW */ 5550 if (from < fail - 2) { 5551 from = fail - 2; /* there is a gap - print header */ 5552 len += sprintf(page, PRINTK_HEADER "......\n"); 5553 } 5554 to = min(fail + 1, last); 5555 len += dasd_eckd_dump_ccw_range(from, to, page + len); 5556 5557 /* print last CCWs (maximum 2) */ 5558 from = max(from, ++to); 5559 if (from < last - 1) { 5560 from = last - 1; /* there is a gap - print header */ 5561 len += sprintf(page + len, PRINTK_HEADER "......\n"); 5562 } 5563 len += dasd_eckd_dump_ccw_range(from, last, page + len); 5564 if (len > 0) 5565 printk(KERN_ERR "%s", page); 5566 } 5567 free_page((unsigned long) page); 5568} 5569 5570 5571/* 5572 * Print sense data from a tcw. 5573 */ 5574static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 5575 struct dasd_ccw_req *req, struct irb *irb) 5576{ 5577 char *page; 5578 int len, sl, sct, residual; 5579 struct tsb *tsb; 5580 u8 *sense, *rcq; 5581 5582 page = (char *) get_zeroed_page(GFP_ATOMIC); 5583 if (page == NULL) { 5584 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 5585 "No memory to dump sense data"); 5586 return; 5587 } 5588 /* dump the sense data */ 5589 len = sprintf(page, PRINTK_HEADER 5590 " I/O status report for device %s:\n", 5591 dev_name(&device->cdev->dev)); 5592 len += sprintf(page + len, PRINTK_HEADER 5593 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 5594 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 5595 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5596 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5597 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5598 irb->scsw.tm.fcxs, 5599 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, 5600 req ? 
req->intrc : 0); 5601 len += sprintf(page + len, PRINTK_HEADER 5602 " device %s: Failing TCW: %p\n", 5603 dev_name(&device->cdev->dev), 5604 (void *) (addr_t) irb->scsw.tm.tcw); 5605 5606 tsb = NULL; 5607 sense = NULL; 5608 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) 5609 tsb = tcw_get_tsb( 5610 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 5611 5612 if (tsb) { 5613 len += sprintf(page + len, PRINTK_HEADER 5614 " tsb->length %d\n", tsb->length); 5615 len += sprintf(page + len, PRINTK_HEADER 5616 " tsb->flags %x\n", tsb->flags); 5617 len += sprintf(page + len, PRINTK_HEADER 5618 " tsb->dcw_offset %d\n", tsb->dcw_offset); 5619 len += sprintf(page + len, PRINTK_HEADER 5620 " tsb->count %d\n", tsb->count); 5621 residual = tsb->count - 28; 5622 len += sprintf(page + len, PRINTK_HEADER 5623 " residual %d\n", residual); 5624 5625 switch (tsb->flags & 0x07) { 5626 case 1: /* tsa_iostat */ 5627 len += sprintf(page + len, PRINTK_HEADER 5628 " tsb->tsa.iostat.dev_time %d\n", 5629 tsb->tsa.iostat.dev_time); 5630 len += sprintf(page + len, PRINTK_HEADER 5631 " tsb->tsa.iostat.def_time %d\n", 5632 tsb->tsa.iostat.def_time); 5633 len += sprintf(page + len, PRINTK_HEADER 5634 " tsb->tsa.iostat.queue_time %d\n", 5635 tsb->tsa.iostat.queue_time); 5636 len += sprintf(page + len, PRINTK_HEADER 5637 " tsb->tsa.iostat.dev_busy_time %d\n", 5638 tsb->tsa.iostat.dev_busy_time); 5639 len += sprintf(page + len, PRINTK_HEADER 5640 " tsb->tsa.iostat.dev_act_time %d\n", 5641 tsb->tsa.iostat.dev_act_time); 5642 sense = tsb->tsa.iostat.sense; 5643 break; 5644 case 2: /* ts_ddpc */ 5645 len += sprintf(page + len, PRINTK_HEADER 5646 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 5647 for (sl = 0; sl < 2; sl++) { 5648 len += sprintf(page + len, PRINTK_HEADER 5649 " tsb->tsa.ddpc.rcq %2d-%2d: ", 5650 (8 * sl), ((8 * sl) + 7)); 5651 rcq = tsb->tsa.ddpc.rcq; 5652 for (sct = 0; sct < 8; sct++) { 5653 len += sprintf(page + len, " %02x", 5654 rcq[8 * sl + sct]); 5655 } 5656 len += sprintf(page + len, "\n"); 5657 } 5658 sense = tsb->tsa.ddpc.sense; 5659 break; 5660 case 3: /* tsa_intrg */ 5661 len += sprintf(page + len, PRINTK_HEADER 5662 " tsb->tsa.intrg.: not supported yet\n"); 5663 break; 5664 } 5665 5666 if (sense) { 5667 for (sl = 0; sl < 4; sl++) { 5668 len += sprintf(page + len, PRINTK_HEADER 5669 " Sense(hex) %2d-%2d:", 5670 (8 * sl), ((8 * sl) + 7)); 5671 for (sct = 0; sct < 8; sct++) { 5672 len += sprintf(page + len, " %02x", 5673 sense[8 * sl + sct]); 5674 } 5675 len += sprintf(page + len, "\n"); 5676 } 5677 5678 if (sense[27] & DASD_SENSE_BIT_0) { 5679 /* 24 Byte Sense Data */ 5680 sprintf(page + len, PRINTK_HEADER 5681 " 24 Byte: %x MSG %x, " 5682 "%s MSGb to SYSOP\n", 5683 sense[7] >> 4, sense[7] & 0x0f, 5684 sense[1] & 0x10 ? 
"" : "no"); 5685 } else { 5686 /* 32 Byte Sense Data */ 5687 sprintf(page + len, PRINTK_HEADER 5688 " 32 Byte: Format: %x " 5689 "Exception class %x\n", 5690 sense[6] & 0x0f, sense[22] >> 4); 5691 } 5692 } else { 5693 sprintf(page + len, PRINTK_HEADER 5694 " SORRY - NO VALID SENSE AVAILABLE\n"); 5695 } 5696 } else { 5697 sprintf(page + len, PRINTK_HEADER 5698 " SORRY - NO TSB DATA AVAILABLE\n"); 5699 } 5700 printk(KERN_ERR "%s", page); 5701 free_page((unsigned long) page); 5702} 5703 5704static void dasd_eckd_dump_sense(struct dasd_device *device, 5705 struct dasd_ccw_req *req, struct irb *irb) 5706{ 5707 u8 *sense = dasd_get_sense(irb); 5708 5709 if (scsw_is_tm(&irb->scsw)) { 5710 /* 5711 * In some cases the 'File Protected' or 'Incorrect Length' 5712 * error might be expected and log messages shouldn't be written 5713 * then. Check if the according suppress bit is set. 5714 */ 5715 if (sense && (sense[1] & SNS1_FILE_PROTECTED) && 5716 test_bit(DASD_CQR_SUPPRESS_FP, &req->flags)) 5717 return; 5718 if (scsw_cstat(&irb->scsw) == 0x40 && 5719 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) 5720 return; 5721 5722 dasd_eckd_dump_sense_tcw(device, req, irb); 5723 } else { 5724 /* 5725 * In some cases the 'Command Reject' or 'No Record Found' 5726 * error might be expected and log messages shouldn't be 5727 * written then. Check if the according suppress bit is set. 5728 */ 5729 if (sense && sense[0] & SNS0_CMD_REJECT && 5730 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) 5731 return; 5732 5733 if (sense && sense[1] & SNS1_NO_REC_FOUND && 5734 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) 5735 return; 5736 5737 dasd_eckd_dump_sense_ccw(device, req, irb); 5738 } 5739} 5740 5741static int dasd_eckd_pm_freeze(struct dasd_device *device) 5742{ 5743 /* 5744 * the device should be disconnected from our LCU structure 5745 * on restore we will reconnect it and reread LCU specific 5746 * information like PAV support that might have changed 5747 */ 5748 dasd_alias_remove_device(device); 5749 dasd_alias_disconnect_device_from_lcu(device); 5750 5751 return 0; 5752} 5753 5754static int dasd_eckd_restore_device(struct dasd_device *device) 5755{ 5756 struct dasd_eckd_private *private = device->private; 5757 struct dasd_eckd_characteristics temp_rdc_data; 5758 int rc; 5759 struct dasd_uid temp_uid; 5760 unsigned long flags; 5761 unsigned long cqr_flags = 0; 5762 5763 /* Read Configuration Data */ 5764 rc = dasd_eckd_read_conf(device); 5765 if (rc) { 5766 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 5767 "Read configuration data failed, rc=%d", rc); 5768 goto out_err; 5769 } 5770 5771 dasd_eckd_get_uid(device, &temp_uid); 5772 /* Generate device unique id */ 5773 rc = dasd_eckd_generate_uid(device); 5774 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 5775 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) 5776 dev_err(&device->cdev->dev, "The UID of the DASD has " 5777 "changed\n"); 5778 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 5779 if (rc) 5780 goto out_err; 5781 5782 /* register lcu with alias handling, enable PAV if this is a new lcu */ 5783 rc = dasd_alias_make_device_known_to_lcu(device); 5784 if (rc) 5785 goto out_err; 5786 5787 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags); 5788 dasd_eckd_validate_server(device, cqr_flags); 5789 5790 /* RE-Read Configuration Data */ 5791 rc = dasd_eckd_read_conf(device); 5792 if (rc) { 5793 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 5794 "Read configuration data failed, rc=%d", rc); 5795 goto out_err2; 5796 } 5797 5798 /* Read 
Feature Codes */ 5799 dasd_eckd_read_features(device); 5800 5801 /* Read Volume Information */ 5802 dasd_eckd_read_vol_info(device); 5803 5804 /* Read Extent Pool Information */ 5805 dasd_eckd_read_ext_pool_info(device); 5806 5807 /* Read Device Characteristics */ 5808 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 5809 &temp_rdc_data, 64); 5810 if (rc) { 5811 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 5812 "Read device characteristic failed, rc=%d", rc); 5813 goto out_err2; 5814 } 5815 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 5816 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); 5817 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 5818 5819 /* add device to alias management */ 5820 dasd_alias_add_device(device); 5821 5822 return 0; 5823 5824out_err2: 5825 dasd_alias_disconnect_device_from_lcu(device); 5826out_err: 5827 return -1; 5828} 5829 5830static int dasd_eckd_reload_device(struct dasd_device *device) 5831{ 5832 struct dasd_eckd_private *private = device->private; 5833 int rc, old_base; 5834 char print_uid[60]; 5835 struct dasd_uid uid; 5836 unsigned long flags; 5837 5838 /* 5839 * remove device from alias handling to prevent new requests 5840 * from being scheduled on the wrong alias device 5841 */ 5842 dasd_alias_remove_device(device); 5843 5844 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 5845 old_base = private->uid.base_unit_addr; 5846 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 5847 5848 /* Read Configuration Data */ 5849 rc = dasd_eckd_read_conf(device); 5850 if (rc) 5851 goto out_err; 5852 5853 rc = dasd_eckd_generate_uid(device); 5854 if (rc) 5855 goto out_err; 5856 /* 5857 * update unit address configuration and 5858 * add device to alias management 5859 */ 5860 dasd_alias_update_add_device(device); 5861 5862 dasd_eckd_get_uid(device, &uid); 5863 5864 if (old_base != uid.base_unit_addr) { 5865 if (strlen(uid.vduit) > 0) 5866 snprintf(print_uid, sizeof(print_uid), 5867 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial, 5868 uid.ssid, uid.base_unit_addr, uid.vduit); 5869 else 5870 snprintf(print_uid, sizeof(print_uid), 5871 "%s.%s.%04x.%02x", uid.vendor, uid.serial, 5872 uid.ssid, uid.base_unit_addr); 5873 5874 dev_info(&device->cdev->dev, 5875 "An Alias device was reassigned to a new base device " 5876 "with UID: %s\n", print_uid); 5877 } 5878 return 0; 5879 5880out_err: 5881 return -1; 5882} 5883 5884static int dasd_eckd_read_message_buffer(struct dasd_device *device, 5885 struct dasd_rssd_messages *messages, 5886 __u8 lpum) 5887{ 5888 struct dasd_rssd_messages *message_buf; 5889 struct dasd_psf_prssd_data *prssdp; 5890 struct dasd_ccw_req *cqr; 5891 struct ccw1 *ccw; 5892 int rc; 5893 5894 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5895 (sizeof(struct dasd_psf_prssd_data) + 5896 sizeof(struct dasd_rssd_messages)), 5897 device, NULL); 5898 if (IS_ERR(cqr)) { 5899 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5900 "Could not allocate read message buffer request"); 5901 return PTR_ERR(cqr); 5902 } 5903 5904 cqr->lpm = lpum; 5905retry: 5906 cqr->startdev = device; 5907 cqr->memdev = device; 5908 cqr->block = NULL; 5909 cqr->expires = 10 * HZ; 5910 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 5911 /* dasd_sleep_on_immediatly does not do complex error 5912 * recovery so clear erp flag and set retry counter to 5913 * do basic erp */ 5914 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5915 cqr->retries = 256; 5916 5917 /* Prepare for Read Subsystem Data */ 5918 
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery, so clear the erp flag and set the retry counter
	 * to do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path,
		 * but instead we get the required information on any path,
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
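
/*
 * The request built above follows the usual PSF/RSSD pattern, sketched
 * here for reference:
 *
 *	ccw[0]: DASD_ECKD_CCW_PSF, command chained (CCW_FLAG_CC)
 *		-> writes the prssdp header that selects the suborder
 *	ccw[1]: DASD_ECKD_CCW_RSSD
 *		-> reads the subsystem data into the buffer directly
 *		   behind the header (prssdp + 1)
 *
 * dasd_eckd_query_host_access() below builds the same chain with a
 * different suborder and a separate DMA buffer for the answer.
 */
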
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress the error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}

/*
 * return number of grouped devices
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}

/*
 * write host access information to a sequential file
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}
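
/*
 * A hypothetical record block emitted by dasd_hosts_print() would look
 * like this (all values made up; one block per path group entry):
 *
 *	pgid 80d4c1d5e8f10000000001
 *	status_flags 80
 *	sysplex_name SYSPLEX1
 *	supported_cylinder 65520
 *	timestamp 1234567890
 */
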
/*
 * Perform Subsystem Function - CUIR response
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * return the configuration data that is referenced by the record selector
 * if a record selector is specified; otherwise return the conf_data
 * pointer for the path specified by lpum
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}
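
/*
 * Worked example for the lpum convention used by the CUIR code: lpum is a
 * single-bit path mask with 0x80 denoting path position 0, so lpum == 0x08
 * selects position 4 (pathmask_to_pos(0x08) == 4), and a position is
 * turned back into a mask with 0x80 >> pos.
 */
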
6213 */ 6214static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, 6215 struct dasd_cuir_message *cuir) 6216{ 6217 struct dasd_conf_data *ref_conf_data; 6218 unsigned long bitmask = 0, mask = 0; 6219 struct dasd_conf_data *conf_data; 6220 unsigned int pos, path; 6221 char *ref_gneq, *gneq; 6222 char *ref_ned, *ned; 6223 int tbcpm = 0; 6224 6225 /* if CUIR request does not specify the scope use the path 6226 the attention message was presented on */ 6227 if (!cuir->ned_map || 6228 !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2])) 6229 return lpum; 6230 6231 /* get reference conf data */ 6232 ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir); 6233 /* reference ned is determined by ned_map field */ 6234 pos = 8 - ffs(cuir->ned_map); 6235 ref_ned = (char *)&ref_conf_data->neds[pos]; 6236 ref_gneq = (char *)&ref_conf_data->gneq; 6237 /* transfer 24 bit neq_map to mask */ 6238 mask = cuir->neq_map[2]; 6239 mask |= cuir->neq_map[1] << 8; 6240 mask |= cuir->neq_map[0] << 16; 6241 6242 for (path = 0; path < 8; path++) { 6243 /* initialise data per path */ 6244 bitmask = mask; 6245 conf_data = device->path[path].conf_data; 6246 pos = 8 - ffs(cuir->ned_map); 6247 ned = (char *) &conf_data->neds[pos]; 6248 /* compare reference ned and per path ned */ 6249 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0) 6250 continue; 6251 gneq = (char *)&conf_data->gneq; 6252 /* compare reference gneq and per_path gneq under 6253 24 bit mask where mask bit 0 equals byte 7 of 6254 the gneq and mask bit 24 equals byte 31 */ 6255 while (bitmask) { 6256 pos = ffs(bitmask) - 1; 6257 if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1) 6258 != 0) 6259 break; 6260 clear_bit(pos, &bitmask); 6261 } 6262 if (bitmask) 6263 continue; 6264 /* device and path match the reference values 6265 add path to CUIR scope */ 6266 tbcpm |= 0x80 >> path; 6267 } 6268 return tbcpm; 6269} 6270 6271static void dasd_eckd_cuir_notify_user(struct dasd_device *device, 6272 unsigned long paths, int action) 6273{ 6274 int pos; 6275 6276 while (paths) { 6277 /* get position of bit in mask */ 6278 pos = 8 - ffs(paths); 6279 /* get channel path descriptor from this position */ 6280 if (action == CUIR_QUIESCE) 6281 pr_warn("Service on the storage server caused path %x.%02x to go offline", 6282 device->path[pos].cssid, 6283 device->path[pos].chpid); 6284 else if (action == CUIR_RESUME) 6285 pr_info("Path %x.%02x is back online after service on the storage server", 6286 device->path[pos].cssid, 6287 device->path[pos].chpid); 6288 clear_bit(7 - pos, &paths); 6289 } 6290} 6291 6292static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum, 6293 struct dasd_cuir_message *cuir) 6294{ 6295 unsigned long tbcpm; 6296 6297 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir); 6298 /* nothing to do if path is not in use */ 6299 if (!(dasd_path_get_opm(device) & tbcpm)) 6300 return 0; 6301 if (!(dasd_path_get_opm(device) & ~tbcpm)) { 6302 /* no path would be left if the CUIR action is taken 6303 return error */ 6304 return -EINVAL; 6305 } 6306 /* remove device from operational path mask */ 6307 dasd_path_remove_opm(device, tbcpm); 6308 dasd_path_add_cuirpm(device, tbcpm); 6309 return tbcpm; 6310} 6311 6312/* 6313 * walk through all devices and build a path mask to quiesce them 6314 * return an error if the last path to a device would be removed 6315 * 6316 * if only part of the devices are quiesced and an error 6317 * occurs no onlining necessary, the storage server will 6318 * notify the already set offline devices again 
6319 */ 6320static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, 6321 struct dasd_cuir_message *cuir) 6322{ 6323 struct dasd_eckd_private *private = device->private; 6324 struct alias_pav_group *pavgroup, *tempgroup; 6325 struct dasd_device *dev, *n; 6326 unsigned long paths = 0; 6327 unsigned long flags; 6328 int tbcpm; 6329 6330 /* active devices */ 6331 list_for_each_entry_safe(dev, n, &private->lcu->active_devices, 6332 alias_list) { 6333 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6334 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6335 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 6336 if (tbcpm < 0) 6337 goto out_err; 6338 paths |= tbcpm; 6339 } 6340 /* inactive devices */ 6341 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, 6342 alias_list) { 6343 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6344 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6345 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags); 6346 if (tbcpm < 0) 6347 goto out_err; 6348 paths |= tbcpm; 6349 } 6350 /* devices in PAV groups */ 6351 list_for_each_entry_safe(pavgroup, tempgroup, 6352 &private->lcu->grouplist, group) { 6353 list_for_each_entry_safe(dev, n, &pavgroup->baselist, 6354 alias_list) { 6355 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6356 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6357 spin_unlock_irqrestore( 6358 get_ccwdev_lock(dev->cdev), flags); 6359 if (tbcpm < 0) 6360 goto out_err; 6361 paths |= tbcpm; 6362 } 6363 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, 6364 alias_list) { 6365 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags); 6366 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir); 6367 spin_unlock_irqrestore( 6368 get_ccwdev_lock(dev->cdev), flags); 6369 if (tbcpm < 0) 6370 goto out_err; 6371 paths |= tbcpm; 6372 } 6373 } 6374 /* notify user about all paths affected by CUIR action */ 6375 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE); 6376 return 0; 6377out_err: 6378 return tbcpm; 6379} 6380 6381static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, 6382 struct dasd_cuir_message *cuir) 6383{ 6384 struct dasd_eckd_private *private = device->private; 6385 struct alias_pav_group *pavgroup, *tempgroup; 6386 struct dasd_device *dev, *n; 6387 unsigned long paths = 0; 6388 int tbcpm; 6389 6390 /* 6391 * the path may have been added through a generic path event before 6392 * only trigger path verification if the path is not already in use 6393 */ 6394 list_for_each_entry_safe(dev, n, 6395 &private->lcu->active_devices, 6396 alias_list) { 6397 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 6398 paths |= tbcpm; 6399 if (!(dasd_path_get_opm(dev) & tbcpm)) { 6400 dasd_path_add_tbvpm(dev, tbcpm); 6401 dasd_schedule_device_bh(dev); 6402 } 6403 } 6404 list_for_each_entry_safe(dev, n, 6405 &private->lcu->inactive_devices, 6406 alias_list) { 6407 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 6408 paths |= tbcpm; 6409 if (!(dasd_path_get_opm(dev) & tbcpm)) { 6410 dasd_path_add_tbvpm(dev, tbcpm); 6411 dasd_schedule_device_bh(dev); 6412 } 6413 } 6414 /* devices in PAV groups */ 6415 list_for_each_entry_safe(pavgroup, tempgroup, 6416 &private->lcu->grouplist, 6417 group) { 6418 list_for_each_entry_safe(dev, n, 6419 &pavgroup->baselist, 6420 alias_list) { 6421 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 6422 paths |= tbcpm; 6423 if (!(dasd_path_get_opm(dev) & tbcpm)) { 6424 dasd_path_add_tbvpm(dev, tbcpm); 6425 dasd_schedule_device_bh(dev); 6426 } 6427 } 6428 
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before;
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}

static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left, schedule work again */
	device->discipline->check_attention(device, lpum);
}

static void dasd_eckd_oos_resume(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long flags;

	spin_lock_irqsave(&private->lcu->lock, flags);
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
	}
	spin_unlock_irqrestore(&private->lcu->lock, flags);
}

static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left, schedule work again */
	device->discipline->check_attention(device, lpum);
}
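
/*
 * The CUIR and OOS handlers above are reached through the worker below:
 * it reads the message buffer and dispatches on the (length, format)
 * pair, i.e. a message is treated as CUIR only if it matches both
 * ATTENTION_LENGTH_CUIR and ATTENTION_FORMAT_CUIR.
 */
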
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}

static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
	dasd_get_device(device);
	data->device = device;
	data->lpum = lpum;
	schedule_work(&data->worker);
	return 0;
}

static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}

static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}

static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}

static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent any new I/O from being started on the device and
	 * schedule a requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
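
/*
 * Illustrative values for dasd_eckd_disable_hpf_path() above: with
 * opm == 0xe0 and lpum == 0x20 other operational paths remain
 * (~0x20 & 0xe0 == 0xc0), so the path is moved from the operational mask
 * to the no-HPF mask and 1 is returned; with opm == 0x20 the check fails,
 * 0 is returned and the caller disables HPF for the whole device.
 */
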
6650 */ 6651static void dasd_eckd_setup_blk_queue(struct dasd_block *block) 6652{ 6653 unsigned int logical_block_size = block->bp_block; 6654 struct request_queue *q = block->request_queue; 6655 struct dasd_device *device = block->base; 6656 int max; 6657 6658 if (device->features & DASD_FEATURE_USERAW) { 6659 /* 6660 * the max_blocks value for raw_track access is 256 6661 * it is higher than the native ECKD value because we 6662 * only need one ccw per track 6663 * so the max_hw_sectors are 6664 * 2048 x 512B = 1024kB = 16 tracks 6665 */ 6666 max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift; 6667 } else { 6668 max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift; 6669 } 6670 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 6671 q->limits.max_dev_sectors = max; 6672 blk_queue_logical_block_size(q, logical_block_size); 6673 blk_queue_max_hw_sectors(q, max); 6674 blk_queue_max_segments(q, USHRT_MAX); 6675 /* With page sized segments each segment can be translated into one idaw/tidaw */ 6676 blk_queue_max_segment_size(q, PAGE_SIZE); 6677 blk_queue_segment_boundary(q, PAGE_SIZE - 1); 6678} 6679 6680static struct ccw_driver dasd_eckd_driver = { 6681 .driver = { 6682 .name = "dasd-eckd", 6683 .owner = THIS_MODULE, 6684 }, 6685 .ids = dasd_eckd_ids, 6686 .probe = dasd_eckd_probe, 6687 .remove = dasd_generic_remove, 6688 .set_offline = dasd_generic_set_offline, 6689 .set_online = dasd_eckd_set_online, 6690 .notify = dasd_generic_notify, 6691 .path_event = dasd_generic_path_event, 6692 .shutdown = dasd_generic_shutdown, 6693 .freeze = dasd_generic_pm_freeze, 6694 .thaw = dasd_generic_restore_device, 6695 .restore = dasd_generic_restore_device, 6696 .uc_handler = dasd_generic_uc_handler, 6697 .int_class = IRQIO_DAS, 6698}; 6699 6700static struct dasd_discipline dasd_eckd_discipline = { 6701 .owner = THIS_MODULE, 6702 .name = "ECKD", 6703 .ebcname = "ECKD", 6704 .check_device = dasd_eckd_check_characteristics, 6705 .uncheck_device = dasd_eckd_uncheck_device, 6706 .do_analysis = dasd_eckd_do_analysis, 6707 .pe_handler = dasd_eckd_pe_handler, 6708 .basic_to_ready = dasd_eckd_basic_to_ready, 6709 .online_to_ready = dasd_eckd_online_to_ready, 6710 .basic_to_known = dasd_eckd_basic_to_known, 6711 .setup_blk_queue = dasd_eckd_setup_blk_queue, 6712 .fill_geometry = dasd_eckd_fill_geometry, 6713 .start_IO = dasd_start_IO, 6714 .term_IO = dasd_term_IO, 6715 .handle_terminated_request = dasd_eckd_handle_terminated_request, 6716 .format_device = dasd_eckd_format_device, 6717 .check_device_format = dasd_eckd_check_device_format, 6718 .erp_action = dasd_eckd_erp_action, 6719 .erp_postaction = dasd_eckd_erp_postaction, 6720 .check_for_device_change = dasd_eckd_check_for_device_change, 6721 .build_cp = dasd_eckd_build_alias_cp, 6722 .free_cp = dasd_eckd_free_alias_cp, 6723 .dump_sense = dasd_eckd_dump_sense, 6724 .dump_sense_dbf = dasd_eckd_dump_sense_dbf, 6725 .fill_info = dasd_eckd_fill_info, 6726 .ioctl = dasd_eckd_ioctl, 6727 .freeze = dasd_eckd_pm_freeze, 6728 .restore = dasd_eckd_restore_device, 6729 .reload = dasd_eckd_reload_device, 6730 .get_uid = dasd_eckd_get_uid, 6731 .kick_validate = dasd_eckd_kick_validate_server, 6732 .check_attention = dasd_eckd_check_attention, 6733 .host_access_count = dasd_eckd_host_access_count, 6734 .hosts_print = dasd_hosts_print, 6735 .handle_hpf_error = dasd_eckd_handle_hpf_error, 6736 .disable_hpf = dasd_eckd_disable_hpf_device, 6737 .hpf_enabled = dasd_eckd_hpf_enabled, 6738 .reset_path = dasd_eckd_reset_path, 6739 .is_ese = dasd_eckd_is_ese, 6740 .space_allocated = 
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.freeze      = dasd_generic_pm_freeze,
	.thaw	     = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};

static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.setup_blk_queue = dasd_eckd_setup_blk_queue,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.freeze = dasd_eckd_pm_freeze,
	.restore = dasd_eckd_restore_device,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
};

static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
				   GFP_KERNEL | GFP_DMA);
	if (!dasd_reserve_req)
		return -ENOMEM;
	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
				    GFP_KERNEL | GFP_DMA);
	if (!dasd_vol_info_req) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
				    GFP_KERNEL | GFP_DMA);
	if (!pe_handler_worker) {
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
	if (!rawpadpage) {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();
	else {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		free_page((unsigned long)rawpadpage);
	}
	return ret;
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(pe_handler_worker);
	kfree(dasd_reserve_req);
	/* dasd_vol_info_req is allocated in dasd_eckd_init() as well */
	kfree(dasd_vol_info_req);
	free_page((unsigned long)rawpadpage);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);