Lines Matching refs:cqr
86 struct dasd_ccw_req cqr;
93 struct dasd_ccw_req cqr;
109 struct dasd_ccw_req cqr;
816 struct dasd_ccw_req *cqr,
830 ccw = cqr->cpaddr;
835 cqr->magic = DASD_ECKD_MAGIC;
837 cqr->startdev = device;
838 cqr->memdev = device;
839 cqr->block = NULL;
840 cqr->expires = 10*HZ;
841 cqr->lpm = lpm;
842 cqr->retries = 256;
843 cqr->buildclk = get_tod_clock();
844 cqr->status = DASD_CQR_FILLED;
845 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
850 * if the cqr is not done and needs some error recovery
854 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
859 if (cqr->status != DASD_CQR_DONE) {
860 ccw = cqr->cpaddr;
869 dasd_wakeup_cb(cqr, data);
873 struct dasd_ccw_req *cqr,
887 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
888 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
889 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
890 cqr->retries = 5;
891 cqr->callback = read_conf_cb;
892 rc = dasd_sleep_on_immediatly(cqr);
903 struct dasd_ccw_req *cqr;
919 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
922 if (IS_ERR(cqr)) {
928 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
929 cqr->callback = read_conf_cb;
930 ret = dasd_sleep_on(cqr);
934 dasd_sfree_request(cqr, cqr->memdev);
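
The matches above (816-934) already trace the complete synchronous round trip a dasd_ccw_req makes in this driver: allocate, fill the header fields, submit with dasd_sleep_on(), and free against cqr->memdev. A minimal sketch of that round trip, assuming the helper signatures exactly as they appear in the matches; the NOP command and the 32-byte data area are placeholders, not taken from the driver:

#include "dasd_int.h"   /* struct dasd_ccw_req, dasd_smalloc_request(), dasd_sleep_on() */
#include "dasd_eckd.h"  /* DASD_ECKD_MAGIC */

/* Sketch only: mirrors the alloc -> fill -> submit -> free sequence above. */
static int cqr_roundtrip_sketch(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        int rc;

        /* one CCW, a small data area, no struct request attached */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);

        ccw = cqr->cpaddr;
        ccw->cmd_code = CCW_CMD_NOOP;           /* placeholder; real requests use RCD, PSF, ... */
        ccw->count = 0;
        ccw->cda = (__u32)(addr_t) cqr->data;

        cqr->startdev = device;
        cqr->memdev = device;                   /* the device the request is freed against */
        cqr->block = NULL;
        cqr->retries = 256;
        cqr->expires = 10 * HZ;
        cqr->buildclk = get_tod_clock();
        cqr->status = DASD_CQR_FILLED;          /* ready for submission */

        rc = dasd_sleep_on(cqr);                /* synchronous: submit and wait */

        dasd_sfree_request(cqr, cqr->memdev);
        return rc;
}
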
1232 memset(&data->cqr, 0, sizeof(data->cqr));
1233 data->cqr.cpaddr = &data->ccw;
1234 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1282 memset(&data->cqr, 0, sizeof(data->cqr));
1283 data->cqr.cpaddr = &data->ccw;
1284 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1480 struct dasd_ccw_req *cqr;
1485 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1489 if (IS_ERR(cqr)) {
1492 return PTR_ERR(cqr);
1494 cqr->startdev = device;
1495 cqr->memdev = device;
1496 cqr->block = NULL;
1497 cqr->retries = 256;
1498 cqr->expires = 10 * HZ;
1501 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1507 ccw = cqr->cpaddr;
1522 cqr->buildclk = get_tod_clock();
1523 cqr->status = DASD_CQR_FILLED;
1524 rc = dasd_sleep_on(cqr);
1526 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1533 dasd_sfree_request(cqr, cqr->memdev);
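
Lines 1480-1533 are the first of several matches that follow the driver's PSF/RSSD idiom: a Perform Subsystem Function CCW, command-chained to a Read Subsystem Data CCW that fetches the result. A sketch of just that chaining step inside an already-allocated two-CCW request, using the same driver-internal headers as the sketch above; the suborder value and the separate result buffer are illustrative assumptions, not the driver's actual layout:

/* Sketch: chain a PSF order CCW to the RSSD CCW that reads back its result. */
static void fill_psf_rssd_sketch(struct dasd_ccw_req *cqr,
                                 void *result, unsigned int result_len)
{
        struct dasd_psf_prssd_data *prssdp;
        struct ccw1 *ccw;

        prssdp = cqr->data;
        memset(prssdp, 0, sizeof(*prssdp));
        prssdp->order = PSF_ORDER_PRSSD;        /* Prepare for Read Subsystem Data */
        prssdp->suborder = 0x00;                /* placeholder suborder */

        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_PSF;
        ccw->count = sizeof(*prssdp);
        ccw->flags |= CCW_FLAG_CC;              /* command-chain to the read below */
        ccw->cda = (__u32)(addr_t) prssdp;

        ccw++;
        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
        ccw->count = result_len;
        ccw->flags |= CCW_FLAG_SLI;             /* tolerate a shorter answer */
        ccw->cda = (__u32)(addr_t) result;

        cqr->buildclk = get_tod_clock();
        cqr->status = DASD_CQR_FILLED;
}
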
1543 struct dasd_ccw_req *cqr;
1554 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1556 if (IS_ERR(cqr)) {
1561 cqr = &dasd_vol_info_req->cqr;
1562 memset(cqr, 0, sizeof(*cqr));
1564 cqr->cpaddr = &dasd_vol_info_req->ccw;
1565 cqr->data = &dasd_vol_info_req->data;
1566 cqr->magic = DASD_ECKD_MAGIC;
1570 prssdp = cqr->data;
1576 ccw = cqr->cpaddr;
1592 cqr->buildclk = get_tod_clock();
1593 cqr->status = DASD_CQR_FILLED;
1594 cqr->startdev = device;
1595 cqr->memdev = device;
1596 cqr->block = NULL;
1597 cqr->retries = 256;
1598 cqr->expires = device->default_expires * HZ;
1600 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1602 rc = dasd_sleep_on_interruptible(cqr);
1613 dasd_sfree_request(cqr, cqr->memdev);
1692 struct dasd_ccw_req *cqr)
1703 if (cqr->block)
1704 data->base = cqr->block->base;
1705 else if (cqr->basedev)
1706 data->base = cqr->basedev;
1738 struct dasd_ccw_req *cqr;
1747 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1749 if (IS_ERR(cqr)) {
1752 return PTR_ERR(cqr);
1756 prssdp = cqr->data;
1761 ccw = cqr->cpaddr;
1776 cqr->buildclk = get_tod_clock();
1777 cqr->status = DASD_CQR_FILLED;
1778 cqr->startdev = device;
1779 cqr->memdev = device;
1780 cqr->block = NULL;
1781 cqr->retries = 256;
1782 cqr->expires = device->default_expires * HZ;
1784 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1786 rc = dasd_sleep_on_interruptible(cqr);
1794 dasd_sfree_request(cqr, cqr->memdev);
1849 struct dasd_ccw_req *cqr;
1853 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1857 if (IS_ERR(cqr)) {
1860 return cqr;
1862 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1869 ccw = cqr->cpaddr;
1874 cqr->startdev = device;
1875 cqr->memdev = device;
1876 cqr->block = NULL;
1877 cqr->retries = 256;
1878 cqr->expires = 10*HZ;
1879 cqr->buildclk = get_tod_clock();
1880 cqr->status = DASD_CQR_FILLED;
1881 return cqr;
1893 struct dasd_ccw_req *cqr;
1896 cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1897 if (IS_ERR(cqr))
1898 return PTR_ERR(cqr);
1904 cqr->flags |= flags;
1906 rc = dasd_sleep_on(cqr);
1910 else if (cqr->intrc == -EAGAIN)
1913 dasd_sfree_request(cqr, cqr->memdev);
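
The SSC matches (1849-1913) split the work into a builder that returns a filled request and a caller that submits it; note how a transient condition is reported through cqr->intrc rather than the dasd_sleep_on() return code. A hedged sketch of the caller side; the function name and anything beyond the error mapping shown in the matches are assumptions:

/* Sketch of the submit side shown above: build, flag, submit, map -EAGAIN. */
static int psf_ssc_submit_sketch(struct dasd_device *device, int enable_pav,
                                 unsigned long flags)
{
        struct dasd_ccw_req *cqr;
        int rc;

        cqr = dasd_eckd_build_psf_ssc(device, enable_pav);      /* builder from the matches */
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);

        cqr->flags |= flags;                    /* e.g. failfast bits handed in by the caller */

        rc = dasd_sleep_on(cqr);
        if (!rc && cqr->intrc == -EAGAIN)       /* completed, but the device asked to retry */
                rc = -EAGAIN;

        dasd_sfree_request(cqr, cqr->memdev);
        return rc;
}
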
2153 struct dasd_ccw_req *cqr;
2160 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2162 if (IS_ERR(cqr))
2163 return cqr;
2164 ccw = cqr->cpaddr;
2166 define_extent(ccw++, cqr->data, 0, 1,
2168 LO_data = cqr->data + sizeof(struct DE_eckd_data);
2196 cqr->block = NULL;
2197 cqr->startdev = device;
2198 cqr->memdev = device;
2199 cqr->retries = 255;
2200 cqr->buildclk = get_tod_clock();
2201 cqr->status = DASD_CQR_FILLED;
2203 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2205 return cqr;
2226 * This is the callback function for the init_analysis cqr. It saves
2421 struct dasd_ccw_req *cqr;
2444 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2445 if (IS_ERR(cqr))
2446 return cqr;
2450 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2456 cqr->cpaddr = itcw_get_tcw(itcw);
2476 cqr->cpmode = 1;
2477 cqr->startdev = startdev;
2478 cqr->memdev = startdev;
2479 cqr->basedev = base;
2480 cqr->retries = startdev->default_retries;
2481 cqr->expires = startdev->default_expires * HZ;
2482 cqr->buildclk = get_tod_clock();
2483 cqr->status = DASD_CQR_FILLED;
2485 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2486 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2488 return cqr;
2491 dasd_sfree_request(cqr, startdev);
2506 struct dasd_ccw_req *cqr;
2537 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2538 if (IS_ERR(cqr))
2539 return cqr;
2542 data = cqr->data;
2543 ccw = cqr->cpaddr;
2570 cqr->startdev = startdev;
2571 cqr->memdev = startdev;
2572 cqr->basedev = base;
2573 cqr->retries = DASD_RETRIES;
2574 cqr->expires = startdev->default_expires * HZ;
2575 cqr->buildclk = get_tod_clock();
2576 cqr->status = DASD_CQR_FILLED;
2578 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2580 return cqr;
2914 struct dasd_ccw_req *cqr, *n;
2952 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
2955 if (IS_ERR(cqr)) {
2956 rc = PTR_ERR(cqr);
2970 list_add_tail(&cqr->blocklist, &format_queue);
2983 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
2984 device = cqr->startdev;
2987 if (cqr->status == DASD_CQR_FAILED) {
2993 sense = dasd_get_sense(&cqr->irb);
2994 memcpy(irb, &cqr->irb, sizeof(*irb));
2998 list_del_init(&cqr->blocklist);
2999 dasd_ffree_request(cqr, device);
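
The format matches (2914-2999) show the batching idiom: each built request is appended to a local list through cqr->blocklist, the whole list is waited on, and then every entry is inspected, unlinked and returned to the format pool. A compressed sketch of that flow, assuming dasd_sleep_on_queue() as the batch-wait helper (it is not visible in the matches) and placeholder builder arguments:

/* Sketch of the list-based batch above; builder arguments are placeholders. */
static int format_batch_sketch(struct dasd_device *base, struct format_data_t *fdata,
                               struct irb *irb)
{
        struct dasd_ccw_req *cqr, *n;
        struct dasd_device *device;
        LIST_HEAD(format_queue);
        int rc;

        cqr = dasd_eckd_format_build_ccw_req(base, fdata, 0, 0);        /* placeholder args */
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        list_add_tail(&cqr->blocklist, &format_queue);  /* collect; loop here for more tracks */

        rc = dasd_sleep_on_queue(&format_queue);        /* assumed batch submit-and-wait */

        list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
                device = cqr->startdev;
                if (cqr->status == DASD_CQR_FAILED) {
                        /* keep the interrupt response block (and its sense) for the caller */
                        if (dasd_get_sense(&cqr->irb))
                                memcpy(irb, &cqr->irb, sizeof(*irb));
                        rc = -EIO;
                }
                list_del_init(&cqr->blocklist);
                dasd_ffree_request(cqr, device);        /* format requests come from their own pool */
        }
        return rc;
}
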
3039 struct dasd_ccw_req *cqr)
3041 struct dasd_block *block = cqr->block;
3047 if (cqr->trkcount != atomic_read(&block->trkcount)) {
3084 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3086 struct dasd_device *device = cqr->startdev;
3090 clear_format_track(format, cqr->basedev->block);
3092 dasd_ffree_request(cqr, device);
3096 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3113 req = dasd_get_callback_data(cqr);
3114 block = cqr->block;
3138 if (test_and_set_format_track(format, cqr)) {
3140 cqr->retries++;
3179 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3198 req = (struct request *) cqr->callback_data;
3199 base = cqr->block->base;
3201 block = cqr->block;
3238 cqr->proc_bytes = blk_count * blksize;
3469 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3471 if (cqr->retries < 0) {
3472 cqr->status = DASD_CQR_FAILED;
3475 cqr->status = DASD_CQR_FILLED;
3476 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3477 dasd_eckd_reset_ccw_to_base_io(cqr);
3478 cqr->startdev = cqr->block->base;
3479 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3484 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3486 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3503 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3509 struct dasd_ccw_req *cqr,
3566 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3664 struct dasd_ccw_req *cqr;
3688 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3689 if (IS_ERR(cqr)) {
3692 return cqr;
3695 ras_data = cqr->data;
3719 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3738 ccw = cqr->cpaddr;
3739 ccw->cda = (__u32)(addr_t)cqr->data;
3743 cqr->startdev = device;
3744 cqr->memdev = device;
3745 cqr->block = block;
3746 cqr->retries = 256;
3747 cqr->expires = device->default_expires * HZ;
3748 cqr->buildclk = get_tod_clock();
3749 cqr->status = DASD_CQR_FILLED;
3751 return cqr;
3756 struct dasd_ccw_req *cqr;
3759 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3760 if (IS_ERR(cqr))
3761 return PTR_ERR(cqr);
3763 rc = dasd_sleep_on_interruptible(cqr);
3765 dasd_sfree_request(cqr, cqr->memdev);
3775 struct dasd_ccw_req *cqr, *n;
3801 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3802 if (IS_ERR(cqr)) {
3803 rc = PTR_ERR(cqr);
3814 list_add_tail(&cqr->blocklist, &ras_queue);
3822 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3823 device = cqr->startdev;
3827 list_del_init(&cqr->blocklist);
3829 dasd_sfree_request(cqr, device);
3866 struct dasd_ccw_req *cqr;
3927 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3929 if (IS_ERR(cqr))
3930 return cqr;
3931 ccw = cqr->cpaddr;
3934 if (prefix(ccw++, cqr->data, first_trk,
3939 dasd_sfree_request(cqr, startdev);
3942 idaws = (unsigned long *) (cqr->data +
3945 if (define_extent(ccw++, cqr->data, first_trk,
3950 dasd_sfree_request(cqr, startdev);
3953 idaws = (unsigned long *) (cqr->data +
4022 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4023 cqr->startdev = startdev;
4024 cqr->memdev = startdev;
4025 cqr->block = block;
4026 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4027 cqr->lpm = dasd_path_get_ppm(startdev);
4028 cqr->retries = startdev->default_retries;
4029 cqr->buildclk = get_tod_clock();
4030 cqr->status = DASD_CQR_FILLED;
4034 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4035 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4036 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4039 return cqr;
4056 struct dasd_ccw_req *cqr;
4094 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4096 if (IS_ERR(cqr))
4097 return cqr;
4098 ccw = cqr->cpaddr;
4106 if (prefix_LRE(ccw++, cqr->data, first_trk,
4114 dasd_sfree_request(cqr, startdev);
4125 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
4163 dasd_sfree_request(cqr, startdev);
4169 dasd_sfree_request(cqr, startdev);
4201 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4202 cqr->startdev = startdev;
4203 cqr->memdev = startdev;
4204 cqr->block = block;
4205 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4206 cqr->lpm = dasd_path_get_ppm(startdev);
4207 cqr->retries = startdev->default_retries;
4208 cqr->buildclk = get_tod_clock();
4209 cqr->status = DASD_CQR_FILLED;
4213 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4215 return cqr;
4387 struct dasd_ccw_req *cqr;
4433 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4435 if (IS_ERR(cqr))
4436 return cqr;
4445 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4450 cqr->cpaddr = itcw_get_tcw(itcw);
4524 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4525 cqr->cpmode = 1;
4526 cqr->startdev = startdev;
4527 cqr->memdev = startdev;
4528 cqr->block = block;
4529 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4530 cqr->lpm = dasd_path_get_ppm(startdev);
4531 cqr->retries = startdev->default_retries;
4532 cqr->buildclk = get_tod_clock();
4533 cqr->status = DASD_CQR_FILLED;
4537 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4538 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4539 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4542 return cqr;
4544 dasd_sfree_request(cqr, startdev);
4563 struct dasd_ccw_req *cqr;
4594 cqr = NULL;
4599 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4604 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4605 (PTR_ERR(cqr) != -ENOMEM))
4606 cqr = NULL;
4610 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4615 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4616 (PTR_ERR(cqr) != -ENOMEM))
4617 cqr = NULL;
4619 if (!cqr)
4620 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4625 return cqr;
4639 struct dasd_ccw_req *cqr;
4706 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4708 if (IS_ERR(cqr))
4709 return cqr;
4711 ccw = cqr->cpaddr;
4712 data = cqr->data;
4726 idaws = (unsigned long *)(cqr->data + size);
4766 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4767 cqr->startdev = startdev;
4768 cqr->memdev = startdev;
4769 cqr->block = block;
4770 cqr->expires = startdev->default_expires * HZ;
4771 cqr->lpm = dasd_path_get_ppm(startdev);
4772 cqr->retries = startdev->default_retries;
4773 cqr->buildclk = get_tod_clock();
4774 cqr->status = DASD_CQR_FILLED;
4776 return cqr;
4781 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4794 private = cqr->block->base->private;
4795 blksize = cqr->block->bp_block;
4797 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4798 ccw = cqr->cpaddr;
4827 status = cqr->status == DASD_CQR_DONE;
4828 dasd_sfree_request(cqr, cqr->memdev);
4833 * Modify ccw/tcw in cqr so it can be started on a base device.
4835 * Note that this is not enough to restart the cqr!
4836 * Either reset cqr->startdev as well (summary unit check handling)
4837 * or restart via separate cqr (as in ERP handling).
4839 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4847 if (cqr->cpmode == 1) {
4848 tcw = cqr->cpaddr;
4855 ccw = cqr->cpaddr;
4856 pfxdata = cqr->data;
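
The comment block above (4833-4837) stresses that rewriting the channel program alone does not restart the request. The terminated-request handler earlier in the matches shows the companion step; condensed into a sketch:

/* Sketch of the "reset cqr->startdev as well" path the comment describes,
 * condensed from the terminated-request handling visible above. */
static void restart_on_base_sketch(struct dasd_ccw_req *cqr)
{
        if (cqr->block && cqr->startdev != cqr->block->base) {
                dasd_eckd_reset_ccw_to_base_io(cqr);            /* rewrite ccw/tcw for base I/O */
                cqr->startdev = cqr->block->base;               /* actually start on the base device */
                cqr->lpm = dasd_path_get_opm(cqr->block->base); /* use the base device's paths */
        }
        cqr->status = DASD_CQR_FILLED;                          /* ready to be requeued */
}
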
4873 struct dasd_ccw_req *cqr;
4885 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4887 cqr = dasd_eckd_build_cp(startdev, block, req);
4888 if (IS_ERR(cqr))
4891 return cqr;
4894 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4900 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4901 private = cqr->memdev->private;
4903 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4904 return dasd_eckd_free_cp(cqr, req);
4938 struct dasd_ccw_req *cqr;
4947 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4948 if (IS_ERR(cqr)) {
4951 cqr = &dasd_reserve_req->cqr;
4952 memset(cqr, 0, sizeof(*cqr));
4955 cqr->cpaddr = &dasd_reserve_req->ccw;
4956 cqr->data = &dasd_reserve_req->data;
4957 cqr->magic = DASD_ECKD_MAGIC;
4959 ccw = cqr->cpaddr;
4963 ccw->cda = (__u32)(addr_t) cqr->data;
4964 cqr->startdev = device;
4965 cqr->memdev = device;
4966 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4967 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4968 cqr->retries = 2; /* set retry counter to enable basic ERP */
4969 cqr->expires = 2 * HZ;
4970 cqr->buildclk = get_tod_clock();
4971 cqr->status = DASD_CQR_FILLED;
4973 rc = dasd_sleep_on_immediatly(cqr);
4980 dasd_sfree_request(cqr, cqr->memdev);
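
The reserve/release/steal-lock matches (4938 onwards) all repeat one more idiom: if dasd_smalloc_request() fails under memory pressure, fall back to a statically allocated request. A sketch of that fallback; the static container and its name stand in for the driver's dasd_reserve_req, and a matching free path would have to skip dasd_sfree_request() for the static copy:

/* Sketch: fall back to a static request when allocation fails.  The
 * static_reserve container is an illustrative stand-in and would need
 * the same mutex protection the driver uses around its static request. */
static struct {
        struct dasd_ccw_req cqr;
        struct ccw1 ccw;
        char data[32];
} static_reserve;

static struct dasd_ccw_req *reserve_cqr_sketch(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                /* memory pressure: reuse the static, pre-zeroed request instead */
                cqr = &static_reserve.cqr;
                memset(cqr, 0, sizeof(*cqr));
                memset(&static_reserve.ccw, 0, sizeof(static_reserve.ccw));
                cqr->cpaddr = &static_reserve.ccw;
                cqr->data = &static_reserve.data;
                cqr->magic = DASD_ECKD_MAGIC;
        }
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->retries = 2;               /* enough for basic ERP, as in the matches */
        cqr->expires = 2 * HZ;
        cqr->buildclk = get_tod_clock();
        cqr->status = DASD_CQR_FILLED;
        return cqr;
}
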
4993 struct dasd_ccw_req *cqr;
5002 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5003 if (IS_ERR(cqr)) {
5006 cqr = &dasd_reserve_req->cqr;
5007 memset(cqr, 0, sizeof(*cqr));
5010 cqr->cpaddr = &dasd_reserve_req->ccw;
5011 cqr->data = &dasd_reserve_req->data;
5012 cqr->magic = DASD_ECKD_MAGIC;
5014 ccw = cqr->cpaddr;
5018 ccw->cda = (__u32)(addr_t) cqr->data;
5019 cqr->startdev = device;
5020 cqr->memdev = device;
5021 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5022 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5023 cqr->retries = 2; /* set retry counter to enable basic ERP */
5024 cqr->expires = 2 * HZ;
5025 cqr->buildclk = get_tod_clock();
5026 cqr->status = DASD_CQR_FILLED;
5028 rc = dasd_sleep_on_immediatly(cqr);
5035 dasd_sfree_request(cqr, cqr->memdev);
5047 struct dasd_ccw_req *cqr;
5056 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5057 if (IS_ERR(cqr)) {
5060 cqr = &dasd_reserve_req->cqr;
5061 memset(cqr, 0, sizeof(*cqr));
5064 cqr->cpaddr = &dasd_reserve_req->ccw;
5065 cqr->data = &dasd_reserve_req->data;
5066 cqr->magic = DASD_ECKD_MAGIC;
5068 ccw = cqr->cpaddr;
5072 ccw->cda = (__u32)(addr_t) cqr->data;
5073 cqr->startdev = device;
5074 cqr->memdev = device;
5075 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5076 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5077 cqr->retries = 2; /* set retry counter to enable basic ERP */
5078 cqr->expires = 2 * HZ;
5079 cqr->buildclk = get_tod_clock();
5080 cqr->status = DASD_CQR_FILLED;
5082 rc = dasd_sleep_on_immediatly(cqr);
5089 dasd_sfree_request(cqr, cqr->memdev);
5102 struct dasd_ccw_req *cqr;
5115 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
5118 if (IS_ERR(cqr)) {
5121 cqr = &dasd_reserve_req->cqr;
5122 memset(cqr, 0, sizeof(*cqr));
5125 cqr->cpaddr = &dasd_reserve_req->ccw;
5126 cqr->data = &dasd_reserve_req->data;
5127 cqr->magic = DASD_ECKD_MAGIC;
5129 ccw = cqr->cpaddr;
5133 ccw->cda = (__u32)(addr_t) cqr->data;
5134 cqr->startdev = device;
5135 cqr->memdev = device;
5136 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5137 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5138 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
5139 cqr->retries = 5;
5140 cqr->expires = 10 * HZ;
5141 cqr->buildclk = get_tod_clock();
5142 cqr->status = DASD_CQR_FILLED;
5143 cqr->lpm = usrparm.path_mask;
5145 rc = dasd_sleep_on_immediatly(cqr);
5147 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
5150 usrparm.data = *((struct dasd_snid_data *)cqr->data);
5158 dasd_sfree_request(cqr, cqr->memdev);
5170 struct dasd_ccw_req *cqr;
5174 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5178 if (IS_ERR(cqr)) {
5181 return PTR_ERR(cqr);
5183 cqr->startdev = device;
5184 cqr->memdev = device;
5185 cqr->retries = 0;
5186 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5187 cqr->expires = 10 * HZ;
5190 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5196 ccw = cqr->cpaddr;
5211 cqr->buildclk = get_tod_clock();
5212 cqr->status = DASD_CQR_FILLED;
5213 rc = dasd_sleep_on(cqr);
5215 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5221 dasd_sfree_request(cqr, cqr->memdev);
5282 struct dasd_ccw_req *cqr;
5331 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5332 if (IS_ERR(cqr)) {
5335 rc = PTR_ERR(cqr);
5339 cqr->startdev = device;
5340 cqr->memdev = device;
5341 cqr->retries = 3;
5342 cqr->expires = 10 * HZ;
5343 cqr->buildclk = get_tod_clock();
5344 cqr->status = DASD_CQR_FILLED;
5347 ccw = cqr->cpaddr;
5363 rc = dasd_sleep_on(cqr);
5374 dasd_sfree_request(cqr, cqr->memdev);
5890 struct dasd_ccw_req *cqr;
5894 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5898 if (IS_ERR(cqr)) {
5901 return PTR_ERR(cqr);
5904 cqr->lpm = lpum;
5906 cqr->startdev = device;
5907 cqr->memdev = device;
5908 cqr->block = NULL;
5909 cqr->expires = 10 * HZ;
5910 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5914 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5915 cqr->retries = 256;
5918 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5924 ccw = cqr->cpaddr;
5941 cqr->buildclk = get_tod_clock();
5942 cqr->status = DASD_CQR_FILLED;
5943 rc = dasd_sleep_on_immediatly(cqr);
5945 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5950 } else if (cqr->lpm) {
5956 cqr->lpm = 0;
5962 dasd_sfree_request(cqr, cqr->memdev);
5972 struct dasd_ccw_req *cqr;
5984 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5987 if (IS_ERR(cqr)) {
5990 return PTR_ERR(cqr);
5994 dasd_sfree_request(cqr, device);
5999 cqr->startdev = device;
6000 cqr->memdev = device;
6001 cqr->block = NULL;
6002 cqr->retries = 256;
6003 cqr->expires = 10 * HZ;
6006 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
6015 ccw = cqr->cpaddr;
6029 cqr->buildclk = get_tod_clock();
6030 cqr->status = DASD_CQR_FILLED;
6032 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
6033 rc = dasd_sleep_on_interruptible(cqr);
6043 dasd_sfree_request(cqr, cqr->memdev);
6139 struct dasd_ccw_req *cqr;
6143 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
6147 if (IS_ERR(cqr)) {
6150 return PTR_ERR(cqr);
6153 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6160 ccw = cqr->cpaddr;
6166 cqr->startdev = device;
6167 cqr->memdev = device;
6168 cqr->block = NULL;
6169 cqr->retries = 256;
6170 cqr->expires = 10*HZ;
6171 cqr->buildclk = get_tod_clock();
6172 cqr->status = DASD_CQR_FILLED;
6173 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6175 rc = dasd_sleep_on(cqr);
6177 dasd_sfree_request(cqr, cqr->memdev);