Lines Matching defs:cqr

86 struct dasd_ccw_req cqr;
93 struct dasd_ccw_req cqr;
109 struct dasd_ccw_req cqr;
818 struct dasd_ccw_req *cqr,
832 ccw = cqr->cpaddr;
837 cqr->magic = DASD_ECKD_MAGIC;
839 cqr->startdev = device;
840 cqr->memdev = device;
841 cqr->block = NULL;
842 cqr->expires = 10*HZ;
843 cqr->lpm = lpm;
844 cqr->retries = 256;
845 cqr->buildclk = get_tod_clock();
846 cqr->status = DASD_CQR_FILLED;
847 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
852 * if the cqr is not done and needs some error recovery
856 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
861 if (cqr->status != DASD_CQR_DONE) {
862 ccw = cqr->cpaddr;
871 dasd_wakeup_cb(cqr, data);
875 struct dasd_ccw_req *cqr,
889 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
890 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
891 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
892 cqr->retries = 5;
893 cqr->callback = read_conf_cb;
894 rc = dasd_sleep_on_immediatly(cqr);
905 struct dasd_ccw_req *cqr;
921 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
924 if (IS_ERR(cqr)) {
930 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
931 cqr->callback = read_conf_cb;
932 ret = dasd_sleep_on(cqr);
936 dasd_sfree_request(cqr, cqr->memdev);
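The read_conf matches above already show the shape that almost every request in this listing follows: allocate the request, set the routing and retry fields, mark it DASD_CQR_FILLED, submit it synchronously, and free it against cqr->memdev. Below is a minimal sketch of that lifecycle, using only helpers and fields that appear in the matches; the function name and the 32-byte data area are illustrative, and the CCW program itself is elided.

/* Sketch only: the lifecycle shared by the read_conf/PSF/RSSD requests.
 * Helper names and fields are taken from the matches above; error
 * handling and the actual channel program are left out.
 */
static int dasd_eckd_cqr_lifecycle_sketch(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	/* one CCW, a small data area, no block-layer request attached */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	cqr->startdev = device;		/* device the channel program runs on */
	cqr->memdev = device;		/* device whose pool the memory came from */
	cqr->block = NULL;		/* not bound to a block device request */
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* ... set up the CCW(s) at cqr->cpaddr and the payload at cqr->data ... */

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on(cqr);	/* synchronous submit-and-wait */

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}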
1262 memset(&data->cqr, 0, sizeof(data->cqr));
1263 data->cqr.cpaddr = &data->ccw;
1264 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1312 memset(&data->cqr, 0, sizeof(data->cqr));
1313 data->cqr.cpaddr = &data->ccw;
1314 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1521 struct dasd_ccw_req *cqr;
1526 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1530 if (IS_ERR(cqr)) {
1533 return PTR_ERR(cqr);
1535 cqr->startdev = device;
1536 cqr->memdev = device;
1537 cqr->block = NULL;
1538 cqr->retries = 256;
1539 cqr->expires = 10 * HZ;
1542 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1548 ccw = cqr->cpaddr;
1563 cqr->buildclk = get_tod_clock();
1564 cqr->status = DASD_CQR_FILLED;
1565 rc = dasd_sleep_on(cqr);
1567 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1574 dasd_sfree_request(cqr, cqr->memdev);
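This first PSF/RSSD query, like the several later ones that read subsystem data, chains two CCWs: a Perform Subsystem Function CCW pointing at the dasd_psf_prssd_data block at the start of cqr->data, command-chained to a Read Subsystem Data CCW that reads the answer into the area directly behind that block. A minimal sketch of the chaining follows, assuming the driver's DASD_ECKD_CCW_PSF/DASD_ECKD_CCW_RSSD opcodes; the query-specific order/suborder fields of the parameter block are deliberately left unset, and rssd_len stands for the size of the expected answer.

/* Minimal sketch of the PSF + RSSD CCW pair used by the query requests. */
static void dasd_eckd_psf_rssd_sketch(struct dasd_ccw_req *cqr,
				      unsigned int rssd_len)
{
	struct dasd_psf_prssd_data *prssdp;
	struct ccw1 *ccw;

	prssdp = cqr->data;		/* PSF parameter block at the start of the data area */
	/* ... query-specific order/suborder would be filled in here ... */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;	/* 1st CCW: Perform Subsystem Function */
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;		/* command-chain to the read CCW */
	ccw->cda = (__u32)virt_to_phys(prssdp);

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;	/* 2nd CCW: Read Subsystem Data */
	ccw->count = rssd_len;			/* size of the answer buffer */
	ccw->cda = (__u32)virt_to_phys(prssdp + 1);	/* answer lands behind the block */
}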
1584 struct dasd_ccw_req *cqr;
1595 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1597 if (IS_ERR(cqr)) {
1602 cqr = &dasd_vol_info_req->cqr;
1603 memset(cqr, 0, sizeof(*cqr));
1605 cqr->cpaddr = &dasd_vol_info_req->ccw;
1606 cqr->data = &dasd_vol_info_req->data;
1607 cqr->magic = DASD_ECKD_MAGIC;
1611 prssdp = cqr->data;
1617 ccw = cqr->cpaddr;
1633 cqr->buildclk = get_tod_clock();
1634 cqr->status = DASD_CQR_FILLED;
1635 cqr->startdev = device;
1636 cqr->memdev = device;
1637 cqr->block = NULL;
1638 cqr->retries = 256;
1639 cqr->expires = device->default_expires * HZ;
1641 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1643 rc = dasd_sleep_on_interruptible(cqr);
1654 dasd_sfree_request(cqr, cqr->memdev);
1733 struct dasd_ccw_req *cqr)
1744 if (cqr->block)
1745 data->base = cqr->block->base;
1746 else if (cqr->basedev)
1747 data->base = cqr->basedev;
1779 struct dasd_ccw_req *cqr;
1788 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1790 if (IS_ERR(cqr)) {
1793 return PTR_ERR(cqr);
1797 prssdp = cqr->data;
1802 ccw = cqr->cpaddr;
1817 cqr->buildclk = get_tod_clock();
1818 cqr->status = DASD_CQR_FILLED;
1819 cqr->startdev = device;
1820 cqr->memdev = device;
1821 cqr->block = NULL;
1822 cqr->retries = 256;
1823 cqr->expires = device->default_expires * HZ;
1825 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1827 rc = dasd_sleep_on_interruptible(cqr);
1835 dasd_sfree_request(cqr, cqr->memdev);
1890 struct dasd_ccw_req *cqr;
1894 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1898 if (IS_ERR(cqr)) {
1901 return cqr;
1903 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1910 ccw = cqr->cpaddr;
1915 cqr->startdev = device;
1916 cqr->memdev = device;
1917 cqr->block = NULL;
1918 cqr->retries = 256;
1919 cqr->expires = 10*HZ;
1920 cqr->buildclk = get_tod_clock();
1921 cqr->status = DASD_CQR_FILLED;
1922 return cqr;
1934 struct dasd_ccw_req *cqr;
1937 cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1938 if (IS_ERR(cqr))
1939 return PTR_ERR(cqr);
1945 cqr->flags |= flags;
1947 rc = dasd_sleep_on(cqr);
1951 else if (cqr->intrc == -EAGAIN)
1954 dasd_sfree_request(cqr, cqr->memdev);
2241 struct dasd_ccw_req *cqr;
2248 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2250 if (IS_ERR(cqr))
2251 return cqr;
2252 ccw = cqr->cpaddr;
2254 define_extent(ccw++, cqr->data, 0, 1,
2256 LO_data = cqr->data + sizeof(struct DE_eckd_data);
2284 cqr->block = NULL;
2285 cqr->startdev = device;
2286 cqr->memdev = device;
2287 cqr->retries = 255;
2288 cqr->buildclk = get_tod_clock();
2289 cqr->status = DASD_CQR_FILLED;
2291 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2293 return cqr;
2314 * This is the callback function for the init_analysis cqr. It saves
2509 struct dasd_ccw_req *cqr;
2532 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2533 if (IS_ERR(cqr))
2534 return cqr;
2538 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2544 cqr->cpaddr = itcw_get_tcw(itcw);
2564 cqr->cpmode = 1;
2565 cqr->startdev = startdev;
2566 cqr->memdev = startdev;
2567 cqr->basedev = base;
2568 cqr->retries = startdev->default_retries;
2569 cqr->expires = startdev->default_expires * HZ;
2570 cqr->buildclk = get_tod_clock();
2571 cqr->status = DASD_CQR_FILLED;
2573 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2574 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2576 return cqr;
2579 dasd_sfree_request(cqr, startdev);
2594 struct dasd_ccw_req *cqr;
2625 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2626 if (IS_ERR(cqr))
2627 return cqr;
2630 data = cqr->data;
2631 ccw = cqr->cpaddr;
2658 cqr->startdev = startdev;
2659 cqr->memdev = startdev;
2660 cqr->basedev = base;
2661 cqr->retries = DASD_RETRIES;
2662 cqr->expires = startdev->default_expires * HZ;
2663 cqr->buildclk = get_tod_clock();
2664 cqr->status = DASD_CQR_FILLED;
2666 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2668 return cqr;
3002 struct dasd_ccw_req *cqr, *n;
3040 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3043 if (IS_ERR(cqr)) {
3044 rc = PTR_ERR(cqr);
3058 list_add_tail(&cqr->blocklist, &format_queue);
3071 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3072 device = cqr->startdev;
3075 if (cqr->status == DASD_CQR_FAILED) {
3081 sense = dasd_get_sense(&cqr->irb);
3082 memcpy(irb, &cqr->irb, sizeof(*irb));
3086 list_del_init(&cqr->blocklist);
3087 dasd_ffree_request(cqr, device);
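The format matches just above add a batching twist to the same lifecycle: one request is built per format step and chained onto a local format_queue via cqr->blocklist, and once the whole batch has run the list is walked to pick up per-request status (keeping a failing irb, and with it the sense data, for the caller) before each request is freed with dasd_ffree_request(). A compressed sketch of that reaping loop follows; the per-step builder call and the wait for the submitted queue are not part of the matches and are left out.

/* Compressed sketch of the reap loop over an already-submitted queue of
 * format requests; error handling is trimmed to the essentials.
 */
static int dasd_eckd_format_reap_sketch(struct list_head *format_queue,
					struct irb *irb)
{
	struct dasd_ccw_req *cqr, *n;
	struct dasd_device *device;
	int rc = 0;

	list_for_each_entry_safe(cqr, n, format_queue, blocklist) {
		device = cqr->startdev;
		if (cqr->status == DASD_CQR_FAILED) {
			/* keep the failing irb (and its sense data) for the caller */
			memcpy(irb, &cqr->irb, sizeof(*irb));
			rc = -EIO;
		}
		list_del_init(&cqr->blocklist);
		dasd_ffree_request(cqr, device);
	}
	return rc;
}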
3127 struct dasd_ccw_req *cqr)
3129 struct dasd_block *block = cqr->block;
3135 if (cqr->trkcount != atomic_read(&block->trkcount)) {
3172 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3174 struct dasd_device *device = cqr->startdev;
3178 clear_format_track(format, cqr->basedev->block);
3180 dasd_ffree_request(cqr, device);
3184 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3201 req = dasd_get_callback_data(cqr);
3202 block = cqr->block;
3226 if (test_and_set_format_track(format, cqr)) {
3228 cqr->retries++;
3267 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3286 req = (struct request *) cqr->callback_data;
3287 base = cqr->block->base;
3289 block = cqr->block;
3326 cqr->proc_bytes = blk_count * blksize;
3557 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3559 if (cqr->retries < 0) {
3560 cqr->status = DASD_CQR_FAILED;
3563 cqr->status = DASD_CQR_FILLED;
3564 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3565 dasd_eckd_reset_ccw_to_base_io(cqr);
3566 cqr->startdev = cqr->block->base;
3567 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3572 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3574 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3591 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3597 struct dasd_ccw_req *cqr,
3654 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3772 struct dasd_ccw_req *cqr;
3801 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3802 if (IS_ERR(cqr)) {
3805 return cqr;
3808 ras_data = cqr->data;
3834 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3853 ccw = cqr->cpaddr;
3854 ccw->cda = (__u32)virt_to_phys(cqr->data);
3858 cqr->startdev = device;
3859 cqr->memdev = device;
3860 cqr->block = block;
3861 cqr->retries = 256;
3862 cqr->expires = device->default_expires * HZ;
3863 cqr->buildclk = get_tod_clock();
3864 cqr->status = DASD_CQR_FILLED;
3866 return cqr;
3871 struct dasd_ccw_req *cqr;
3874 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3875 if (IS_ERR(cqr))
3876 return PTR_ERR(cqr);
3878 rc = dasd_sleep_on_interruptible(cqr);
3880 dasd_sfree_request(cqr, cqr->memdev);
3890 struct dasd_ccw_req *cqr, *n;
3916 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3917 if (IS_ERR(cqr)) {
3918 rc = PTR_ERR(cqr);
3929 list_add_tail(&cqr->blocklist, &ras_queue);
3937 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3938 device = cqr->startdev;
3942 list_del_init(&cqr->blocklist);
3944 dasd_sfree_request(cqr, device);
3981 struct dasd_ccw_req *cqr;
4042 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4044 if (IS_ERR(cqr))
4045 return cqr;
4046 ccw = cqr->cpaddr;
4049 if (prefix(ccw++, cqr->data, first_trk,
4054 dasd_sfree_request(cqr, startdev);
4057 idaws = (unsigned long *) (cqr->data +
4060 if (define_extent(ccw++, cqr->data, first_trk,
4065 dasd_sfree_request(cqr, startdev);
4068 idaws = (unsigned long *) (cqr->data +
4137 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4138 cqr->startdev = startdev;
4139 cqr->memdev = startdev;
4140 cqr->block = block;
4141 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4142 cqr->lpm = dasd_path_get_ppm(startdev);
4143 cqr->retries = startdev->default_retries;
4144 cqr->buildclk = get_tod_clock();
4145 cqr->status = DASD_CQR_FILLED;
4149 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4150 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4151 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4154 return cqr;
4171 struct dasd_ccw_req *cqr;
4209 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4211 if (IS_ERR(cqr))
4212 return cqr;
4213 ccw = cqr->cpaddr;
4221 if (prefix_LRE(ccw++, cqr->data, first_trk,
4229 dasd_sfree_request(cqr, startdev);
4240 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
4278 dasd_sfree_request(cqr, startdev);
4284 dasd_sfree_request(cqr, startdev);
4316 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4317 cqr->startdev = startdev;
4318 cqr->memdev = startdev;
4319 cqr->block = block;
4320 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4321 cqr->lpm = dasd_path_get_ppm(startdev);
4322 cqr->retries = startdev->default_retries;
4323 cqr->buildclk = get_tod_clock();
4324 cqr->status = DASD_CQR_FILLED;
4328 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4330 return cqr;
4502 struct dasd_ccw_req *cqr;
4548 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4550 if (IS_ERR(cqr))
4551 return cqr;
4560 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4565 cqr->cpaddr = itcw_get_tcw(itcw);
4639 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4640 cqr->cpmode = 1;
4641 cqr->startdev = startdev;
4642 cqr->memdev = startdev;
4643 cqr->block = block;
4644 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4645 cqr->lpm = dasd_path_get_ppm(startdev);
4646 cqr->retries = startdev->default_retries;
4647 cqr->buildclk = get_tod_clock();
4648 cqr->status = DASD_CQR_FILLED;
4652 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4653 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4654 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4657 return cqr;
4659 dasd_sfree_request(cqr, startdev);
4678 struct dasd_ccw_req *cqr;
4709 cqr = NULL;
4714 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4719 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4720 (PTR_ERR(cqr) != -ENOMEM))
4721 cqr = NULL;
4725 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4730 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4731 (PTR_ERR(cqr) != -ENOMEM))
4732 cqr = NULL;
4734 if (!cqr)
4735 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4740 return cqr;
4754 struct dasd_ccw_req *cqr;
4821 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4823 if (IS_ERR(cqr))
4824 return cqr;
4826 ccw = cqr->cpaddr;
4827 data = cqr->data;
4841 idaws = (unsigned long *)(cqr->data + size);
4881 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4882 cqr->startdev = startdev;
4883 cqr->memdev = startdev;
4884 cqr->block = block;
4885 cqr->expires = startdev->default_expires * HZ;
4886 cqr->lpm = dasd_path_get_ppm(startdev);
4887 cqr->retries = startdev->default_retries;
4888 cqr->buildclk = get_tod_clock();
4889 cqr->status = DASD_CQR_FILLED;
4891 return cqr;
4896 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4909 private = cqr->block->base->private;
4910 blksize = cqr->block->bp_block;
4912 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4913 ccw = cqr->cpaddr;
4942 status = cqr->status == DASD_CQR_DONE;
4943 dasd_sfree_request(cqr, cqr->memdev);
4948 * Modify ccw/tcw in cqr so it can be started on a base device.
4950 * Note that this is not enough to restart the cqr!
4951 * Either reset cqr->startdev as well (summary unit check handling)
4952 * or restart via separate cqr (as in ERP handling).
4954 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4962 if (cqr->cpmode == 1) {
4963 tcw = cqr->cpaddr;
4970 ccw = cqr->cpaddr;
4971 pfxdata = cqr->data;
4988 struct dasd_ccw_req *cqr;
5000 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
5002 cqr = dasd_eckd_build_cp(startdev, block, req);
5003 if (IS_ERR(cqr))
5006 return cqr;
5009 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
5015 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
5016 private = cqr->memdev->private;
5018 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
5019 return dasd_eckd_free_cp(cqr, req);
5053 struct dasd_ccw_req *cqr;
5062 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5063 if (IS_ERR(cqr)) {
5066 cqr = &dasd_reserve_req->cqr;
5067 memset(cqr, 0, sizeof(*cqr));
5070 cqr->cpaddr = &dasd_reserve_req->ccw;
5071 cqr->data = &dasd_reserve_req->data;
5072 cqr->magic = DASD_ECKD_MAGIC;
5074 ccw = cqr->cpaddr;
5078 ccw->cda = (__u32)virt_to_phys(cqr->data);
5079 cqr->startdev = device;
5080 cqr->memdev = device;
5081 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5082 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5083 cqr->retries = 2; /* set retry counter to enable basic ERP */
5084 cqr->expires = 2 * HZ;
5085 cqr->buildclk = get_tod_clock();
5086 cqr->status = DASD_CQR_FILLED;
5088 rc = dasd_sleep_on_immediatly(cqr);
5095 dasd_sfree_request(cqr, cqr->memdev);
5108 struct dasd_ccw_req *cqr;
5117 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5118 if (IS_ERR(cqr)) {
5121 cqr = &dasd_reserve_req->cqr;
5122 memset(cqr, 0, sizeof(*cqr));
5125 cqr->cpaddr = &dasd_reserve_req->ccw;
5126 cqr->data = &dasd_reserve_req->data;
5127 cqr->magic = DASD_ECKD_MAGIC;
5129 ccw = cqr->cpaddr;
5133 ccw->cda = (__u32)virt_to_phys(cqr->data);
5134 cqr->startdev = device;
5135 cqr->memdev = device;
5136 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5137 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5138 cqr->retries = 2; /* set retry counter to enable basic ERP */
5139 cqr->expires = 2 * HZ;
5140 cqr->buildclk = get_tod_clock();
5141 cqr->status = DASD_CQR_FILLED;
5143 rc = dasd_sleep_on_immediatly(cqr);
5150 dasd_sfree_request(cqr, cqr->memdev);
5162 struct dasd_ccw_req *cqr;
5171 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5172 if (IS_ERR(cqr)) {
5175 cqr = &dasd_reserve_req->cqr;
5176 memset(cqr, 0, sizeof(*cqr));
5179 cqr->cpaddr = &dasd_reserve_req->ccw;
5180 cqr->data = &dasd_reserve_req->data;
5181 cqr->magic = DASD_ECKD_MAGIC;
5183 ccw = cqr->cpaddr;
5187 ccw->cda = (__u32)virt_to_phys(cqr->data);
5188 cqr->startdev = device;
5189 cqr->memdev = device;
5190 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5191 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5192 cqr->retries = 2; /* set retry counter to enable basic ERP */
5193 cqr->expires = 2 * HZ;
5194 cqr->buildclk = get_tod_clock();
5195 cqr->status = DASD_CQR_FILLED;
5197 rc = dasd_sleep_on_immediatly(cqr);
5204 dasd_sfree_request(cqr, cqr->memdev);
5217 struct dasd_ccw_req *cqr;
5230 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
5233 if (IS_ERR(cqr)) {
5236 cqr = &dasd_reserve_req->cqr;
5237 memset(cqr, 0, sizeof(*cqr));
5240 cqr->cpaddr = &dasd_reserve_req->ccw;
5241 cqr->data = &dasd_reserve_req->data;
5242 cqr->magic = DASD_ECKD_MAGIC;
5244 ccw = cqr->cpaddr;
5248 ccw->cda = (__u32)virt_to_phys(cqr->data);
5249 cqr->startdev = device;
5250 cqr->memdev = device;
5251 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5252 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5253 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
5254 cqr->retries = 5;
5255 cqr->expires = 10 * HZ;
5256 cqr->buildclk = get_tod_clock();
5257 cqr->status = DASD_CQR_FILLED;
5258 cqr->lpm = usrparm.path_mask;
5260 rc = dasd_sleep_on_immediatly(cqr);
5262 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
5265 usrparm.data = *((struct dasd_snid_data *)cqr->data);
5273 dasd_sfree_request(cqr, cqr->memdev);
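The reserve, release, steal-lock and SNID ioctls above share one more detail: dasd_smalloc_request() is tried first, and only when it fails does the code fall back to the statically pre-allocated dasd_reserve_req, re-initialising its embedded CCW and data area by hand (the embedded struct dasd_ccw_req members near the top of this listing, and the &dasd_vol_info_req->cqr and &data->cqr matches, are the same pre-allocation idea). A reduced sketch of the fallback follows, with a stand-in for the static structure and without whatever serialisation the driver applies to the shared copy.

/* "sketch_static_req" stands in for the driver's statically allocated
 * request; locking of the shared structure is omitted in this sketch.
 */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 data[32];
} sketch_static_req;

static struct dasd_ccw_req *dasd_eckd_ioctl_cqr_sketch(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/* out of memory: reuse the pre-allocated request rather than fail */
		cqr = &sketch_static_req.cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&sketch_static_req.ccw, 0, sizeof(sketch_static_req.ccw));
		cqr->cpaddr = &sketch_static_req.ccw;
		cqr->data = &sketch_static_req.data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	return cqr;
}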
5285 struct dasd_ccw_req *cqr;
5289 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5293 if (IS_ERR(cqr)) {
5296 return PTR_ERR(cqr);
5298 cqr->startdev = device;
5299 cqr->memdev = device;
5300 cqr->retries = 0;
5301 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5302 cqr->expires = 10 * HZ;
5305 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5311 ccw = cqr->cpaddr;
5326 cqr->buildclk = get_tod_clock();
5327 cqr->status = DASD_CQR_FILLED;
5328 rc = dasd_sleep_on(cqr);
5330 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5336 dasd_sfree_request(cqr, cqr->memdev);
5397 struct dasd_ccw_req *cqr;
5446 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5447 if (IS_ERR(cqr)) {
5450 rc = PTR_ERR(cqr);
5454 cqr->startdev = device;
5455 cqr->memdev = device;
5456 cqr->retries = 3;
5457 cqr->expires = 10 * HZ;
5458 cqr->buildclk = get_tod_clock();
5459 cqr->status = DASD_CQR_FILLED;
5462 ccw = cqr->cpaddr;
5478 rc = dasd_sleep_on(cqr);
5489 dasd_sfree_request(cqr, cqr->memdev);
5909 struct dasd_ccw_req *cqr;
5913 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5917 if (IS_ERR(cqr)) {
5920 return PTR_ERR(cqr);
5923 cqr->lpm = lpum;
5925 cqr->startdev = device;
5926 cqr->memdev = device;
5927 cqr->block = NULL;
5928 cqr->expires = 10 * HZ;
5929 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5933 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5934 cqr->retries = 256;
5937 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5943 ccw = cqr->cpaddr;
5960 cqr->buildclk = get_tod_clock();
5961 cqr->status = DASD_CQR_FILLED;
5962 rc = dasd_sleep_on_immediatly(cqr);
5964 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5969 } else if (cqr->lpm) {
5975 cqr->lpm = 0;
5981 dasd_sfree_request(cqr, cqr->memdev);
5991 struct dasd_ccw_req *cqr;
6003 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
6006 if (IS_ERR(cqr)) {
6009 return PTR_ERR(cqr);
6013 dasd_sfree_request(cqr, device);
6018 cqr->startdev = device;
6019 cqr->memdev = device;
6020 cqr->block = NULL;
6021 cqr->retries = 256;
6022 cqr->expires = 10 * HZ;
6025 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
6034 ccw = cqr->cpaddr;
6048 cqr->buildclk = get_tod_clock();
6049 cqr->status = DASD_CQR_FILLED;
6051 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
6052 rc = dasd_sleep_on_interruptible(cqr);
6062 dasd_sfree_request(cqr, cqr->memdev);
6250 struct dasd_ccw_req *cqr;
6254 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
6257 if (IS_ERR(cqr)) {
6260 return PTR_ERR(cqr);
6262 cqr->startdev = device;
6263 cqr->memdev = device;
6264 cqr->block = NULL;
6265 cqr->retries = 256;
6266 cqr->expires = 10 * HZ;
6269 prssdp = (struct dasd_psf_prssd_data *)cqr->data;
6276 ccw = cqr->cpaddr;
6290 cqr->buildclk = get_tod_clock();
6291 cqr->status = DASD_CQR_FILLED;
6293 rc = dasd_sleep_on_interruptible(cqr);
6303 dasd_sfree_request(cqr, cqr->memdev);
6312 struct dasd_ccw_req *cqr;
6316 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
6317 if (IS_ERR(cqr)) {
6320 return PTR_ERR(cqr);
6322 cqr->startdev = device;
6323 cqr->memdev = device;
6324 cqr->block = NULL;
6325 cqr->retries = 1;
6326 cqr->expires = 10 * HZ;
6328 ccw = cqr->cpaddr;
6332 cqr->buildclk = get_tod_clock();
6333 cqr->status = DASD_CQR_FILLED;
6335 rc = dasd_sleep_on_interruptible(cqr);
6341 dasd_sfree_request(cqr, cqr->memdev);
6359 struct dasd_ccw_req *cqr;
6363 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
6367 if (IS_ERR(cqr)) {
6370 return PTR_ERR(cqr);
6373 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6380 ccw = cqr->cpaddr;
6386 cqr->startdev = device;
6387 cqr->memdev = device;
6388 cqr->block = NULL;
6389 cqr->retries = 256;
6390 cqr->expires = 10*HZ;
6391 cqr->buildclk = get_tod_clock();
6392 cqr->status = DASD_CQR_FILLED;
6393 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6395 rc = dasd_sleep_on(cqr);
6397 dasd_sfree_request(cqr, cqr->memdev);