Lines matching refs:pd (every line in drivers/block/pktcdvd.c that references pd; the number on each line is the source line number)
98 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
100 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
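A minimal userspace sketch of the rounding get_zone() performs, assuming pd->settings.size is a power of two expressed in 512-byte sectors; the helper name zone_of() and the 64-sector/offset-0 values below are only illustrative:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;	/* stand-in for the kernel type */

/* Round a sector down to the start of its packet-sized zone. */
static sector_t zone_of(sector_t sector, sector_t offset, sector_t pkt_size)
{
	return (sector + offset) & ~(sector_t)(pkt_size - 1);
}

int main(void)
{
	/* With a 64-sector (32 KiB) packet and no offset, sector 100 maps to zone 64. */
	printf("%llu\n", (unsigned long long)zone_of(100, 0, 64));
	return 0;
}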
122 struct pktcdvd_device *pd = dev_get_drvdata(dev);
124 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
131 struct pktcdvd_device *pd = dev_get_drvdata(dev);
133 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
140 struct pktcdvd_device *pd = dev_get_drvdata(dev);
142 return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
149 struct pktcdvd_device *pd = dev_get_drvdata(dev);
151 return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
158 struct pktcdvd_device *pd = dev_get_drvdata(dev);
160 return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
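The secs_* counters shown above are kept in 512-byte sectors, so the show handlers shift right by one to report kB. A trivial standalone illustration (the 4096-sector figure is made up):

#include <stdio.h>

int main(void)
{
	unsigned long secs_w = 4096;		/* example: sectors written */
	printf("%lukB\n", secs_w >> 1);		/* 512-byte sectors -> kB: prints 2048kB */
	return 0;
}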
167 struct pktcdvd_device *pd = dev_get_drvdata(dev);
170 pd->stats.pkt_started = 0;
171 pd->stats.pkt_ended = 0;
172 pd->stats.secs_w = 0;
173 pd->stats.secs_rg = 0;
174 pd->stats.secs_r = 0;
198 struct pktcdvd_device *pd = dev_get_drvdata(dev);
201 spin_lock(&pd->lock);
202 n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
203 spin_unlock(&pd->lock);
228 struct pktcdvd_device *pd = dev_get_drvdata(dev);
231 spin_lock(&pd->lock);
232 n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
233 spin_unlock(&pd->lock);
241 struct pktcdvd_device *pd = dev_get_drvdata(dev);
248 spin_lock(&pd->lock);
249 pd->write_congestion_off = val;
250 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
251 spin_unlock(&pd->lock);
259 struct pktcdvd_device *pd = dev_get_drvdata(dev);
262 spin_lock(&pd->lock);
263 n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
264 spin_unlock(&pd->lock);
272 struct pktcdvd_device *pd = dev_get_drvdata(dev);
279 spin_lock(&pd->lock);
280 pd->write_congestion_on = val;
281 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
282 spin_unlock(&pd->lock);
305 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
308 pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
309 MKDEV(0, 0), pd, pkt_groups,
310 "%s", pd->disk->disk_name);
311 if (IS_ERR(pd->dev))
312 pd->dev = NULL;
316 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
319 device_unregister(pd->dev);
337 struct pktcdvd_device *pd = pkt_devs[idx];
338 if (!pd)
341 pd->disk->disk_name,
342 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
343 MAJOR(pd->bdev->bd_dev),
344 MINOR(pd->bdev->bd_dev));
419 static void pkt_count_states(struct pktcdvd_device *pd, int *states)
427 spin_lock(&pd->cdrw.active_list_lock);
428 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
431 spin_unlock(&pd->cdrw.active_list_lock);
436 struct pktcdvd_device *pd = m->private;
440 seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev);
443 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
445 if (pd->settings.write_type == 0)
451 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
452 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
454 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
456 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
458 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
465 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
466 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
467 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
468 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
469 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
472 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
473 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
474 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
475 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
476 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
477 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
480 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
481 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
482 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
484 pkt_count_states(pd, states);
489 pd->write_congestion_off,
490 pd->write_congestion_on);
495 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
499 pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
500 if (!pd->dfs_d_root)
503 pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
504 pd, &pkt_seq_fops);
507 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
511 debugfs_remove(pd->dfs_f_info);
512 debugfs_remove(pd->dfs_d_root);
513 pd->dfs_f_info = NULL;
514 pd->dfs_d_root = NULL;
531 static void pkt_bio_finished(struct pktcdvd_device *pd)
533 struct device *ddev = disk_to_dev(pd->disk);
535 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
536 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
538 atomic_set(&pd->iosched.attention, 1);
539 wake_up(&pd->wqueue);
606 static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
610 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
612 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
615 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
618 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
622 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
625 pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
627 pkt_shrink_pktlist(pd);
631 pkt->pd = pd;
632 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
646 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
648 rb_erase(&node->rb_node, &pd->bio_queue);
649 mempool_free(node, &pd->rb_pool);
650 pd->bio_queue_size--;
651 BUG_ON(pd->bio_queue_size < 0);
655 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
657 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
659 struct rb_node *n = pd->bio_queue.rb_node;
664 BUG_ON(pd->bio_queue_size > 0);
689 * Insert a node into the pd->bio_queue rb tree.
691 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
693 struct rb_node **p = &pd->bio_queue.rb_node;
707 rb_insert_color(&node->rb_node, &pd->bio_queue);
708 pd->bio_queue_size++;
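pkt_rbtree_insert() and pkt_rbtree_erase() follow the stock <linux/rbtree.h> pattern, keyed on the bio's starting sector. Below is a reconstructed sketch of the insertion descent, not a verbatim copy of the driver; the struct and function names are stand-ins that mirror only the fields the listing shows:

#include <linux/rbtree.h>
#include <linux/bio.h>

struct sketch_rb_node {			/* stand-in for struct pkt_rb_node */
	struct rb_node rb_node;
	struct bio *bio;
};

static void sketch_rbtree_insert(struct rb_root *root, struct sketch_rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;

	/* Descend: left for smaller start sectors, right otherwise. */
	while (*p) {
		struct sketch_rb_node *tmp = rb_entry(*p, struct sketch_rb_node, rb_node);

		parent = *p;
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);	/* attach as a leaf */
	rb_insert_color(&node->rb_node, root);		/* rebalance/recolour */
}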
715 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
717 struct request_queue *q = bdev_get_queue(pd->bdev);
765 static void pkt_dump_sense(struct pktcdvd_device *pd,
768 struct device *ddev = disk_to_dev(pd->disk);
783 static int pkt_flush_cache(struct pktcdvd_device *pd)
798 return pkt_generic_packet(pd, &cgc);
804 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
817 ret = pkt_generic_packet(pd, &cgc);
819 pkt_dump_sense(pd, &cgc);
828 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
830 spin_lock(&pd->iosched.lock);
832 bio_list_add(&pd->iosched.read_queue, bio);
834 bio_list_add(&pd->iosched.write_queue, bio);
835 spin_unlock(&pd->iosched.lock);
837 atomic_set(&pd->iosched.attention, 1);
838 wake_up(&pd->wqueue);
857 static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
859 struct device *ddev = disk_to_dev(pd->disk);
861 if (atomic_read(&pd->iosched.attention) == 0)
863 atomic_set(&pd->iosched.attention, 0);
869 spin_lock(&pd->iosched.lock);
870 reads_queued = !bio_list_empty(&pd->iosched.read_queue);
871 writes_queued = !bio_list_empty(&pd->iosched.write_queue);
872 spin_unlock(&pd->iosched.lock);
877 if (pd->iosched.writing) {
879 spin_lock(&pd->iosched.lock);
880 bio = bio_list_peek(&pd->iosched.write_queue);
881 spin_unlock(&pd->iosched.lock);
883 pd->iosched.last_write))
886 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
890 pkt_flush_cache(pd);
891 pd->iosched.writing = 0;
895 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
899 pd->iosched.writing = 1;
903 spin_lock(&pd->iosched.lock);
904 if (pd->iosched.writing)
905 bio = bio_list_pop(&pd->iosched.write_queue);
907 bio = bio_list_pop(&pd->iosched.read_queue);
908 spin_unlock(&pd->iosched.lock);
914 pd->iosched.successive_reads +=
917 pd->iosched.successive_reads = 0;
918 pd->iosched.last_write = bio_end_sector(bio);
920 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
921 if (pd->read_speed == pd->write_speed) {
922 pd->read_speed = MAX_SPEED;
923 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
926 if (pd->read_speed != pd->write_speed) {
927 pd->read_speed = pd->write_speed;
928 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
932 atomic_inc(&pd->cdrw.pending_bios);
941 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
943 struct device *ddev = disk_to_dev(pd->disk);
945 if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
949 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
953 if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
958 set_bit(PACKET_MERGE_SEGS, &pd->flags);
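The arithmetic behind pkt_set_segment_merging(): pd->settings.size is in 512-byte sectors, so size << 9 is the packet size in bytes, which is then divided by CD_FRAMESIZE (2048) or PAGE_SIZE to count how many segments one packet needs against queue_max_segments(). A standalone illustration; the 64-sector packet and 4096-byte page are example values only:

#include <stdio.h>

#define CD_FRAMESIZE	2048		/* bytes per CD frame */
#define EXAMPLE_PAGE	4096		/* a typical PAGE_SIZE; not guaranteed */

int main(void)
{
	unsigned int size_sectors = 64;	/* example packet size in 512-byte sectors */
	unsigned int bytes = size_sectors << 9;

	/* 16 frame-sized segments, or 8 page-sized ones, for a 32 KiB packet;
	 * whichever fits under queue_max_segments() decides PACKET_MERGE_SEGS. */
	printf("frames per packet: %u\n", bytes / CD_FRAMESIZE);
	printf("pages per packet:  %u\n", bytes / EXAMPLE_PAGE);
	return 0;
}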
969 struct pktcdvd_device *pd = pkt->pd;
970 BUG_ON(!pd);
972 dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
980 wake_up(&pd->wqueue);
982 pkt_bio_finished(pd);
988 struct pktcdvd_device *pd = pkt->pd;
989 BUG_ON(!pd);
991 dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
993 pd->stats.pkt_ended++;
996 pkt_bio_finished(pd);
999 wake_up(&pd->wqueue);
1005 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1007 struct device *ddev = disk_to_dev(pd->disk);
1027 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
1050 bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
1063 pkt_queue_bio(pd, bio);
1069 pd->stats.pkt_started++;
1070 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
1077 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
1081 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
1082 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
1093 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1096 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
1098 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
1120 static int pkt_handle_queue(struct pktcdvd_device *pd)
1122 struct device *ddev = disk_to_dev(pd->disk);
1129 atomic_set(&pd->scan_queue, 0);
1131 if (list_empty(&pd->cdrw.pkt_free_list)) {
1139 spin_lock(&pd->lock);
1140 first_node = pkt_rbtree_find(pd, pd->current_sector);
1142 n = rb_first(&pd->bio_queue);
1149 zone = get_zone(bio->bi_iter.bi_sector, pd);
1150 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1160 n = rb_first(&pd->bio_queue);
1167 spin_unlock(&pd->lock);
1173 pkt = pkt_get_packet_data(pd, zone);
1175 pd->current_sector = zone + pd->settings.size;
1177 BUG_ON(pkt->frames != pd->settings.size >> 2);
1184 spin_lock(&pd->lock);
1186 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1187 sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
1193 pkt_rbtree_erase(pd, node);
1202 if (pd->congested &&
1203 pd->bio_queue_size <= pd->write_congestion_off) {
1204 pd->congested = false;
1205 wake_up_var(&pd->congested);
1207 spin_unlock(&pd->lock);
1213 spin_lock(&pd->cdrw.active_list_lock);
1214 list_add(&pkt->list, &pd->cdrw.pkt_active_list);
1215 spin_unlock(&pd->cdrw.active_list_lock);
1260 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1262 struct device *ddev = disk_to_dev(pd->disk);
1265 bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
1292 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
1299 pkt_queue_bio(pd, pkt->w_bio);
1316 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1318 struct device *ddev = disk_to_dev(pd->disk);
1329 pkt_gather_data(pd, pkt);
1340 pkt_start_write(pd, pkt);
1371 static void pkt_handle_packets(struct pktcdvd_device *pd)
1373 struct device *ddev = disk_to_dev(pd->disk);
1379 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1382 pkt_run_state_machine(pd, pkt);
1389 spin_lock(&pd->cdrw.active_list_lock);
1390 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1393 pkt_put_packet_data(pd, pkt);
1395 atomic_set(&pd->scan_queue, 1);
1398 spin_unlock(&pd->cdrw.active_list_lock);
1407 struct pktcdvd_device *pd = foobar;
1408 struct device *ddev = disk_to_dev(pd->disk);
1422 add_wait_queue(&pd->wqueue, &wait);
1427 if (atomic_read(&pd->scan_queue) > 0)
1431 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1437 if (atomic_read(&pd->iosched.attention) != 0)
1441 pkt_count_states(pd, states);
1446 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1458 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1473 remove_wait_queue(&pd->wqueue, &wait);
1482 while (pkt_handle_queue(pd))
1488 pkt_handle_packets(pd);
1493 pkt_iosched_process_queue(pd);
1499 static void pkt_print_settings(struct pktcdvd_device *pd)
1501 dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
1502 pd->settings.fp ? "Fixed" : "Variable",
1503 pd->settings.size >> 2,
1504 pd->settings.block_mode == 8 ? '1' : '2');
1507 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1515 return pkt_generic_packet(pd, cgc);
1518 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1526 return pkt_generic_packet(pd, cgc);
1529 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1540 ret = pkt_generic_packet(pd, &cgc);
1554 return pkt_generic_packet(pd, &cgc);
1557 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1569 ret = pkt_generic_packet(pd, &cgc);
1580 return pkt_generic_packet(pd, &cgc);
1583 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1591 ret = pkt_get_disc_info(pd, &di);
1596 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1603 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1622 * write mode select package based on pd->settings
1624 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1626 struct device *ddev = disk_to_dev(pd->disk);
1634 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1640 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1642 pkt_dump_sense(pd, &cgc);
1647 pd->mode_offset = get_unaligned_be16(&buffer[6]);
1656 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1658 pkt_dump_sense(pd, &cgc);
1665 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1667 wp->fp = pd->settings.fp;
1668 wp->track_mode = pd->settings.track_mode;
1669 wp->write_type = pd->settings.write_type;
1670 wp->data_block_type = pd->settings.block_mode;
1696 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1699 ret = pkt_mode_select(pd, &cgc);
1701 pkt_dump_sense(pd, &cgc);
1705 pkt_print_settings(pd);
1712 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1714 struct device *ddev = disk_to_dev(pd->disk);
1716 switch (pd->mmc3_profile) {
1747 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1749 struct device *ddev = disk_to_dev(pd->disk);
1751 switch (pd->mmc3_profile) {
1760 dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
1791 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1793 struct device *ddev = disk_to_dev(pd->disk);
1803 ret = pkt_generic_packet(pd, &cgc);
1804 pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
1809 ret = pkt_get_disc_info(pd, &di);
1815 if (!pkt_writable_disc(pd, &di))
1818 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1821 ret = pkt_get_track_info(pd, track, 1, &ti);
1827 if (!pkt_writable_track(pd, &ti)) {
1836 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1837 if (pd->settings.size == 0) {
1841 if (pd->settings.size > PACKET_MAX_SECTORS) {
1845 pd->settings.fp = ti.fp;
1846 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1849 pd->nwa = be32_to_cpu(ti.next_writable);
1850 set_bit(PACKET_NWA_VALID, &pd->flags);
1859 pd->lra = be32_to_cpu(ti.last_rec_address);
1860 set_bit(PACKET_LRA_VALID, &pd->flags);
1862 pd->lra = 0xffffffff;
1863 set_bit(PACKET_LRA_VALID, &pd->flags);
1869 pd->settings.link_loss = 7;
1870 pd->settings.write_type = 0; /* packet */
1871 pd->settings.track_mode = ti.track_mode;
1878 pd->settings.block_mode = PACKET_BLOCK_MODE1;
1881 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1893 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
1895 struct device *ddev = disk_to_dev(pd->disk);
1904 cgc.buflen = pd->mode_offset + 12;
1911 ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
1920 buf[pd->mode_offset + 10] |= (set << 2);
1923 ret = pkt_mode_select(pd, &cgc);
1926 pkt_dump_sense(pd, &cgc);
1932 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1939 return pkt_generic_packet(pd, &cgc);
1945 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1954 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1958 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1960 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1962 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1964 pkt_dump_sense(pd, &cgc);
2006 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
2009 struct device *ddev = disk_to_dev(pd->disk);
2022 ret = pkt_generic_packet(pd, &cgc);
2024 pkt_dump_sense(pd, &cgc);
2037 ret = pkt_generic_packet(pd, &cgc);
2039 pkt_dump_sense(pd, &cgc);
2080 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2082 struct device *ddev = disk_to_dev(pd->disk);
2094 ret = pkt_generic_packet(pd, &cgc);
2096 pkt_dump_sense(pd, &cgc);
2100 static int pkt_open_write(struct pktcdvd_device *pd)
2102 struct device *ddev = disk_to_dev(pd->disk);
2106 ret = pkt_probe_settings(pd);
2112 ret = pkt_set_write_settings(pd);
2118 pkt_write_caching(pd);
2120 ret = pkt_get_max_speed(pd, &write_speed);
2123 switch (pd->mmc3_profile) {
2130 ret = pkt_media_speed(pd, &media_write_speed);
2139 ret = pkt_set_speed(pd, write_speed, read_speed);
2144 pd->write_speed = write_speed;
2145 pd->read_speed = read_speed;
2147 ret = pkt_perform_opc(pd);
2157 static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
2159 struct device *ddev = disk_to_dev(pd->disk);
2170 bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL);
2176 ret = pkt_get_last_written(pd, &lba);
2182 set_capacity(pd->disk, lba << 2);
2183 set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
2185 q = bdev_get_queue(pd->bdev);
2187 ret = pkt_open_write(pd);
2194 blk_queue_max_hw_sectors(q, pd->settings.size);
2195 set_bit(PACKET_WRITABLE, &pd->flags);
2197 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2198 clear_bit(PACKET_WRITABLE, &pd->flags);
2201 ret = pkt_set_segment_merging(pd, q);
2206 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2217 blkdev_put(bdev, pd);
2226 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2228 struct device *ddev = disk_to_dev(pd->disk);
2230 if (flush && pkt_flush_cache(pd))
2233 pkt_lock_door(pd, 0);
2235 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2236 blkdev_put(pd->bdev, pd);
2238 pkt_shrink_pktlist(pd);
2252 struct pktcdvd_device *pd = NULL;
2257 pd = pkt_find_dev_from_minor(disk->first_minor);
2258 if (!pd) {
2262 BUG_ON(pd->refcnt < 0);
2264 pd->refcnt++;
2265 if (pd->refcnt > 1) {
2267 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2272 ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
2286 pd->refcnt--;
2295 struct pktcdvd_device *pd = disk->private_data;
2299 pd->refcnt--;
2300 BUG_ON(pd->refcnt < 0);
2301 if (pd->refcnt == 0) {
2302 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2303 pkt_release_dev(pd, flush);
2313 struct pktcdvd_device *pd = psd->pd;
2319 pkt_bio_finished(pd);
2322 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2325 bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
2328 psd->pd = pd;
2332 pd->stats.secs_r += bio_sectors(bio);
2333 pkt_queue_bio(pd, cloned_bio);
2338 struct pktcdvd_device *pd = q->queuedata;
2344 zone = get_zone(bio->bi_iter.bi_sector, pd);
2350 spin_lock(&pd->cdrw.active_list_lock);
2352 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2363 wake_up(&pd->wqueue);
2366 spin_unlock(&pd->cdrw.active_list_lock);
2374 spin_unlock(&pd->cdrw.active_list_lock);
2381 spin_lock(&pd->lock);
2382 if (pd->write_congestion_on > 0
2383 && pd->bio_queue_size >= pd->write_congestion_on) {
2386 init_wait_var_entry(&wqe, &pd->congested, 0);
2388 prepare_to_wait_event(__var_waitqueue(&pd->congested),
2391 if (pd->bio_queue_size <= pd->write_congestion_off)
2393 pd->congested = true;
2394 spin_unlock(&pd->lock);
2396 spin_lock(&pd->lock);
2399 spin_unlock(&pd->lock);
2404 node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
2406 spin_lock(&pd->lock);
2407 BUG_ON(pd->bio_queue_size < 0);
2408 was_empty = (pd->bio_queue_size == 0);
2409 pkt_rbtree_insert(pd, node);
2410 spin_unlock(&pd->lock);
2415 atomic_set(&pd->scan_queue, 1);
2418 wake_up(&pd->wqueue);
2419 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2424 wake_up(&pd->wqueue);
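The write-congestion handshake spans two places in this listing: the submitter blocks around source lines 2381-2399 when bio_queue_size reaches write_congestion_on, and pkt_handle_queue wakes it at lines 1202-1205 once the queue drains below write_congestion_off. Below is a simplified, reconstructed sketch of that pairing; the driver itself open-codes the wait with init_wait_var_entry()/prepare_to_wait_event() so it can re-check under pd->lock, and the struct and helper names here are stand-ins, not driver code:

#include <linux/spinlock.h>
#include <linux/wait_bit.h>	/* wait_var_event(), wake_up_var() */

struct sketch_congestion {	/* stand-in for the relevant pktcdvd_device fields */
	spinlock_t lock;
	int bio_queue_size;
	int write_congestion_on;
	int write_congestion_off;
	bool congested;
};

/* Submitter side: block while the queued-bio count is at or above the
 * "on" mark, sleeping until the worker clears ->congested. */
static void sketch_throttle_writer(struct sketch_congestion *c)
{
	spin_lock(&c->lock);
	while (c->write_congestion_on > 0 &&
	       c->bio_queue_size >= c->write_congestion_on) {
		c->congested = true;
		spin_unlock(&c->lock);
		wait_var_event(&c->congested, !c->congested);
		spin_lock(&c->lock);
	}
	spin_unlock(&c->lock);
}

/* Worker side: once the queue has drained below the "off" mark, clear the
 * flag and wake any blocked submitter (mirrors source lines 1202-1205). */
static void sketch_unthrottle_writer(struct sketch_congestion *c)
{
	spin_lock(&c->lock);
	if (c->congested && c->bio_queue_size <= c->write_congestion_off) {
		c->congested = false;
		wake_up_var(&c->congested);
	}
	spin_unlock(&c->lock);
}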
2430 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
2431 struct device *ddev = disk_to_dev(pd->disk);
2445 pkt_make_request_read(pd, bio);
2449 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2460 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2461 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2464 BUG_ON(last_zone != zone + pd->settings.size);
2482 static void pkt_init_queue(struct pktcdvd_device *pd)
2484 struct request_queue *q = pd->disk->queue;
2488 q->queuedata = pd;
2491 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2493 struct device *ddev = disk_to_dev(pd->disk);
2498 if (pd->pkt_dev == dev) {
2530 pd->bdev = bdev;
2533 pkt_init_queue(pd);
2535 atomic_set(&pd->cdrw.pending_bios, 0);
2536 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
2537 if (IS_ERR(pd->cdrw.thread)) {
2542 proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
2556 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2557 struct device *ddev = disk_to_dev(pd->disk);
2569 if (pd->refcnt == 1)
2570 pkt_lock_door(pd, 0);
2597 struct pktcdvd_device *pd = disk->private_data;
2600 if (!pd)
2602 if (!pd->bdev)
2604 attached_disk = pd->bdev->bd_disk;
2633 struct pktcdvd_device *pd;
2647 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2648 if (!pd)
2651 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
2656 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2657 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2658 spin_lock_init(&pd->cdrw.active_list_lock);
2660 spin_lock_init(&pd->lock);
2661 spin_lock_init(&pd->iosched.lock);
2662 bio_list_init(&pd->iosched.read_queue);
2663 bio_list_init(&pd->iosched.write_queue);
2664 init_waitqueue_head(&pd->wqueue);
2665 pd->bio_queue = RB_ROOT;
2667 pd->write_congestion_on = write_congestion_on;
2668 pd->write_congestion_off = write_congestion_off;
2674 pd->disk = disk;
2681 disk->private_data = pd;
2683 pd->pkt_dev = MKDEV(pktdev_major, idx);
2684 ret = pkt_new_dev(pd, dev);
2689 disk->events = pd->bdev->bd_disk->events;
2695 pkt_sysfs_dev_new(pd);
2696 pkt_debugfs_dev_new(pd);
2698 pkt_devs[idx] = pd;
2700 *pkt_dev = pd->pkt_dev;
2708 mempool_exit(&pd->rb_pool);
2709 kfree(pd);
2721 struct pktcdvd_device *pd;
2729 pd = pkt_devs[idx];
2730 if (pd && (pd->pkt_dev == pkt_dev))
2739 if (pd->refcnt > 0) {
2744 ddev = disk_to_dev(pd->disk);
2746 if (!IS_ERR(pd->cdrw.thread))
2747 kthread_stop(pd->cdrw.thread);
2751 pkt_debugfs_dev_remove(pd);
2752 pkt_sysfs_dev_remove(pd);
2754 blkdev_put(pd->bdev, NULL);
2756 remove_proc_entry(pd->disk->disk_name, pkt_proc);
2759 del_gendisk(pd->disk);
2760 put_disk(pd->disk);
2762 mempool_exit(&pd->rb_pool);
2763 kfree(pd);
2775 struct pktcdvd_device *pd;
2779 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2780 if (pd) {
2781 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2782 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);