Lines matching references to pd (struct pktcdvd_device *) in the pktcdvd packet-writing driver
75 #define pkt_err(pd, fmt, ...) \
76 pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
77 #define pkt_notice(pd, fmt, ...) \
78 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
79 #define pkt_info(pd, fmt, ...) \
80 pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
82 #define pkt_dbg(level, pd, fmt, ...) \
86 pd->name, __func__, ##__VA_ARGS__); \
88 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
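
The wrappers above simply prepend the device name to pr_err/pr_notice/pr_info, and pkt_dbg additionally includes __func__ in its more verbose branch (line 86). A minimal userspace analogue of the same pattern, with the struct trimmed to just the name field for illustration:

	#include <stdio.h>

	struct pktcdvd_device { char name[20]; };

	/* Prefix every message with the device name, as the pkt_* macros do.
	 * The GNU ##__VA_ARGS__ extension swallows the comma when no extra
	 * arguments are passed. */
	#define pkt_info(pd, fmt, ...) \
		printf("%s: " fmt, (pd)->name, ##__VA_ARGS__)

	int main(void)
	{
		struct pktcdvd_device pd = { .name = "pktcdvd0" };

		pkt_info(&pd, "maximum media speed: %d\n", 4);
		/* prints: pktcdvd0: maximum media speed: 4 */
		return 0;
	}
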
111 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
113 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
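
get_zone() rounds a sector (shifted by the track offset) down to the start of its packet zone; the mask is a plain power-of-two round-down. A standalone sketch of the same arithmetic, with offset and size passed as parameters rather than read from pd, and assuming settings.size is a power-of-two sector count such as the common 128-sector fixed packet:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t sector_t;

	/* Round (sector + offset) down to a multiple of size; size must be a power of two. */
	static sector_t get_zone(sector_t sector, sector_t offset, sector_t size)
	{
		return (sector + offset) & ~(sector_t)(size - 1);
	}

	int main(void)
	{
		/* With 128-sector packets and no offset, sector 300 lands in the zone starting at 256. */
		printf("%llu\n", (unsigned long long)get_zone(300, 0, 128));
		return 0;
	}
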
119 static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
130 p->pd = pd;
210 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
214 n = sprintf(data, "%lu\n", pd->stats.pkt_started);
217 n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
220 n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
223 n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
226 n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
229 spin_lock(&pd->lock);
230 v = pd->bio_queue_size;
231 spin_unlock(&pd->lock);
235 spin_lock(&pd->lock);
236 v = pd->write_congestion_off;
237 spin_unlock(&pd->lock);
241 spin_lock(&pd->lock);
242 v = pd->write_congestion_on;
243 spin_unlock(&pd->lock);
270 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
274 pd->stats.pkt_started = 0;
275 pd->stats.pkt_ended = 0;
276 pd->stats.secs_w = 0;
277 pd->stats.secs_rg = 0;
278 pd->stats.secs_r = 0;
282 spin_lock(&pd->lock);
283 pd->write_congestion_off = val;
284 init_write_congestion_marks(&pd->write_congestion_off,
285 &pd->write_congestion_on);
286 spin_unlock(&pd->lock);
290 spin_lock(&pd->lock);
291 pd->write_congestion_on = val;
292 init_write_congestion_marks(&pd->write_congestion_off,
293 &pd->write_congestion_on);
294 spin_unlock(&pd->lock);
314 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
317 pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
318 "%s", pd->name);
319 if (IS_ERR(pd->dev))
320 pd->dev = NULL;
322 if (pd->dev) {
323 pd->kobj_stat = pkt_kobj_create(pd, "stat",
324 &pd->dev->kobj,
326 pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
327 &pd->dev->kobj,
332 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
334 pkt_kobj_remove(pd->kobj_stat);
335 pkt_kobj_remove(pd->kobj_wqueue);
337 device_unregister(pd->dev);
360 struct pktcdvd_device *pd = pkt_devs[idx];
361 if (!pd)
364 pd->name,
365 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
366 MAJOR(pd->bdev->bd_dev),
367 MINOR(pd->bdev->bd_dev));
473 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
477 pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
478 if (!pd->dfs_d_root)
481 pd->dfs_f_info = debugfs_create_file("info", 0444,
482 pd->dfs_d_root, pd, &debug_fops);
485 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
489 debugfs_remove(pd->dfs_f_info);
490 debugfs_remove(pd->dfs_d_root);
491 pd->dfs_f_info = NULL;
492 pd->dfs_d_root = NULL;
509 static void pkt_bio_finished(struct pktcdvd_device *pd)
511 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
512 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
513 pkt_dbg(2, pd, "queue empty\n");
514 atomic_set(&pd->iosched.attention, 1);
515 wake_up(&pd->wqueue);
591 static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
595 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
597 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
600 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
603 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
607 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
610 pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
612 pkt_shrink_pktlist(pd);
616 pkt->pd = pd;
617 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
631 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
633 rb_erase(&node->rb_node, &pd->bio_queue);
634 mempool_free(node, &pd->rb_pool);
635 pd->bio_queue_size--;
636 BUG_ON(pd->bio_queue_size < 0);
640 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
642 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
644 struct rb_node *n = pd->bio_queue.rb_node;
649 BUG_ON(pd->bio_queue_size > 0);
674 * Insert a node into the pd->bio_queue rb tree.
676 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
678 struct rb_node **p = &pd->bio_queue.rb_node;
692 rb_insert_color(&node->rb_node, &pd->bio_queue);
693 pd->bio_queue_size++;
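
Only the pd-referencing lines of pkt_rbtree_insert() are listed; the omitted body is the usual kernel rbtree descend-and-link walk keyed by the bio's starting sector. A hedged sketch of that pattern (the node layout and comparison are assumptions for illustration, not lines copied from the driver):

	#include <linux/rbtree.h>
	#include <linux/bio.h>

	/* Assumed minimal node layout: an rb_node keyed by the queued bio's start sector. */
	struct pkt_rb_node {
		struct rb_node rb_node;
		struct bio *bio;
	};

	static void pkt_rbtree_insert_sketch(struct rb_root *root, struct pkt_rb_node *node)
	{
		struct rb_node **p = &root->rb_node;
		struct rb_node *parent = NULL;
		sector_t s = node->bio->bi_iter.bi_sector;

		while (*p) {
			struct pkt_rb_node *tmp = rb_entry(*p, struct pkt_rb_node, rb_node);

			parent = *p;
			if (s < tmp->bio->bi_iter.bi_sector)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		rb_link_node(&node->rb_node, parent, p);   /* hook in as a leaf ... */
		rb_insert_color(&node->rb_node, root);     /* ... then rebalance/recolor */
	}
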
700 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
702 struct request_queue *q = bdev_get_queue(pd->bdev);
725 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
748 static void pkt_dump_sense(struct pktcdvd_device *pd,
754 pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
759 pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
765 static int pkt_flush_cache(struct pktcdvd_device *pd)
780 return pkt_generic_packet(pd, &cgc);
786 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
801 ret = pkt_generic_packet(pd, &cgc);
803 pkt_dump_sense(pd, &cgc);
812 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
814 spin_lock(&pd->iosched.lock);
816 bio_list_add(&pd->iosched.read_queue, bio);
818 bio_list_add(&pd->iosched.write_queue, bio);
819 spin_unlock(&pd->iosched.lock);
821 atomic_set(&pd->iosched.attention, 1);
822 wake_up(&pd->wqueue);
841 static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
844 if (atomic_read(&pd->iosched.attention) == 0)
846 atomic_set(&pd->iosched.attention, 0);
852 spin_lock(&pd->iosched.lock);
853 reads_queued = !bio_list_empty(&pd->iosched.read_queue);
854 writes_queued = !bio_list_empty(&pd->iosched.write_queue);
855 spin_unlock(&pd->iosched.lock);
860 if (pd->iosched.writing) {
862 spin_lock(&pd->iosched.lock);
863 bio = bio_list_peek(&pd->iosched.write_queue);
864 spin_unlock(&pd->iosched.lock);
866 pd->iosched.last_write))
869 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
870 pkt_dbg(2, pd, "write, waiting\n");
873 pkt_flush_cache(pd);
874 pd->iosched.writing = 0;
878 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
879 pkt_dbg(2, pd, "read, waiting\n");
882 pd->iosched.writing = 1;
886 spin_lock(&pd->iosched.lock);
887 if (pd->iosched.writing)
888 bio = bio_list_pop(&pd->iosched.write_queue);
890 bio = bio_list_pop(&pd->iosched.read_queue);
891 spin_unlock(&pd->iosched.lock);
897 pd->iosched.successive_reads +=
900 pd->iosched.successive_reads = 0;
901 pd->iosched.last_write = bio_end_sector(bio);
903 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
904 if (pd->read_speed == pd->write_speed) {
905 pd->read_speed = MAX_SPEED;
906 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
909 if (pd->read_speed != pd->write_speed) {
910 pd->read_speed = pd->write_speed;
911 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
915 atomic_inc(&pd->cdrw.pending_bios);
924 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
926 if ((pd->settings.size << 9) / CD_FRAMESIZE
931 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
933 } else if ((pd->settings.size << 9) / PAGE_SIZE
939 set_bit(PACKET_MERGE_SEGS, &pd->flags);
942 pkt_err(pd, "cdrom max_phys_segments too small\n");
950 struct pktcdvd_device *pd = pkt->pd;
951 BUG_ON(!pd);
953 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
961 wake_up(&pd->wqueue);
963 pkt_bio_finished(pd);
969 struct pktcdvd_device *pd = pkt->pd;
970 BUG_ON(!pd);
972 pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
974 pd->stats.pkt_ended++;
976 pkt_bio_finished(pd);
979 wake_up(&pd->wqueue);
985 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1006 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
1015 pkt_dbg(2, pd, "zone %llx cached\n",
1032 bio_set_dev(bio, pd->bdev);
1038 pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
1045 pkt_queue_bio(pd, bio);
1050 pkt_dbg(2, pd, "need %d frames for zone %llx\n",
1052 pd->stats.pkt_started++;
1053 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
1060 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
1064 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
1065 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
1076 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1079 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
1081 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
1092 pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
1103 static int pkt_handle_queue(struct pktcdvd_device *pd)
1112 atomic_set(&pd->scan_queue, 0);
1114 if (list_empty(&pd->cdrw.pkt_free_list)) {
1115 pkt_dbg(2, pd, "no pkt\n");
1122 spin_lock(&pd->lock);
1123 first_node = pkt_rbtree_find(pd, pd->current_sector);
1125 n = rb_first(&pd->bio_queue);
1132 zone = get_zone(bio->bi_iter.bi_sector, pd);
1133 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1143 n = rb_first(&pd->bio_queue);
1150 spin_unlock(&pd->lock);
1152 pkt_dbg(2, pd, "no bio\n");
1156 pkt = pkt_get_packet_data(pd, zone);
1158 pd->current_sector = zone + pd->settings.size;
1160 BUG_ON(pkt->frames != pd->settings.size >> 2);
1167 spin_lock(&pd->lock);
1168 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
1169 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1171 pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
1172 get_zone(bio->bi_iter.bi_sector, pd));
1173 if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
1175 pkt_rbtree_erase(pd, node);
1183 wakeup = (pd->write_congestion_on > 0
1184 && pd->bio_queue_size <= pd->write_congestion_off);
1185 spin_unlock(&pd->lock);
1187 clear_bdi_congested(pd->disk->queue->backing_dev_info,
1195 spin_lock(&pd->cdrw.active_list_lock);
1196 list_add(&pkt->list, &pd->cdrw.pkt_active_list);
1197 spin_unlock(&pd->cdrw.active_list_lock);
1206 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1212 bio_set_dev(pkt->w_bio, pd->bdev);
1224 pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
1235 pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
1238 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
1246 pkt_queue_bio(pd, pkt->w_bio);
1263 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1265 pkt_dbg(2, pd, "pkt %d\n", pkt->id);
1274 pkt_gather_data(pd, pkt);
1285 pkt_start_write(pd, pkt);
1301 pkt_dbg(2, pd, "No recovery possible\n");
1316 static void pkt_handle_packets(struct pktcdvd_device *pd)
1323 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1326 pkt_run_state_machine(pd, pkt);
1333 spin_lock(&pd->cdrw.active_list_lock);
1334 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1337 pkt_put_packet_data(pd, pkt);
1339 atomic_set(&pd->scan_queue, 1);
1342 spin_unlock(&pd->cdrw.active_list_lock);
1345 static void pkt_count_states(struct pktcdvd_device *pd, int *states)
1353 spin_lock(&pd->cdrw.active_list_lock);
1354 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1357 spin_unlock(&pd->cdrw.active_list_lock);
1366 struct pktcdvd_device *pd = foobar;
1379 add_wait_queue(&pd->wqueue, &wait);
1384 if (atomic_read(&pd->scan_queue) > 0)
1388 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1394 if (atomic_read(&pd->iosched.attention) != 0)
1400 pkt_count_states(pd, states);
1401 pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
1407 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1412 pkt_dbg(2, pd, "sleeping\n");
1414 pkt_dbg(2, pd, "wake up\n");
1419 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1434 remove_wait_queue(&pd->wqueue, &wait);
1443 while (pkt_handle_queue(pd))
1449 pkt_handle_packets(pd);
1454 pkt_iosched_process_queue(pd);
1460 static void pkt_print_settings(struct pktcdvd_device *pd)
1462 pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
1463 pd->settings.fp ? "Fixed" : "Variable",
1464 pd->settings.size >> 2,
1465 pd->settings.block_mode == 8 ? '1' : '2');
1468 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1477 return pkt_generic_packet(pd, cgc);
1480 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1489 return pkt_generic_packet(pd, cgc);
1492 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1503 ret = pkt_generic_packet(pd, &cgc);
1517 return pkt_generic_packet(pd, &cgc);
1520 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1533 ret = pkt_generic_packet(pd, &cgc);
1544 return pkt_generic_packet(pd, &cgc);
1547 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1555 ret = pkt_get_disc_info(pd, &di);
1560 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1567 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1586 * write mode select package based on pd->settings
1588 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1597 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1603 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1605 pkt_dump_sense(pd, &cgc);
1610 pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1619 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1621 pkt_dump_sense(pd, &cgc);
1628 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1630 wp->fp = pd->settings.fp;
1631 wp->track_mode = pd->settings.track_mode;
1632 wp->write_type = pd->settings.write_type;
1633 wp->data_block_type = pd->settings.block_mode;
1656 pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
1659 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1662 ret = pkt_mode_select(pd, &cgc);
1664 pkt_dump_sense(pd, &cgc);
1668 pkt_print_settings(pd);
1675 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1677 switch (pd->mmc3_profile) {
1701 pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1708 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1710 switch (pd->mmc3_profile) {
1719 pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
1720 pd->mmc3_profile);
1729 pkt_notice(pd, "unknown disc - no track?\n");
1734 pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
1739 pkt_notice(pd, "disc not erasable\n");
1744 pkt_err(pd, "can't write to last track (reserved)\n");
1751 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1762 ret = pkt_generic_packet(pd, &cgc);
1763 pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
1768 ret = pkt_get_disc_info(pd, &di);
1770 pkt_err(pd, "failed get_disc\n");
1774 if (!pkt_writable_disc(pd, &di))
1777 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1780 ret = pkt_get_track_info(pd, track, 1, &ti);
1782 pkt_err(pd, "failed get_track\n");
1786 if (!pkt_writable_track(pd, &ti)) {
1787 pkt_err(pd, "can't write to this track\n");
1795 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1796 if (pd->settings.size == 0) {
1797 pkt_notice(pd, "detected zero packet size!\n");
1800 if (pd->settings.size > PACKET_MAX_SECTORS) {
1801 pkt_err(pd, "packet size is too big\n");
1804 pd->settings.fp = ti.fp;
1805 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1808 pd->nwa = be32_to_cpu(ti.next_writable);
1809 set_bit(PACKET_NWA_VALID, &pd->flags);
1818 pd->lra = be32_to_cpu(ti.last_rec_address);
1819 set_bit(PACKET_LRA_VALID, &pd->flags);
1821 pd->lra = 0xffffffff;
1822 set_bit(PACKET_LRA_VALID, &pd->flags);
1828 pd->settings.link_loss = 7;
1829 pd->settings.write_type = 0; /* packet */
1830 pd->settings.track_mode = ti.track_mode;
1837 pd->settings.block_mode = PACKET_BLOCK_MODE1;
1840 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1843 pkt_err(pd, "unknown data mode\n");
1852 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1862 cgc.buflen = pd->mode_offset + 12;
1869 ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
1873 buf[pd->mode_offset + 10] |= (!!set << 2);
1876 ret = pkt_mode_select(pd, &cgc);
1878 pkt_err(pd, "write caching control failed\n");
1879 pkt_dump_sense(pd, &cgc);
1881 pkt_notice(pd, "enabled write caching\n");
1885 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1892 return pkt_generic_packet(pd, &cgc);
1898 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1907 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1911 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1913 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1915 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1917 pkt_dump_sense(pd, &cgc);
1959 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
1974 ret = pkt_generic_packet(pd, &cgc);
1976 pkt_dump_sense(pd, &cgc);
1989 ret = pkt_generic_packet(pd, &cgc);
1991 pkt_dump_sense(pd, &cgc);
1996 pkt_notice(pd, "disc type is not CD-RW\n");
2000 pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
2020 pkt_notice(pd, "unknown disc sub-type %d\n", st);
2024 pkt_info(pd, "maximum media speed: %d\n", *speed);
2027 pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
2032 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2038 pkt_dbg(2, pd, "Performing OPC\n");
2045 ret = pkt_generic_packet(pd, &cgc);
2047 pkt_dump_sense(pd, &cgc);
2051 static int pkt_open_write(struct pktcdvd_device *pd)
2056 ret = pkt_probe_settings(pd);
2058 pkt_dbg(2, pd, "failed probe\n");
2062 ret = pkt_set_write_settings(pd);
2064 pkt_dbg(1, pd, "failed saving write settings\n");
2068 pkt_write_caching(pd, USE_WCACHING);
2070 ret = pkt_get_max_speed(pd, &write_speed);
2073 switch (pd->mmc3_profile) {
2077 pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
2080 ret = pkt_media_speed(pd, &media_write_speed);
2084 pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
2089 ret = pkt_set_speed(pd, write_speed, read_speed);
2091 pkt_dbg(1, pd, "couldn't set write speed\n");
2094 pd->write_speed = write_speed;
2095 pd->read_speed = read_speed;
2097 ret = pkt_perform_opc(pd);
2099 pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
2108 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2120 bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
2126 ret = pkt_get_last_written(pd, &lba);
2128 pkt_err(pd, "pkt_get_last_written failed\n");
2132 set_capacity(pd->disk, lba << 2);
2133 set_capacity(pd->bdev->bd_disk, lba << 2);
2134 bd_set_nr_sectors(pd->bdev, lba << 2);
2136 q = bdev_get_queue(pd->bdev);
2138 ret = pkt_open_write(pd);
2145 blk_queue_max_hw_sectors(q, pd->settings.size);
2146 set_bit(PACKET_WRITABLE, &pd->flags);
2148 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2149 clear_bit(PACKET_WRITABLE, &pd->flags);
2152 ret = pkt_set_segment_merging(pd, q);
2157 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2158 pkt_err(pd, "not enough memory for buffers\n");
2162 pkt_info(pd, "%lukB available on disc\n", lba << 1);
2177 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2179 if (flush && pkt_flush_cache(pd))
2180 pkt_dbg(1, pd, "not flushing cache\n");
2182 pkt_lock_door(pd, 0);
2184 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2185 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2187 pkt_shrink_pktlist(pd);
2201 struct pktcdvd_device *pd = NULL;
2206 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2207 if (!pd) {
2211 BUG_ON(pd->refcnt < 0);
2213 pd->refcnt++;
2214 if (pd->refcnt > 1) {
2216 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2221 ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2236 pd->refcnt--;
2245 struct pktcdvd_device *pd = disk->private_data;
2249 pd->refcnt--;
2250 BUG_ON(pd->refcnt < 0);
2251 if (pd->refcnt == 0) {
2252 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2253 pkt_release_dev(pd, flush);
2263 struct pktcdvd_device *pd = psd->pd;
2269 pkt_bio_finished(pd);
2272 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2277 psd->pd = pd;
2279 bio_set_dev(cloned_bio, pd->bdev);
2282 pd->stats.secs_r += bio_sectors(bio);
2283 pkt_queue_bio(pd, cloned_bio);
2288 struct pktcdvd_device *pd = q->queuedata;
2294 zone = get_zone(bio->bi_iter.bi_sector, pd);
2300 spin_lock(&pd->cdrw.active_list_lock);
2302 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2313 wake_up(&pd->wqueue);
2316 spin_unlock(&pd->cdrw.active_list_lock);
2324 spin_unlock(&pd->cdrw.active_list_lock);
2331 spin_lock(&pd->lock);
2332 if (pd->write_congestion_on > 0
2333 && pd->bio_queue_size >= pd->write_congestion_on) {
2336 spin_unlock(&pd->lock);
2338 spin_lock(&pd->lock);
2339 } while(pd->bio_queue_size > pd->write_congestion_off);
2341 spin_unlock(&pd->lock);
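
The two values form a hysteresis: a writer is throttled once bio_queue_size reaches write_congestion_on (presumably via set_bdi_congested in the elided line), and the loop above only lets it continue after the queue has drained to write_congestion_off or below, matching the clear_bdi_congested call listed at 1187. A compact sketch of that watermark rule, outside the driver's locking:

	/* Hedged sketch of the hysteresis: congest at >= on, un-congest at <= off. */
	struct wmarks { int on, off, congested; };

	static void queue_size_changed(struct wmarks *w, int bio_queue_size)
	{
		if (!w->congested && w->on > 0 && bio_queue_size >= w->on)
			w->congested = 1;   /* submit path: throttle further writers */
		else if (w->congested && bio_queue_size <= w->off)
			w->congested = 0;   /* pkt_handle_queue path: release them */
	}
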
2346 node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
2348 spin_lock(&pd->lock);
2349 BUG_ON(pd->bio_queue_size < 0);
2350 was_empty = (pd->bio_queue_size == 0);
2351 pkt_rbtree_insert(pd, node);
2352 spin_unlock(&pd->lock);
2357 atomic_set(&pd->scan_queue, 1);
2360 wake_up(&pd->wqueue);
2361 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2366 wake_up(&pd->wqueue);
2372 struct pktcdvd_device *pd;
2378 pd = bio->bi_disk->queue->queuedata;
2379 if (!pd) {
2384 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2392 pkt_make_request_read(pd, bio);
2396 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2397 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2403 pkt_err(pd, "wrong bio size\n");
2408 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2409 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2412 BUG_ON(last_zone != zone + pd->settings.size);
2431 static void pkt_init_queue(struct pktcdvd_device *pd)
2433 struct request_queue *q = pd->disk->queue;
2437 q->queuedata = pd;
2442 struct pktcdvd_device *pd = m->private;
2447 seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2448 bdevname(pd->bdev, bdev_buf));
2451 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2453 if (pd->settings.write_type == 0)
2459 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2460 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2462 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2464 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2466 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2473 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2474 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2475 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2476 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2477 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2480 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2481 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2482 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2483 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2484 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2485 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2488 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2489 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2490 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2492 pkt_count_states(pd, states);
2497 pd->write_congestion_off,
2498 pd->write_congestion_on);
2502 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2508 if (pd->pkt_dev == dev) {
2509 pkt_err(pd, "recursive setup not allowed\n");
2517 pkt_err(pd, "%s already setup\n",
2522 pkt_err(pd, "can't chain pktcdvd devices\n");
2538 pd->bdev = bdev;
2541 pkt_init_queue(pd);
2543 atomic_set(&pd->cdrw.pending_bios, 0);
2544 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2545 if (IS_ERR(pd->cdrw.thread)) {
2546 pkt_err(pd, "can't start kernel thread\n");
2550 proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
2551 pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
2563 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2566 pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
2576 if (pd->refcnt == 1)
2577 pkt_lock_door(pd, 0);
2587 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
2591 pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
2602 struct pktcdvd_device *pd = disk->private_data;
2605 if (!pd)
2607 if (!pd->bdev)
2609 attached_disk = pd->bdev->bd_disk;
2638 struct pktcdvd_device *pd;
2652 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2653 if (!pd)
2656 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
2661 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2662 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2663 spin_lock_init(&pd->cdrw.active_list_lock);
2665 spin_lock_init(&pd->lock);
2666 spin_lock_init(&pd->iosched.lock);
2667 bio_list_init(&pd->iosched.read_queue);
2668 bio_list_init(&pd->iosched.write_queue);
2669 sprintf(pd->name, DRIVER_NAME"%d", idx);
2670 init_waitqueue_head(&pd->wqueue);
2671 pd->bio_queue = RB_ROOT;
2673 pd->write_congestion_on = write_congestion_on;
2674 pd->write_congestion_off = write_congestion_off;
2680 pd->disk = disk;
2685 strcpy(disk->disk_name, pd->name);
2686 disk->private_data = pd;
2691 pd->pkt_dev = MKDEV(pktdev_major, idx);
2692 ret = pkt_new_dev(pd, dev);
2697 disk->events = pd->bdev->bd_disk->events;
2701 pkt_sysfs_dev_new(pd);
2702 pkt_debugfs_dev_new(pd);
2704 pkt_devs[idx] = pd;
2706 *pkt_dev = pd->pkt_dev;
2714 mempool_exit(&pd->rb_pool);
2715 kfree(pd);
2727 struct pktcdvd_device *pd;
2734 pd = pkt_devs[idx];
2735 if (pd && (pd->pkt_dev == pkt_dev))
2744 if (pd->refcnt > 0) {
2748 if (!IS_ERR(pd->cdrw.thread))
2749 kthread_stop(pd->cdrw.thread);
2753 pkt_debugfs_dev_remove(pd);
2754 pkt_sysfs_dev_remove(pd);
2756 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2758 remove_proc_entry(pd->name, pkt_proc);
2759 pkt_dbg(1, pd, "writer unmapped\n");
2761 del_gendisk(pd->disk);
2762 blk_cleanup_queue(pd->disk->queue);
2763 put_disk(pd->disk);
2765 mempool_exit(&pd->rb_pool);
2766 kfree(pd);
2778 struct pktcdvd_device *pd;
2782 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2783 if (pd) {
2784 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2785 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);