Lines matching refs: port (drivers/block/sunvdc.c)

91 static void vdc_ldc_reset(struct vdc_port *port);
107 static inline int vdc_version_supported(struct vdc_port *port,
110 return port->vio.ver.major == major && port->vio.ver.minor >= minor;
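The test on line 110 encodes the standard VIO negotiation rule: the major version must match exactly, and any minor greater than or equal to the one that introduced the feature qualifies. A minimal standalone restatement (function name hypothetical):

    static int ver_supported(u16 neg_major, u16 neg_minor,
                             u16 want_major, u16 want_minor)
    {
            /* Same major required; newer minors stay backward compatible. */
            return neg_major == want_major && neg_minor >= want_minor;
    }
    /* A negotiated 1.2 session supports a 1.1 feature:
     * ver_supported(1, 2, 1, 1) == 1, but ver_supported(2, 0, 1, 1) == 0. */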
145 struct vdc_port *port = bdev->bd_disk->private_data;
157 if (!vdc_version_supported(port, 1, 1))
159 switch (port->vdisk_mtype) {
179 static void vdc_blk_queue_start(struct vdc_port *port)
181 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
187 if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
188 blk_mq_start_stopped_hw_queues(port->disk->queue, true);
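vdc_blk_queue_start() only restarts the stopped blk-mq queue once at least half of the TX descriptor ring is free again (line 187), so the queue does not bounce between stopped and started near the full mark. With VDC_TX_RING_SIZE of 512 in this driver, a hedged restatement of the threshold (helper name hypothetical):

    static bool tx_ring_half_free(unsigned int avail)
    {
            /* e.g. avail == 256: 256 * 100 / 512 == 50, queue restarts */
            return avail * 100 / VDC_TX_RING_SIZE >= 50;
    }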
204 struct vdc_port *port = to_vdc_port(vio);
206 cancel_delayed_work(&port->ldc_reset_timer_work);
208 vdc_blk_queue_start(port);
211 static int vdc_handle_unknown(struct vdc_port *port, void *arg)
219 ldc_disconnect(port->vio.lp);
226 struct vdc_port *port = to_vdc_port(vio);
237 pkt.vdisk_block_size = port->vdisk_block_size;
238 pkt.max_xfer_size = port->max_xfer_size;
243 return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
248 struct vdc_port *port = to_vdc_port(vio);
270 if (pkt->vdisk_block_size > port->vdisk_block_size) {
274 port->vdisk_block_size, pkt->vdisk_block_size);
278 port->operations = pkt->operations;
279 port->vdisk_type = pkt->vdisk_type;
280 if (vdc_version_supported(port, 1, 1)) {
281 port->vdisk_size = pkt->vdisk_size;
282 port->vdisk_mtype = pkt->vdisk_mtype;
284 if (pkt->max_xfer_size < port->max_xfer_size)
285 port->max_xfer_size = pkt->max_xfer_size;
286 port->vdisk_block_size = pkt->vdisk_block_size;
288 port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
289 if (vdc_version_supported(port, 1, 2))
290 port->vdisk_phys_blksz = pkt->phys_block_size;
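Lines 270-290 validate and adopt the server's attributes: the server may not announce a larger block size than the client offered (lines 270-274), the transfer limit is only ever clamped downward, and the newer fields are read only when the negotiated version permits (vdisk_size and vdisk_mtype need 1.1, phys_block_size needs 1.2). An annotated restatement of the adoption step:

    if (pkt->max_xfer_size < port->max_xfer_size)
            port->max_xfer_size = pkt->max_xfer_size;  /* clamp, never grow */
    port->vdisk_block_size = pkt->vdisk_block_size;

    port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;     /* pre-1.2 default */
    if (vdc_version_supported(port, 1, 2))
            port->vdisk_phys_blksz = pkt->phys_block_size;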
300 static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
304 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
307 static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
311 struct vdc_req_entry *rqe = &port->rq_arr[index];
317 ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
323 vdc_end_special(port, desc);
331 vdc_blk_queue_start(port);
334 static int vdc_ack(struct vdc_port *port, void *msgbuf)
336 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
344 vdc_end_one(port, dr, pkt->start_idx);
349 static int vdc_nack(struct vdc_port *port, void *msgbuf)
357 struct vdc_port *port = arg;
358 struct vio_driver_state *vio = &port->vio;
366 queue_work(sunvdc_wq, &port->ldc_reset_work);
406 err = vdc_ack(port, &msgbuf);
408 err = vdc_nack(port, &msgbuf);
410 err = vdc_handle_unknown(port, &msgbuf);
414 err = vdc_handle_unknown(port, &msgbuf);
420 vdc_finish(&port->vio, err, WAITING_FOR_ANY);
425 static int __vdc_tx_trigger(struct vdc_port *port)
427 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
433 .sid = vio_send_sid(&port->vio),
445 err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
458 vdc_ldc_reset(port);
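__vdc_tx_trigger() notifies the server that new descriptors are ready; if the channel reports -ENOTCONN the link is gone and a full reset is triggered (line 458). The send is retried with a short exponential backoff while the channel returns -EAGAIN; a hedged sketch of that pattern, with the delay cap and retry limit illustrative rather than the driver's exact constants:

    int err, delay = 1, retries = 0;

    do {
            err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
            if (err > 0)
                    break;                  /* notification queued */
            udelay(delay);
            if ((delay <<= 1) > 128)
                    delay = 128;            /* cap the backoff */
    } while (err == -EAGAIN && retries++ < 10);

    if (err == -ENOTCONN)
            vdc_ldc_reset(port);            /* link dropped (line 458) */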
464 struct vdc_port *port = req->q->disk->private_data;
465 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
474 if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
487 sg_init_table(sg, port->ring_cookies);
496 err = ldc_map_sg(port->vio.lp, sg, nsg,
497 desc->cookies, port->ring_cookies,
504 rqe = &port->rq_arr[dr->prod];
508 desc->req_id = port->req_id;
510 if (port->vdisk_type == VD_DISK_TYPE_DISK) {
516 desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
526 err = __vdc_tx_trigger(port);
530 port->req_id++;
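Line 516 converts the request position from 512-byte sectors (the unit of blk_rq_pos()) into device blocks. Worked example, assuming a 4096-byte vdisk_block_size:

    /* blk_rq_pos(req) == 4096 sectors
     * byte offset     == 4096 << 9 == 2097152 (2 MiB)
     * device block    == 2097152 / 4096 == 512
     */
    desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;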
540 struct vdc_port *port = hctx->queue->queuedata;
544 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
548 spin_lock_irqsave(&port->vio.lock, flags);
553 if (unlikely(port->drain)) {
554 spin_unlock_irqrestore(&port->vio.lock, flags);
559 spin_unlock_irqrestore(&port->vio.lock, flags);
565 spin_unlock_irqrestore(&port->vio.lock, flags);
569 spin_unlock_irqrestore(&port->vio.lock, flags);
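The four unlock sites on lines 554-569 correspond to the exit paths of vdc_queue_rq(): draining, ring full, send failure, and success. A hedged control-flow sketch under that reading (status codes as commonly returned from blk-mq .queue_rq handlers):

    spin_lock_irqsave(&port->vio.lock, flags);

    if (unlikely(port->drain)) {            /* reset in progress */
            spin_unlock_irqrestore(&port->vio.lock, flags);
            return BLK_STS_IOERR;
    }
    if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
            spin_unlock_irqrestore(&port->vio.lock, flags);
            blk_mq_stop_hw_queue(hctx);     /* vdc_blk_queue_start() restarts it */
            return BLK_STS_DEV_RESOURCE;    /* blk-mq retries later */
    }
    if (__send_request(bd->rq) < 0) {
            spin_unlock_irqrestore(&port->vio.lock, flags);
            return BLK_STS_IOERR;
    }

    spin_unlock_irqrestore(&port->vio.lock, flags);
    return BLK_STS_OK;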
573 static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
583 if (!(((u64)1 << (u64)op) & port->operations))
655 spin_lock_irqsave(&port->vio.lock, flags);
657 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
664 err = ldc_map_single(port->vio.lp, req_buf, op_len,
665 desc->cookies, port->ring_cookies,
668 spin_unlock_irqrestore(&port->vio.lock, flags);
675 port->vio.cmp = &comp;
678 desc->req_id = port->req_id;
692 err = __vdc_tx_trigger(port);
694 port->req_id++;
696 spin_unlock_irqrestore(&port->vio.lock, flags);
701 port->vio.cmp = NULL;
702 spin_unlock_irqrestore(&port->vio.lock, flags);
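generic_request() gates every opcode on line 583's capability mask: during the handshake the server advertises each supported VD_OP_* as one bit in port->operations, so opcode op is permitted iff bit op is set. Standalone illustration (helper name hypothetical):

    #include <stdint.h>

    static int op_supported(uint64_t operations, uint8_t op)
    {
            return (((uint64_t)1 << op) & operations) != 0;
    }
    /* A mask of 0x26 (binary 100110) permits opcodes 1, 2 and 5 only. */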
713 static int vdc_alloc_tx_ring(struct vdc_port *port)
715 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
721 (sizeof(struct ldc_trans_cookie) * port->ring_cookies);
725 dring = ldc_alloc_exp_dring(port->vio.lp, len,
743 static void vdc_free_tx_ring(struct vdc_port *port)
745 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
748 ldc_free_exp_dring(port->vio.lp, dr->base,
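Lines 715-725 size the exported descriptor ring: each entry is the fixed vio_disk_desc plus inline space for ring_cookies LDC transfer cookies, and the ring holds VDC_TX_RING_SIZE such entries. Restating the arithmetic behind line 721:

    /* entry = descriptor + its inline cookie array */
    entry_size = sizeof(struct vio_disk_desc) +
                 sizeof(struct ldc_trans_cookie) * port->ring_cookies;
    len = VDC_TX_RING_SIZE * entry_size;   /* one contiguous exported ring */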
759 static int vdc_port_up(struct vdc_port *port)
766 port->vio.cmp = &comp;
768 vio_port_up(&port->vio);
773 static void vdc_port_down(struct vdc_port *port)
775 ldc_disconnect(port->vio.lp);
776 ldc_unbind(port->vio.lp);
777 vdc_free_tx_ring(port);
778 vio_ldc_free(&port->vio);
785 static int probe_disk(struct vdc_port *port)
791 err = vdc_port_up(port);
798 if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
801 if (vdc_version_supported(port, 1, 1)) {
805 if (port->vdisk_size == -1)
810 err = generic_request(port, VD_OP_GET_DISKGEOM,
817 port->vdisk_size = ((u64)geom.num_cyl *
822 err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
827 g = blk_mq_alloc_disk(&port->tag_set, port);
830 port->vio.name);
835 port->disk = g;
842 blk_queue_max_segments(q, port->ring_cookies);
843 blk_queue_max_hw_sectors(q, port->max_xfer_size);
845 g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
847 strcpy(g->disk_name, port->disk_name);
851 g->private_data = port;
853 set_capacity(g, port->vdisk_size);
855 if (vdc_version_supported(port, 1, 1)) {
856 switch (port->vdisk_mtype) {
858 pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
864 pr_info(PFX "Virtual DVD %s\n", port->disk_name);
870 pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
875 blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
879 port->vdisk_size, (port->vdisk_size >> (20 - 9)),
880 port->vio.ver.major, port->vio.ver.minor);
882 err = device_add_disk(&port->vio.vdev->dev, g, NULL);
891 blk_mq_free_tag_set(&port->tag_set);
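Two conversions in probe_disk() deserve a worked example. Pre-1.1 servers report no vdisk_size, so capacity falls back to the CHS geometry product on line 817; and the size printed around line 879 shifts by (20 - 9) because a MiB holds 2^11 512-byte sectors. Illustrative numbers:

    /* geom.num_cyl = 1024, num_hd = 16, num_sc = 63
     * vdisk_size   = 1024 * 16 * 63 = 1032192 sectors
     * MiB          = 1032192 >> (20 - 9) = 504
     */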
940 * by locating all the virtual-device-port nodes associated
968 struct vdc_port *port;
988 "VIO: Ignoring extra vdisk port %s",
993 port = kzalloc(sizeof(*port), GFP_KERNEL);
994 if (!port) {
1000 snprintf(port->disk_name, sizeof(port->disk_name),
1005 snprintf(port->disk_name, sizeof(port->disk_name),
1007 port->vdisk_size = -1;
1014 port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
1015 INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
1016 INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
1018 err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
1020 &vdc_vio_ops, port->disk_name);
1024 port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
1025 port->max_xfer_size = MAX_XFER_SIZE;
1026 port->ring_cookies = MAX_RING_COOKIES;
1028 err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
1032 err = vdc_alloc_tx_ring(port);
1036 err = probe_disk(port);
1041 * whether the port has been probed.
1043 dev_set_drvdata(&vdev->dev, port);
1050 vdc_free_tx_ring(port);
1053 vio_ldc_free(&port->vio);
1056 kfree(port);
1065 struct vdc_port *port = dev_get_drvdata(&vdev->dev);
1067 if (port) {
1068 blk_mq_stop_hw_queues(port->disk->queue);
1070 flush_work(&port->ldc_reset_work);
1071 cancel_delayed_work_sync(&port->ldc_reset_timer_work);
1072 del_timer_sync(&port->vio.timer);
1074 del_gendisk(port->disk);
1075 put_disk(port->disk);
1076 blk_mq_free_tag_set(&port->tag_set);
1078 vdc_free_tx_ring(port);
1079 vio_ldc_free(&port->vio);
1083 kfree(port);
1087 static void vdc_requeue_inflight(struct vdc_port *port)
1089 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1094 struct vdc_req_entry *rqe = &port->rq_arr[idx];
1097 ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
1103 vdc_end_special(port, desc);
1112 static void vdc_queue_drain(struct vdc_port *port)
1114 struct request_queue *q = port->disk->queue;
1120 port->drain = 1;
1121 spin_unlock_irq(&port->vio.lock);
1126 spin_lock_irq(&port->vio.lock);
1127 port->drain = 0;
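vdc_queue_drain() flips port->drain under vio.lock but must drop the lock (line 1121) before waiting, since freezing the queue waits for in-flight requests whose completions take the same lock. A hedged sketch of the handshake, using the blk-mq freeze/quiesce pairs:

    port->drain = 1;
    spin_unlock_irq(&port->vio.lock);       /* let completions make progress */

    blk_mq_freeze_queue(q);                 /* wait out in-flight requests */
    blk_mq_quiesce_queue(q);                /* and any running .queue_rq */
    blk_mq_unquiesce_queue(q);
    blk_mq_unfreeze_queue(q);

    spin_lock_irq(&port->vio.lock);
    port->drain = 0;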
1134 struct vdc_port *port;
1137 port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
1138 vio = &port->vio;
1141 if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
1143 port->disk_name, port->ldc_timeout);
1144 vdc_queue_drain(port);
1145 vdc_blk_queue_start(port);
1152 struct vdc_port *port;
1156 port = container_of(work, struct vdc_port, ldc_reset_work);
1157 vio = &port->vio;
1160 vdc_ldc_reset(port);
1164 static void vdc_ldc_reset(struct vdc_port *port)
1168 assert_spin_locked(&port->vio.lock);
1170 pr_warn(PFX "%s ldc link reset\n", port->disk_name);
1171 blk_mq_stop_hw_queues(port->disk->queue);
1172 vdc_requeue_inflight(port);
1173 vdc_port_down(port);
1175 err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
1177 pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
1181 err = vdc_alloc_tx_ring(port);
1183 pr_err(PFX "%s vdc_alloc_tx_ring:%d\n", port->disk_name, err);
1187 if (port->ldc_timeout)
1188 mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
1189 round_jiffies(jiffies + HZ * port->ldc_timeout));
1190 mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
1194 vio_ldc_free(&port->vio);
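After tearing down and reallocating the channel and TX ring, vdc_ldc_reset() re-arms both watchdogs (lines 1187-1190): the optional handshake timeout taken from the machine description, and the one-second VIO timer that drives the reconnect state machine. Annotated restatement:

    if (port->ldc_timeout)                  /* MD-configured handshake deadline */
            mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
                             round_jiffies(jiffies + HZ * port->ldc_timeout));
    mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));  /* retry tick */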
1199 .type = "vdc-port",