Lines Matching defs:vioch

114 static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
119 spin_lock_irqsave(&vioch->lock, flags);
120 cinfo->transport_info = vioch;
122 vioch->cinfo = cinfo;
123 spin_unlock_irqrestore(&vioch->lock, flags);
125 refcount_set(&vioch->users, 1);
128 static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
130 return refcount_inc_not_zero(&vioch->users);
133 static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
135 if (refcount_dec_and_test(&vioch->users)) {
138 spin_lock_irqsave(&vioch->lock, flags);
139 if (vioch->shutdown_done) {
140 vioch->cinfo = NULL;
141 complete(vioch->shutdown_done);
143 spin_unlock_irqrestore(&vioch->lock, flags);
147 static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
156 spin_lock_irqsave(&vioch->lock, flags);
157 if (!vioch->cinfo || vioch->shutdown_done) {
158 spin_unlock_irqrestore(&vioch->lock, flags);
162 vioch->shutdown_done = &vioch_shutdown_done;
163 if (!vioch->is_rx && vioch->deferred_tx_wq)
165 vioch->deferred_tx_wq = NULL;
166 spin_unlock_irqrestore(&vioch->lock, flags);
168 scmi_vio_channel_release(vioch);
171 wait_for_completion(vioch->shutdown_done);
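The matches above (source lines 114-171) appear to come from the Linux SCMI virtio transport, drivers/firmware/arm_scmi/virtio.c, and cover the channel lifetime helpers: scmi_vio_channel_ready() publishes a channel with users set to 1, scmi_vio_channel_acquire()/scmi_vio_channel_release() bracket every use of it, and scmi_vio_channel_cleanup_sync() drops the initial reference and waits on shutdown_done for any in-flight users. Below is a minimal sketch of that refcount-plus-completion pattern, not the driver code itself; the vio_channel struct is reduced to the fields visible in the matches, and the helper names are invented for the sketch.

#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct scmi_chan_info;                          /* opaque in this sketch */

struct vio_channel {                            /* reduced stand-in for scmi_vio_channel */
        spinlock_t lock;
        refcount_t users;
        struct scmi_chan_info *cinfo;
        struct completion *shutdown_done;
};

static void channel_ready(struct vio_channel *ch, struct scmi_chan_info *cinfo)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        ch->cinfo = cinfo;
        spin_unlock_irqrestore(&ch->lock, flags);
        /* Set last, so acquire() cannot succeed on a half-initialized channel. */
        refcount_set(&ch->users, 1);
}

static bool channel_acquire(struct vio_channel *ch)
{
        /* Fails once the initial reference has been dropped at shutdown. */
        return refcount_inc_not_zero(&ch->users);
}

static void channel_release(struct vio_channel *ch)
{
        unsigned long flags;

        if (!refcount_dec_and_test(&ch->users))
                return;

        /* Last user gone: detach and wake the waiter, if one registered. */
        spin_lock_irqsave(&ch->lock, flags);
        if (ch->shutdown_done) {
                ch->cinfo = NULL;
                complete(ch->shutdown_done);
        }
        spin_unlock_irqrestore(&ch->lock, flags);
}

static void channel_cleanup_sync(struct vio_channel *ch)
{
        unsigned long flags;
        DECLARE_COMPLETION_ONSTACK(shutdown_done);

        spin_lock_irqsave(&ch->lock, flags);
        if (!ch->cinfo || ch->shutdown_done) {  /* never readied, or already shutting down */
                spin_unlock_irqrestore(&ch->lock, flags);
                return;
        }
        ch->shutdown_done = &shutdown_done;
        spin_unlock_irqrestore(&ch->lock, flags);

        channel_release(ch);                    /* drop the initial reference */
        wait_for_completion(ch->shutdown_done); /* block until the last user releases */
}

The point the matches make is ordering: refcount_set(&vioch->users, 1) happens only after the channel is populated, and the on-stack completion lets cleanup block until whichever path holds the last reference drops it.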
176 scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
181 spin_lock_irqsave(&vioch->free_lock, flags);
182 if (list_empty(&vioch->free_list)) {
183 spin_unlock_irqrestore(&vioch->free_lock, flags);
187 msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
189 spin_unlock_irqrestore(&vioch->free_lock, flags);
204 static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
213 spin_lock_irqsave(&vioch->free_lock, flags);
214 list_add_tail(&msg->list, &vioch->free_list);
215 spin_unlock_irqrestore(&vioch->free_lock, flags);
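Source lines 176-215 are the per-channel message pool: scmi_virtio_get_free_msg() pops a message off vioch->free_list under vioch->free_lock, and scmi_vio_msg_release() returns it to the tail of the same list. A reduced sketch of that free-list handling follows; the vio_msg/vio_msg_pool names and the list-only message layout are assumptions for the sketch (the real message also carries request/response buffers and a per-message refcount).

#include <linux/list.h>
#include <linux/spinlock.h>

struct vio_msg {
        struct list_head list;
        /* real driver: request/input buffers, poll state, per-message refcount */
};

struct vio_msg_pool {
        spinlock_t free_lock;
        struct list_head free_list;
};

static struct vio_msg *pool_get(struct vio_msg_pool *p)
{
        unsigned long flags;
        struct vio_msg *msg;

        spin_lock_irqsave(&p->free_lock, flags);
        if (list_empty(&p->free_list)) {
                spin_unlock_irqrestore(&p->free_lock, flags);
                return NULL;                    /* pool exhausted */
        }
        msg = list_first_entry(&p->free_list, typeof(*msg), list);
        list_del_init(&msg->list);
        spin_unlock_irqrestore(&p->free_lock, flags);

        return msg;
}

static void pool_put(struct vio_msg_pool *p, struct vio_msg *msg)
{
        unsigned long flags;

        spin_lock_irqsave(&p->free_lock, flags);
        list_add_tail(&msg->list, &p->free_list);
        spin_unlock_irqrestore(&p->free_lock, flags);
}

Keeping a dedicated free_lock, as the matches show, means returning a message never contends with vioch->lock, which is held around the virtqueue operations.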
226 static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
232 struct device *dev = &vioch->vqueue->vdev->dev;
236 spin_lock_irqsave(&vioch->lock, flags);
238 rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
242 virtqueue_kick(vioch->vqueue);
244 spin_unlock_irqrestore(&vioch->lock, flags);
251 * vioch->lock MUST NOT have been already acquired.
253 static void scmi_finalize_message(struct scmi_vio_channel *vioch,
256 if (vioch->is_rx)
257 scmi_vio_feed_vq_rx(vioch, msg);
259 scmi_vio_msg_release(vioch, msg);
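Source lines 226-259 re-arm the RX path: scmi_vio_feed_vq_rx() puts a processed message buffer back on the RX virtqueue with virtqueue_add_inbuf() and kicks it, while scmi_finalize_message() picks between that refill (RX channel) and the free list (TX channel); the comment at line 251 exists because feed_vq_rx takes vioch->lock itself. A sketch of the refill step only, with RX_PDU_SIZE and the rx_msg layout as placeholders rather than the driver's definitions:

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/virtio.h>

#define RX_PDU_SIZE 128                         /* placeholder size */

struct rx_msg {
        u8 input[RX_PDU_SIZE];                  /* device writes the notification/reply here */
};

static int feed_vq_rx(struct virtqueue *vq, spinlock_t *vq_lock,
                      struct rx_msg *msg)
{
        struct scatterlist sg_in;
        unsigned long flags;
        int rc;

        sg_init_one(&sg_in, msg->input, sizeof(msg->input));

        spin_lock_irqsave(vq_lock, flags);
        /* msg doubles as the token virtqueue_get_buf() hands back later. */
        rc = virtqueue_add_inbuf(vq, &sg_in, 1, msg, GFP_ATOMIC);
        if (!rc)
                virtqueue_kick(vq);
        spin_unlock_irqrestore(vq_lock, flags);

        return rc;
}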
266 struct scmi_vio_channel *vioch;
272 vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
275 if (!scmi_vio_channel_acquire(vioch))
278 spin_lock_irqsave(&vioch->lock, flags);
287 spin_unlock_irqrestore(&vioch->lock, flags);
288 scmi_vio_channel_release(vioch);
293 spin_unlock_irqrestore(&vioch->lock, flags);
297 scmi_rx_callback(vioch->cinfo,
300 scmi_finalize_message(vioch, msg);
310 scmi_vio_channel_release(vioch);
317 struct scmi_vio_channel *vioch;
320 vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);
322 if (!scmi_vio_channel_acquire(vioch))
331 spin_lock_irqsave(&vioch->pending_lock, flags);
334 list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
342 scmi_rx_callback(vioch->cinfo,
346 scmi_vio_msg_release(vioch, msg);
349 spin_unlock_irqrestore(&vioch->pending_lock, flags);
352 scmi_vio_complete_cb(vioch->vqueue);
354 scmi_vio_channel_release(vioch);
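Source lines 266-354 are the virtqueue completion callback and the deferred TX worker. Both follow the same shape: take a channel reference with scmi_vio_channel_acquire(), drain completed work (the callback via virtqueue_get_buf(), the worker by walking vioch->pending_cmds_list), push each payload to scmi_rx_callback(), and release the channel at the end. Below is a condensed sketch of the callback skeleton only; chan, deliver() and the simplified release are stand-ins, and the bounded-drain/callback-re-enable details of the real handler are omitted.

#include <linux/printk.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

struct chan {                                   /* minimal stand-in for scmi_vio_channel */
        spinlock_t lock;
        refcount_t users;
};

static bool chan_acquire(struct chan *ch)
{
        return refcount_inc_not_zero(&ch->users);
}

static void chan_release(struct chan *ch)
{
        if (refcount_dec_and_test(&ch->users))
                pr_debug("last channel user gone\n");   /* real teardown omitted */
}

static void deliver(void *msg, unsigned int len)
{
        /* stand-in for the scmi_rx_callback() + scmi_finalize_message() steps */
}

static void complete_cb(struct virtqueue *vq)
{
        /* One channel per virtqueue, indexed off the virtio_device private data. */
        struct chan *ch = &((struct chan *)vq->vdev->priv)[vq->index];
        unsigned long flags;

        if (!chan_acquire(ch))
                return;                         /* channel already shut down */

        for (;;) {
                unsigned int len;
                void *msg;

                spin_lock_irqsave(&ch->lock, flags);
                msg = virtqueue_get_buf(vq, &len);
                spin_unlock_irqrestore(&ch->lock, flags);
                if (!msg)
                        break;

                deliver(msg, len);              /* runs without ch->lock held */
        }

        chan_release(ch);
}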
366 struct scmi_vio_channel *vioch = base_cinfo->transport_info;
368 return vioch->max_msg;
390 struct scmi_vio_channel *channels, *vioch = NULL;
399 vioch = &channels[VIRTIO_SCMI_VQ_TX];
403 vioch = &channels[VIRTIO_SCMI_VQ_RX];
409 return vioch && !vioch->cinfo;
420 struct scmi_vio_channel *vioch;
427 vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
430 if (tx && !vioch->deferred_tx_wq) {
433 vioch->deferred_tx_wq =
437 if (!vioch->deferred_tx_wq)
441 vioch->deferred_tx_wq);
445 INIT_WORK(&vioch->deferred_tx_work,
449 for (i = 0; i < vioch->max_msg; i++) {
471 scmi_finalize_message(vioch, msg);
474 scmi_vio_channel_ready(vioch, cinfo);
482 struct scmi_vio_channel *vioch = cinfo->transport_info;
486 * the channels: doing it later holding vioch->lock creates unsafe
489 virtio_break_device(vioch->vqueue->vdev);
490 scmi_vio_channel_cleanup_sync(vioch);
498 struct scmi_vio_channel *vioch = cinfo->transport_info;
506 if (!scmi_vio_channel_acquire(vioch))
509 msg = scmi_virtio_get_free_msg(vioch);
511 scmi_vio_channel_release(vioch);
520 spin_lock_irqsave(&vioch->lock, flags);
529 msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
537 rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
539 dev_err(vioch->cinfo->dev,
542 virtqueue_kick(vioch->vqueue);
544 spin_unlock_irqrestore(&vioch->lock, flags);
550 scmi_vio_msg_release(vioch, msg);
551 scmi_vio_msg_release(vioch, msg);
554 scmi_vio_channel_release(vioch);
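Source lines 498-554 are the TX submission path: acquire the channel, take a free message, and, under vioch->lock, post one device-readable request buffer plus one device-writable response buffer in a single virtqueue_add_sgs(vq, sgs, 1, 1, ...) call before kicking the queue (with poll_idx recorded first when the transfer is polled). A sketch of just the scatterlist setup and submission; the buffer sizes and the tx_msg/send_message names are placeholders, and the channel/message refcounting shown earlier is left out.

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/virtio.h>

struct tx_msg {
        u8 request[128];                        /* placeholder sizes, not the driver's */
        u8 input[128];
};

static int send_message(struct virtqueue *vq, spinlock_t *vq_lock,
                        struct tx_msg *msg, size_t req_len)
{
        struct scatterlist sg_out;
        struct scatterlist sg_in;
        struct scatterlist *sgs[2] = { &sg_out, &sg_in };
        unsigned long flags;
        int rc;

        sg_init_one(&sg_out, msg->request, req_len);            /* driver-filled command */
        sg_init_one(&sg_in, msg->input, sizeof(msg->input));    /* device-filled reply */

        spin_lock_irqsave(vq_lock, flags);
        /* One out sg then one in sg; msg is the token get_buf() returns later. */
        rc = virtqueue_add_sgs(vq, sgs, 1, 1, msg, GFP_ATOMIC);
        if (!rc)
                virtqueue_kick(vq);
        spin_unlock_irqrestore(vq_lock, flags);

        return rc;
}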
616 struct scmi_vio_channel *vioch = cinfo->transport_info;
619 if (!msg || !scmi_vio_channel_acquire(vioch))
626 if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
627 scmi_vio_channel_release(vioch);
634 scmi_vio_msg_release(vioch, msg);
639 scmi_vio_channel_release(vioch);
686 struct scmi_vio_channel *vioch = cinfo->transport_info;
709 if (!scmi_vio_channel_acquire(vioch))
713 pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
715 scmi_vio_channel_release(vioch);
719 spin_lock_irqsave(&vioch->lock, flags);
720 virtqueue_disable_cb(vioch->vqueue);
726 while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
760 spin_lock(&vioch->pending_lock);
762 &vioch->pending_cmds_list);
763 spin_unlock(&vioch->pending_lock);
778 pending = !virtqueue_enable_cb(vioch->vqueue);
780 msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
781 pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
784 if (vioch->deferred_tx_wq && (any_prefetched || pending))
785 queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);
787 spin_unlock_irqrestore(&vioch->lock, flags);
789 scmi_vio_channel_release(vioch);
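Source lines 686-789 are the polled-completion path: the opaque index saved earlier with virtqueue_enable_cb_prepare() is fed to virtqueue_poll() to detect used buffers without interrupts; if something arrived, buffers are pulled under vioch->lock with callbacks disabled, replies belonging to other polled transfers are parked on vioch->pending_cmds_list, and the deferred TX worker is kicked. A sketch of the arm/poll/drain handshake only, with the pending-list handoff and worker kick omitted; the poll_state/poll_arm/poll_fetch names are invented for the sketch.

#include <linux/spinlock.h>
#include <linux/virtio.h>

struct poll_state {
        unsigned int idx;                       /* token from virtqueue_enable_cb_prepare() */
};

static void poll_arm(struct virtqueue *vq, struct poll_state *ps)
{
        ps->idx = virtqueue_enable_cb_prepare(vq);
}

/* Returns a completed buffer token, or NULL if nothing has shown up yet. */
static void *poll_fetch(struct virtqueue *vq, spinlock_t *vq_lock,
                        struct poll_state *ps)
{
        unsigned long flags;
        unsigned int len;
        void *msg;

        /* Cheap check: has the device marked buffers used since poll_arm()? */
        if (!virtqueue_poll(vq, ps->idx))
                return NULL;

        spin_lock_irqsave(vq_lock, flags);
        virtqueue_disable_cb(vq);               /* fetch directly, no callback */
        msg = virtqueue_get_buf(vq, &len);
        /* Re-enable callbacks; if more buffers raced in, re-arm polling. */
        if (!virtqueue_enable_cb(vq))
                poll_arm(vq, ps);
        spin_unlock_irqrestore(vq_lock, flags);

        return msg;
}

virtqueue_enable_cb() returning false signals buffers that arrived after the drain, which is why the sketch, like the matches at lines 778-781, immediately re-arms polling in that case.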