Lines Matching refs:xfer
306 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
309 * @xfer: The xfer to act upon
312 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
314 * of incorrect association of a late and expired xfer with a live in-flight
365 struct scmi_xfer *xfer)
377 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
403 xfer->hdr.seq = (u16)xfer_id;
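
The scmi_xfer_token_set() fragments above show the core of token selection: a candidate is derived from the xfer's monotonically increasing transfer_id, masked to the token width, and the allocation table is then searched from that point (wrapping round) for a free slot, which becomes xfer->hdr.seq. Below is a minimal user-space sketch of that search; MSG_TOKEN_MAX is assumed to be a 10-bit token space, and token_alloc_table/token_reserve() are invented names, not the driver's helpers.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSG_TOKEN_MAX 1024    /* assumed: 10-bit sequence number space */

    static unsigned long token_alloc_table[MSG_TOKEN_MAX / (8 * sizeof(unsigned long))];

    static bool token_test(unsigned int tok)
    {
        unsigned int bits = 8 * sizeof(unsigned long);

        return token_alloc_table[tok / bits] & (1UL << (tok % bits));
    }

    static void token_set_bit(unsigned int tok)
    {
        unsigned int bits = 8 * sizeof(unsigned long);

        token_alloc_table[tok / bits] |= 1UL << (tok % bits);
    }

    /*
     * Derive a candidate token from the monotonic transfer_id, then scan
     * forward (wrapping at MSG_TOKEN_MAX) for the first free slot.
     * Returns the reserved token, or -1 if every slot is in flight.
     */
    static int token_reserve(uint32_t transfer_id)
    {
        unsigned int next = transfer_id & (MSG_TOKEN_MAX - 1);
        unsigned int i;

        for (i = 0; i < MSG_TOKEN_MAX; i++) {
            unsigned int tok = (next + i) & (MSG_TOKEN_MAX - 1);

            if (!token_test(tok)) {
                token_set_bit(tok);
                return (int)tok;
            }
        }
        return -1;
    }

Starting the scan at the masked transfer_id rather than at zero is what makes a just-expired token unlikely to be reused immediately, which is the stale-association hazard the comment fragments describe.
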
412 * @xfer: The xfer to act upon
415 struct scmi_xfer *xfer)
417 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
421 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
423 * @xfer: The xfer to register
426 * Note that this helper assumes that the xfer to be registered as in-flight
427 * had been built using an xfer sequence number which still corresponds to a
433 scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
437 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
438 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
439 xfer->pending = true;
443 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
445 * @xfer: The xfer to register
449 * that was baked into the provided xfer, so it checks at first if it can
450 * be mapped to a free slot and fails with an error if another xfer with the
453 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
456 static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
463 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
464 scmi_xfer_inflight_register_unlocked(xfer, minfo);
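
scmi_xfer_inflight_register_unlocked() marks the sequence number in the allocation bitmap, hashes the xfer into pending_xfers keyed by that number and flags it pending; scmi_xfer_inflight_register() does the same under the lock but first checks the bit and backs off with -EBUSY if the slot is already owned. A rough user-space analogue, assuming a small fixed-bucket hash and a plain mutex (pending_hash, seq_in_use and struct xfer_node are illustrative names only):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PENDING_BUCKETS 64

    struct xfer_node {
        uint16_t seq;
        bool pending;
        struct xfer_node *next;
    };

    static struct xfer_node *pending_hash[PENDING_BUCKETS];
    static bool seq_in_use[UINT16_MAX + 1];    /* stands in for xfer_alloc_table */
    static pthread_mutex_t xfer_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Unlocked variant: caller holds xfer_lock and knows the seq is free. */
    static void inflight_register_unlocked(struct xfer_node *x)
    {
        unsigned int b = x->seq % PENDING_BUCKETS;

        seq_in_use[x->seq] = true;
        x->next = pending_hash[b];        /* hash in by sequence number */
        pending_hash[b] = x;
        x->pending = true;
    }

    /* Locked variant: fail if another xfer already owns this sequence number. */
    static int inflight_register(struct xfer_node *x)
    {
        int ret = 0;

        pthread_mutex_lock(&xfer_lock);
        if (!seq_in_use[x->seq])
            inflight_register_unlocked(x);
        else
            ret = -EBUSY;
        pthread_mutex_unlock(&xfer_lock);

        return ret;
    }
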
473 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
477 * @xfer: The xfer to register
482 struct scmi_xfer *xfer)
486 return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
490 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
493 * @xfer: The xfer to act upon
498 static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
505 /* Set a new monotonic token as the xfer sequence number */
506 ret = scmi_xfer_token_set(minfo, xfer);
508 scmi_xfer_inflight_register_unlocked(xfer, minfo);
523 * Picks an xfer from the free list @free_xfers (if any available) and performs
527 * allocated xfer, nor is it registered as a pending transaction.
529 * The successfully initialized xfer is refcounted.
533 * Return: An initialized xfer if all went fine, else an error pointer.
539 struct scmi_xfer *xfer;
547 /* grab an xfer from the free_list */
548 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
549 hlist_del_init(&xfer->node);
555 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
557 refcount_set(&xfer->users, 1);
558 atomic_set(&xfer->busy, SCMI_XFER_FREE);
561 return xfer;
565 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
569 * Note that xfer is taken from the TX channel structures.
571 * Return: A valid xfer on Success, or an error-pointer otherwise
575 struct scmi_xfer *xfer;
578 xfer = scmi_xfer_get(handle, &info->tx_minfo);
579 if (!IS_ERR(xfer))
580 xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
582 return xfer;
625 * @xfer: message that was reserved by scmi_xfer_get
627 * After refcount check, possibly release an xfer, clearing the token slot,
628 * removing xfer from @pending_xfers and putting it back into free_xfers.
633 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
638 if (refcount_dec_and_test(&xfer->users)) {
639 if (xfer->pending) {
640 scmi_xfer_token_clear(minfo, xfer);
641 hash_del(&xfer->node);
642 xfer->pending = false;
644 hlist_add_head(&xfer->node, &minfo->free_xfers);
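
Taken together, scmi_xfer_get() and __scmi_xfer_put() implement the descriptor lifecycle: get pops an entry off the free list and gives it a fresh monotonic transfer_id and a users refcount of 1; put drops the refcount and, only when it reaches zero, clears the token slot, unhashes a pending xfer and pushes it back onto the free list. A condensed sketch of that lifecycle with a plain mutex and an int refcount (free_list, xfer_get()/xfer_put() are illustrative, not the driver's API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct sketch_xfer {
        uint32_t transfer_id;
        uint16_t seq;
        bool pending;
        int users;                              /* refcount_t stand-in */
        struct sketch_xfer *next;
    };

    static struct sketch_xfer *free_list;
    static uint32_t transfer_last_id;
    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct sketch_xfer *xfer_get(void)
    {
        struct sketch_xfer *x;

        pthread_mutex_lock(&pool_lock);
        x = free_list;
        if (x) {
            free_list = x->next;
            x->transfer_id = ++transfer_last_id;    /* monotonic id */
            x->users = 1;                           /* initial reference */
        }
        pthread_mutex_unlock(&pool_lock);

        return x;                                   /* NULL if pool exhausted */
    }

    static void xfer_put(struct sketch_xfer *x)
    {
        pthread_mutex_lock(&pool_lock);
        if (--x->users == 0) {
            if (x->pending) {
                /* clearing the token slot / unhashing would go here */
                x->pending = false;
            }
            x->next = free_list;                    /* back to the free pool */
            free_list = x;
        }
        pthread_mutex_unlock(&pool_lock);
    }
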
650 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
653 * @xfer: A reference to the xfer to put
655 * Note that as with other xfer_put() handlers the xfer is really effectively
658 void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
662 xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
663 xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
664 return __scmi_xfer_put(&info->tx_minfo, xfer);
677 * Return: A valid xfer on Success or error otherwise
682 struct scmi_xfer *xfer = NULL;
685 xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
687 return xfer ?: ERR_PTR(-EINVAL);
692 * xfer
696 * @xfer: A reference to the xfer to validate against @msg_type
699 * a pending @xfer; if an asynchronous delayed response is received before the
705 * Context: Assumed to be called with xfer->lock already acquired.
711 struct scmi_xfer *xfer)
719 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
722 xfer->hdr.seq);
726 switch (xfer->state) {
733 xfer->hdr.status = SCMI_SUCCESS;
734 xfer->state = SCMI_XFER_RESP_OK;
735 complete(&xfer->done);
738 xfer->hdr.seq);
754 * scmi_xfer_state_update - Update xfer state
756 * @xfer: A reference to the xfer to update
762 * Context: Assumed to be called on an xfer exclusively acquired using the
765 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
767 xfer->hdr.type = msg_type;
770 if (xfer->hdr.type == MSG_TYPE_COMMAND)
771 xfer->state = SCMI_XFER_RESP_OK;
773 xfer->state = SCMI_XFER_DRESP_OK;
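
scmi_msg_response_validate() rejects a delayed response when no async waiter was registered and rejects any message that does not fit the xfer's current state, while scmi_xfer_state_update() records the new type and advances the state. A compressed sketch of that check, with invented enum names mirroring the fragments above:

    #include <errno.h>
    #include <stdbool.h>

    enum msg_type { MSG_TYPE_COMMAND, MSG_TYPE_DELAYED_RESP, MSG_TYPE_NOTIFICATION };
    enum xfer_state { XFER_SENT_OK, XFER_RESP_OK, XFER_DRESP_OK };

    struct sketch_xfer {
        enum xfer_state state;
        bool async_done;        /* is a waiter registered for a delayed resp? */
    };

    /* 0 if @type is acceptable in the xfer's current state, -EINVAL otherwise. */
    static int response_validate(enum msg_type type, const struct sketch_xfer *x)
    {
        if (type == MSG_TYPE_DELAYED_RESP && !x->async_done)
            return -EINVAL;     /* nobody is waiting for a DRESP */

        switch (x->state) {
        case XFER_SENT_OK:
            return 0;           /* RESP, or an early DRESP standing in for it */
        case XFER_RESP_OK:
            /* only the delayed response may still arrive */
            return type == MSG_TYPE_DELAYED_RESP ? 0 : -EINVAL;
        default:
            return -EINVAL;     /* already completed: anything else is stale */
        }
    }

    static void state_update(struct sketch_xfer *x, enum msg_type type)
    {
        x->state = (type == MSG_TYPE_COMMAND) ? XFER_RESP_OK : XFER_DRESP_OK;
    }
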
776 static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
780 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
786 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
791 * When a valid xfer is found for the sequence number embedded in the provided
793 * xfer is granted till released with @scmi_xfer_command_release.
795 * Return: A valid @xfer on Success or error otherwise.
802 struct scmi_xfer *xfer;
810 xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
811 if (IS_ERR(xfer)) {
816 return xfer;
818 refcount_inc(&xfer->users);
821 spin_lock_irqsave(&xfer->lock, flags);
822 ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
824 * If a pending xfer was found which was also in a congruent state with
828 * RESP and DRESP for the same xfer.
831 spin_until_cond(scmi_xfer_acquired(xfer));
832 scmi_xfer_state_update(xfer, msg_type);
834 spin_unlock_irqrestore(&xfer->lock, flags);
839 msg_type, xfer_id, msg_hdr, xfer->state);
841 __scmi_xfer_put(minfo, xfer);
842 xfer = ERR_PTR(-EINVAL);
845 return xfer;
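
scmi_xfer_command_acquire() looks the pending xfer up by token, takes a reference, validates the message type under xfer->lock, and then spins on scmi_xfer_acquired(), an atomic FREE-to-BUSY compare-exchange, so that racing RESP and DRESP deliveries update the state one at a time. A C11-atomics sketch of just that acquire/release pair (the names and the stdatomic choice are mine, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define XFER_FREE 0
    #define XFER_BUSY 1

    struct sketch_xfer {
        atomic_int busy;
    };

    /* Try to move FREE -> BUSY; true only for the single winning context. */
    static bool xfer_try_acquire(struct sketch_xfer *x)
    {
        int expected = XFER_FREE;

        return atomic_compare_exchange_strong(&x->busy, &expected, XFER_BUSY);
    }

    static void xfer_acquire(struct sketch_xfer *x)
    {
        while (!xfer_try_acquire(x))
            ;    /* spin: the owner holds it only briefly */
    }

    static void xfer_release(struct sketch_xfer *x)
    {
        atomic_store(&x->busy, XFER_FREE);
    }
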
849 struct scmi_xfer *xfer)
851 atomic_set(&xfer->busy, SCMI_XFER_FREE);
852 __scmi_xfer_put(&info->tx_minfo, xfer);
865 struct scmi_xfer *xfer;
872 xfer = scmi_xfer_get(cinfo->handle, minfo);
873 if (IS_ERR(xfer)) {
875 PTR_ERR(xfer));
880 unpack_scmi_header(msg_hdr, &xfer->hdr);
882 /* Ensure order between xfer->priv store and following ops */
883 smp_store_mb(xfer->priv, priv);
885 xfer);
887 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
888 xfer->hdr.id, "NOTI", xfer->hdr.seq,
889 xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
891 scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
892 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
894 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
895 xfer->hdr.protocol_id, xfer->hdr.seq,
899 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
900 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
904 __scmi_xfer_put(minfo, xfer);
912 struct scmi_xfer *xfer;
915 xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
916 if (IS_ERR(xfer)) {
926 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
927 xfer->rx.len = info->desc->max_msg_size;
930 /* Ensure order between xfer->priv store and following ops */
931 smp_store_mb(xfer->priv, priv);
932 info->desc->ops->fetch_response(cinfo, xfer);
934 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
935 xfer->hdr.id,
936 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
937 (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
938 (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
939 xfer->hdr.seq, xfer->hdr.status,
940 xfer->rx.buf, xfer->rx.len);
942 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
943 xfer->hdr.protocol_id, xfer->hdr.seq,
944 xfer->hdr.type);
946 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
948 complete(xfer->async_done);
950 complete(&xfer->done);
955 * When in polling mode avoid queueing the Raw xfer on the IRQ
959 if (!xfer->hdr.poll_completion)
960 scmi_raw_message_report(info->raw, xfer,
965 scmi_xfer_command_release(info, xfer);
1003 * @xfer: message that was reserved by xfer_get_init
1006 struct scmi_xfer *xfer)
1011 __scmi_xfer_put(&info->tx_minfo, xfer);
1015 struct scmi_xfer *xfer, ktime_t stop)
1020 * Poll also on xfer->done so that polling can be forcibly terminated
1023 return info->desc->ops->poll_done(cinfo, xfer) ||
1024 try_wait_for_completion(&xfer->done) ||
1030 struct scmi_xfer *xfer, unsigned int timeout_ms)
1034 if (xfer->hdr.poll_completion) {
1041 * Poll on xfer using the transport-provided .poll_done();
1047 xfer, stop));
1065 spin_lock_irqsave(&xfer->lock, flags);
1066 if (xfer->state == SCMI_XFER_SENT_OK) {
1067 desc->ops->fetch_response(cinfo, xfer);
1068 xfer->state = SCMI_XFER_RESP_OK;
1070 spin_unlock_irqrestore(&xfer->lock, flags);
1074 xfer->hdr.protocol_id, xfer->hdr.id,
1075 !SCMI_XFER_IS_RAW(xfer) ?
1077 xfer->hdr.seq, xfer->hdr.status,
1078 xfer->rx.buf, xfer->rx.len);
1084 scmi_raw_message_report(info->raw, xfer,
1091 if (!wait_for_completion_timeout(&xfer->done,
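
In polling mode scmi_wait_for_reply() spins on the transport's .poll_done(), also testing the xfer completion so polling can be forcibly terminated, until a ktime deadline expires; if polling succeeds the response is fetched under xfer->lock while the xfer is still SENT_OK. A user-space sketch of that bounded polling loop; poll_done()/completion_done() are stand-in stubs for the transport and completion hooks, and CLOCK_MONOTONIC replaces ktime:

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    /* Stand-ins for the transport .poll_done() hook and the xfer completion. */
    static bool poll_done(void)       { return false; }
    static bool completion_done(void) { return false; }

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    /*
     * Spin until the transport reports the reply, the completion fires
     * (which lets polling be forcibly terminated), or the deadline passes.
     * Returns true if a reply arrived in time, false on timeout.
     */
    static bool wait_for_reply_polling(unsigned int timeout_ms)
    {
        uint64_t stop = now_ns() + (uint64_t)timeout_ms * 1000000ULL;

        for (;;) {
            if (poll_done() || completion_done())
                return true;
            if (now_ns() >= stop)
                return false;
        }
    }
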
1107 * @xfer: Reference to the transfer being waited for.
1110 * configuration flags like xfer->hdr.poll_completion.
1115 struct scmi_xfer *xfer)
1120 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1121 xfer->hdr.protocol_id, xfer->hdr.seq,
1123 xfer->hdr.poll_completion);
1125 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1131 * reply to an xfer raw request on a specific channel for the required timeout.
1134 * @xfer: Reference to the transfer being waited for.
1140 struct scmi_xfer *xfer,
1147 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1150 pack_scmi_header(&xfer->hdr));
1159 * @xfer: Transfer to initiate and wait for response
1166 struct scmi_xfer *xfer)
1175 if (xfer->hdr.poll_completion &&
1188 xfer->hdr.poll_completion = true;
1195 xfer->hdr.protocol_id = pi->proto->id;
1196 reinit_completion(&xfer->done);
1198 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1199 xfer->hdr.protocol_id, xfer->hdr.seq,
1200 xfer->hdr.poll_completion);
1203 xfer->hdr.status = SCMI_SUCCESS;
1204 xfer->state = SCMI_XFER_SENT_OK;
1207 * on xfer->state due to the monotonically increasing token allocation,
1208 * we must still ensure xfer->state initialization is not re-ordered
1210 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1214 ret = info->desc->ops->send_message(cinfo, xfer);
1220 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1221 xfer->hdr.id, "CMND", xfer->hdr.seq,
1222 xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1224 ret = scmi_wait_for_message_response(cinfo, xfer);
1225 if (!ret && xfer->hdr.status)
1226 ret = scmi_to_linux_errno(xfer->hdr.status);
1229 info->desc->ops->mark_txdone(cinfo, ret, xfer);
1231 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1232 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
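
The do_xfer() fragments show the send path: status and state are initialized before .send_message(), with a barrier so an early interrupt on the RX path never observes a stale xfer->state, and afterwards the SCMI status carried in the reply is translated into a Linux errno. Below is a small user-space analogue of just the ordering guarantee, using a seq_cst store followed by a full fence as a rough stand-in for the smp_store_mb()-style barrier the driver uses; all names here are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    enum xfer_state { XFER_SENT_OK, XFER_RESP_OK };

    struct sketch_xfer {
        _Atomic enum xfer_state state;
        int status;
    };

    /* Sender side: make the initial state visible before the message leaves. */
    static void send_side(struct sketch_xfer *x)
    {
        x->status = 0;                          /* SCMI SUCCESS */
        atomic_store(&x->state, XFER_SENT_OK);
        atomic_thread_fence(memory_order_seq_cst);

        /*
         * The transport .send_message() (shared-memory writes, doorbell)
         * would run here; the fence keeps those accesses from being ordered
         * before the state store, so an early RX path sees XFER_SENT_OK.
         */
    }

    /* RX side: observes the published state once the reply arrives. */
    static void rx_side(struct sketch_xfer *x)
    {
        if (atomic_load(&x->state) == XFER_SENT_OK)
            atomic_store(&x->state, XFER_RESP_OK);
    }

    int main(void)
    {
        struct sketch_xfer x = { 0 };

        send_side(&x);
        rx_side(&x);
        printf("final state: %d\n", (int)atomic_load(&x.state));
        return 0;
    }
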
1238 struct scmi_xfer *xfer)
1243 xfer->rx.len = info->desc->max_msg_size;
1251 * @xfer: Transfer to initiate and wait for response
1271 struct scmi_xfer *xfer)
1276 xfer->async_done = &async_response;
1284 WARN_ON_ONCE(xfer->hdr.poll_completion);
1286 ret = do_xfer(ph, xfer);
1288 if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1293 } else if (xfer->hdr.status) {
1294 ret = scmi_to_linux_errno(xfer->hdr.status);
1298 xfer->async_done = NULL;
1322 struct scmi_xfer *xfer;
1333 xfer = scmi_xfer_get(pi->handle, minfo);
1334 if (IS_ERR(xfer)) {
1335 ret = PTR_ERR(xfer);
1340 /* Pick a sequence number and register this xfer as in-flight */
1341 ret = scmi_xfer_pending_set(xfer, minfo);
1345 __scmi_xfer_put(minfo, xfer);
1349 xfer->tx.len = tx_size;
1350 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1351 xfer->hdr.type = MSG_TYPE_COMMAND;
1352 xfer->hdr.id = msg_id;
1353 xfer->hdr.poll_completion = false;
1355 *p = xfer;
1482 * @t: A reference to the underlying xfer initialized and used transparently by
2209 struct scmi_xfer *xfer;
2236 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2237 if (!xfer)
2240 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2242 if (!xfer->rx.buf)
2245 xfer->tx.buf = xfer->rx.buf;
2246 init_completion(&xfer->done);
2247 spin_lock_init(&xfer->lock);
2249 /* Add initialized xfer to the free list */
2250 hlist_add_head(&xfer->node, &info->free_xfers);
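
The final fragments are from the per-channel initialization: every xfer descriptor is allocated up front, given a single max_msg_size buffer that serves as both rx and tx, has its completion and lock initialized, and is then parked on the free list. A minimal sketch of the same idea with plain calloc(); MAX_MSG and MAX_MSG_SIZE are assumed values and all names are invented:

    #include <stddef.h>
    #include <stdlib.h>

    #define MAX_MSG      32     /* assumed pool depth */
    #define MAX_MSG_SIZE 128    /* assumed max payload size */

    struct sketch_xfer {
        unsigned char *tx_buf;
        unsigned char *rx_buf;
        struct sketch_xfer *next;
    };

    static struct sketch_xfer *free_list;

    /* Preallocate the whole pool up front so the send path never allocates. */
    static int xfer_pool_init(void)
    {
        int i;

        for (i = 0; i < MAX_MSG; i++) {
            struct sketch_xfer *x = calloc(1, sizeof(*x));

            if (!x)
                return -1;

            x->rx_buf = calloc(1, MAX_MSG_SIZE);
            if (!x->rx_buf) {
                free(x);
                return -1;
            }
            x->tx_buf = x->rx_buf;      /* tx shares the rx buffer */

            x->next = free_list;        /* park it on the free list */
            free_list = x;
        }
        return 0;
    }
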