Lines Matching refs: xfer

159 	struct scmi_xfer *xfer;
176 xfer = &minfo->xfer_block[xfer_id];
177 xfer->hdr.seq = xfer_id;
178 reinit_completion(&xfer->done);
179 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
181 return xfer;
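
The lines above (176-181) are the tail of scmi_xfer_get(): once a free token has been reserved, the matching slot in minfo->xfer_block is initialised and returned. A minimal sketch of the reservation step that precedes them, assuming the xfer_lock spinlock and xfer_alloc_table bitmap that appear elsewhere in this listing (field and helper names follow the mainline driver; treat the details as an approximation):

	struct scmi_info *info = handle_to_scmi_info(handle);
	unsigned long flags, bit_pos;
	u16 xfer_id;

	/* Reserve a free token while holding the pool lock */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);	/* pool exhausted */
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;
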
188 * @xfer: message that was reserved by scmi_xfer_get
193 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
203 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
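
__scmi_xfer_put() is the inverse: the sequence number doubles as the bitmap index, so releasing an xfer amounts to clearing its bit, shown here under the same xfer_lock for symmetry with the get path (sketch, same assumptions as above):

static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/* hdr.seq is the token, i.e. the index into xfer_alloc_table */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
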
209 struct scmi_xfer *xfer;
216 xfer = scmi_xfer_get(cinfo->handle, minfo);
217 if (IS_ERR(xfer)) {
219 PTR_ERR(xfer));
224 unpack_scmi_header(msg_hdr, &xfer->hdr);
225 scmi_dump_header_dbg(dev, &xfer->hdr);
227 xfer);
228 scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
229 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
231 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
232 xfer->hdr.protocol_id, xfer->hdr.seq,
235 __scmi_xfer_put(minfo, xfer);
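
unpack_scmi_header() at line 224 splits the 32-bit SCMI message header back into its id, protocol and token fields before the notification payload is forwarded to scmi_notify(). Roughly, per the driver's common.h helpers (exact macro names may vary by kernel version):

static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	hdr->id = MSG_XTRACT_ID(msg_hdr);		/* message id */
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);	/* protocol id */
	hdr->seq = MSG_XTRACT_TOKEN(msg_hdr);		/* transfer token */
}
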
243 struct scmi_xfer *xfer;
255 xfer = &minfo->xfer_block[xfer_id];
262 if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
267 /* It was unexpected, so nobody will clear the xfer if not us */
268 __scmi_xfer_put(minfo, xfer);
274 xfer->rx.len = info->desc->max_msg_size;
276 scmi_dump_header_dbg(dev, &xfer->hdr);
278 info->desc->ops->fetch_response(cinfo, xfer);
280 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
281 xfer->hdr.protocol_id, xfer->hdr.seq,
286 complete(xfer->async_done);
288 complete(&xfer->done);
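
Before indexing the pool at line 255, the response path normally validates the incoming token against the allocation bitmap, and at lines 286/288 it wakes whichever waiter is parked on the xfer: the async_done completion for a delayed response, or the regular done completion used by scmi_do_xfer(). A sketch of that guard and dispatch, assuming the same bitmap as scmi_xfer_get() and the local variables of this handler:

	/* A response token must map to a currently reserved slot */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}
	xfer = &minfo->xfer_block[xfer_id];

	/* ... fetch_response() fills xfer->rx, then wake the waiter ... */
	if (msg_type == MSG_TYPE_DELAYED_RESP)
		complete(xfer->async_done);	/* scmi_do_xfer_with_response() */
	else
		complete(&xfer->done);		/* scmi_do_xfer() */
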
327 * @xfer: message that was reserved by scmi_xfer_get
329 void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
333 __scmi_xfer_put(&info->tx_minfo, xfer);
339 struct scmi_xfer *xfer, ktime_t stop)
343 return info->desc->ops->poll_done(cinfo, xfer) ||
351 * @xfer: Transfer to initiate and wait for response
357 int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
365 cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
369 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
370 xfer->hdr.protocol_id, xfer->hdr.seq,
371 xfer->hdr.poll_completion);
373 ret = info->desc->ops->send_message(cinfo, xfer);
379 if (xfer->hdr.poll_completion) {
382 spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
385 info->desc->ops->fetch_response(cinfo, xfer);
391 if (!wait_for_completion_timeout(&xfer->done, timeout)) {
398 if (!ret && xfer->hdr.status)
399 ret = scmi_to_linux_errno(xfer->hdr.status);
404 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
405 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
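
scmi_do_xfer() has two wait strategies after send_message(): with poll_completion set it busy-polls the transport's poll_done() via the scmi_xfer_done_no_timeout() predicate (lines 339-343) until completion or a ktime deadline, otherwise it sleeps on xfer->done until scmi_handle_response() completes it or the channel's rx timeout elapses. A sketch of that branch; the deadline macro name is taken from the mainline driver and is an assumption here:

	if (xfer->hdr.poll_completion) {
		/* Busy-poll the channel, bounded by a ktime deadline */
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* Sleep until scmi_handle_response() completes &xfer->done */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout))
			ret = -ETIMEDOUT;
	}
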
411 struct scmi_xfer *xfer)
415 xfer->rx.len = info->desc->max_msg_size;
425 * @xfer: Transfer to initiate and wait for response
431 struct scmi_xfer *xfer)
436 xfer->async_done = &async_response;
438 ret = scmi_do_xfer(handle, xfer);
440 if (!wait_for_completion_timeout(xfer->async_done, timeout))
442 else if (xfer->hdr.status)
443 ret = scmi_to_linux_errno(xfer->hdr.status);
446 xfer->async_done = NULL;
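
scmi_do_xfer_with_response() wraps scmi_do_xfer() for commands that carry a DELAYED_RESP: it parks a local completion in xfer->async_done, issues the command, then waits for scmi_handle_response() to complete it. A hypothetical protocol-side caller, using the allocation helper shown further down this listing (FOO_ASYNC_CMD, SCMI_PROTOCOL_FOO and scmi_foo_async_cmd are made-up names for illustration):

static int scmi_foo_async_cmd(const struct scmi_handle *handle, u32 domain)
{
	int ret;
	__le32 *payload;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, FOO_ASYNC_CMD, SCMI_PROTOCOL_FOO,
				 sizeof(*payload), 0, &t);
	if (ret)
		return ret;

	payload = t->tx.buf;
	*payload = cpu_to_le32(domain);

	/* Waits for both the immediate status and the delayed response */
	ret = scmi_do_xfer_with_response(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}
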
470 struct scmi_xfer *xfer;
480 xfer = scmi_xfer_get(handle, minfo);
481 if (IS_ERR(xfer)) {
482 ret = PTR_ERR(xfer);
487 xfer->tx.len = tx_size;
488 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
489 xfer->hdr.id = msg_id;
490 xfer->hdr.protocol_id = prot_id;
491 xfer->hdr.poll_completion = false;
493 *p = xfer;
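
The allocation helper at lines 470-493 (scmi_xfer_get_init() in the mainline driver) is how protocol implementations reserve and describe a message before calling scmi_do_xfer(); passing rx_size as 0 lets the response length default to max_msg_size (line 488). A typical synchronous round trip, again with hypothetical command and symbol names (FOO_ATTRIBUTES_GET, SCMI_PROTOCOL_FOO, scmi_foo_attributes_get):

static int scmi_foo_attributes_get(const struct scmi_handle *handle, u32 *attrs)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, FOO_ATTRIBUTES_GET, SCMI_PROTOCOL_FOO,
				 0, sizeof(__le32), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*attrs = le32_to_cpu(*(__le32 *)t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}
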
617 struct scmi_xfer *xfer;
640 for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
641 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
643 if (!xfer->rx.buf)
646 xfer->tx.buf = xfer->rx.buf;
647 init_completion(&xfer->done);
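
The loop at lines 640-647 pre-initialises every slot of the transmit pool at probe time: one buffer is shared between tx and rx, and each xfer's completion is set up once. The allocations that precede it, sketched with the field names used elsewhere in this listing (an approximation of the init helper, not a verbatim copy):

	/* One slot per in-flight message, plus the token bitmap */
	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* ...the per-xfer loop at lines 640-647 runs here... */

	spin_lock_init(&info->xfer_lock);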