xref: /kernel/linux/linux-6.6/arch/arm64/kvm/hyp/nvhe/ffa.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
4 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
5 * Framework for Arm A-profile", which is specified by Arm in document
6 * number DEN0077.
7 *
8 * Copyright (C) 2022 - Google LLC
9 * Author: Andrew Walbran <qwandor@google.com>
10 *
11 * This driver hooks into the SMC trapping logic for the host and intercepts
12 * all calls falling within the FF-A range. Each call is either:
13 *
14 *	- Forwarded on unmodified to the SPMD at EL3
15 *	- Rejected as "unsupported"
16 *	- Accompanied by a host stage-2 page-table check/update and reissued
17 *
18 * Consequently, any attempts by the host to make guest memory pages
19 * accessible to the secure world using FF-A will be detected either here
20 * (in the case that the memory is already owned by the guest) or during
21 * donation to the guest (in the case that the memory was previously shared
22 * with the secure world).
23 *
24 * To allow the rolling-back of page-table updates and FF-A calls in the
25 * event of failure, operations involving the RXTX buffers are locked for
26 * the duration and are therefore serialised.
27 */
28
29#include <linux/arm-smccc.h>
30#include <linux/arm_ffa.h>
31#include <asm/kvm_pkvm.h>
32
33#include <nvhe/ffa.h>
34#include <nvhe/mem_protect.h>
35#include <nvhe/memory.h>
36#include <nvhe/trap_handler.h>
37#include <nvhe/spinlock.h>
38
39/*
40 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
41 * We share this ID with the host.
42 */
43#define HOST_FFA_ID	0
44
/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void	*buf;	/* Hyp VA of the scratch buffer */
	size_t	len;	/* Size of the buffer in bytes */
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
56
/* An RX/TX mailbox pair, addressed by hyp VA once mapped. */
struct kvm_ffa_buffers {
	hyp_spinlock_t lock;	/* Serialises all use of @tx and @rx */
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
70
71static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
72{
73	*res = (struct arm_smccc_res) {
74		.a0	= FFA_ERROR,
75		.a2	= ffa_errno,
76	};
77}
78
79static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
80{
81	if (ret == FFA_RET_SUCCESS) {
82		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
83						.a2 = prop };
84	} else {
85		ffa_to_smccc_error(res, ret);
86	}
87}
88
/* As ffa_to_smccc_res_prop(), but with no properties (w2 == 0 on success). */
static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}
93
/* Copy an SMCCC result into the host's return registers x0-x3. */
static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}
102
103static bool is_ffa_call(u64 func_id)
104{
105	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
106	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
107	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
108	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
109}
110
111static int ffa_map_hyp_buffers(u64 ffa_page_count)
112{
113	struct arm_smccc_res res;
114
115	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
116			  hyp_virt_to_phys(hyp_buffers.tx),
117			  hyp_virt_to_phys(hyp_buffers.rx),
118			  ffa_page_count,
119			  0, 0, 0, 0,
120			  &res);
121
122	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
123}
124
/*
 * Unregister the hypervisor's RX/TX mailboxes from the SPMD.
 * Returns FFA_RET_SUCCESS or the FF-A error code from w2.
 */
static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
136
/* Forward a memory-descriptor fragment (FFA_MEM_FRAG_TX) to the SPMD. */
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}
145
/*
 * Request the next fragment of a retrieved descriptor (FFA_MEM_FRAG_RX)
 * from the SPMD, starting at byte offset @fragoff.
 */
static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}
154
/*
 * Issue an FFA_MEM_SHARE/FFA_MEM_LEND (@func_id) for a descriptor of
 * @len bytes, of which @fraglen are in the hyp TX buffer.
 */
static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			  u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}
162
/* Ask the SPMD to reclaim the memory region identified by the handle. */
static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}
171
/*
 * Issue FFA_MEM_RETRIEVE_REQ with a @len-byte request descriptor in the
 * hyp TX buffer (total length == fragment length, i.e. unfragmented).
 */
static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}
179
/*
 * Handle FFA_FN64_RXTX_MAP from the host: register the hypervisor's own
 * mailboxes with the SPMD and then share + pin the host's TX/RX pages
 * into the hyp stage-1 so descriptors can be copied and inspected before
 * being forwarded. Failures unwind in reverse order of acquisition.
 */
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	/* The host must pass exactly one mailbox's worth of FF-A pages. */
	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Only one mapping is allowed at a time. */
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	/* Pin the shared pages so the host cannot unshare them under us. */
	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

	/* Error unwind: undo the steps above in reverse order. */
err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}
258
/*
 * Handle FFA_RXTX_UNMAP from the host: unpin and unshare the host's
 * mailbox pages, then tear down the hypervisor's own SPMD mapping.
 */
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	/* We proxy a single FF-A endpoint: the host's own ID. */
	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Nothing to do if the host never mapped its buffers. */
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}
291
292static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
293				   u32 nranges)
294{
295	u32 i;
296
297	for (i = 0; i < nranges; ++i) {
298		struct ffa_mem_region_addr_range *range = &ranges[i];
299		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
300		u64 pfn = hyp_phys_to_pfn(range->address);
301
302		if (!PAGE_ALIGNED(sz))
303			break;
304
305		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
306			break;
307	}
308
309	return i;
310}
311
312static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
313				     u32 nranges)
314{
315	u32 i;
316
317	for (i = 0; i < nranges; ++i) {
318		struct ffa_mem_region_addr_range *range = &ranges[i];
319		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
320		u64 pfn = hyp_phys_to_pfn(range->address);
321
322		if (!PAGE_ALIGNED(sz))
323			break;
324
325		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
326			break;
327	}
328
329	return i;
330}
331
332static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
333				 u32 nranges)
334{
335	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
336	int ret = 0;
337
338	if (nshared != nranges) {
339		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
340		ret = FFA_RET_DENIED;
341	}
342
343	return ret;
344}
345
346static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
347				   u32 nranges)
348{
349	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
350	int ret = 0;
351
352	if (nunshared != nranges) {
353		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
354		ret = FFA_RET_DENIED;
355	}
356
357	return ret;
358}
359
/*
 * Handle FFA_MEM_FRAG_TX from the host: copy the fragment out of the
 * host's TX buffer, share the address ranges it describes with the
 * secure world, and forward the fragment to the SPMD. On failure the
 * whole transaction is aborted via FFA_MEM_RECLAIM.
 */
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	/* The fragment must fit in our TX mailbox... */
	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	/* ...and contain a whole number of address ranges. */
	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	/* Snapshot the fragment so the host can't change it under us. */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}
417
418static __always_inline void do_ffa_mem_xfer(const u64 func_id,
419					    struct arm_smccc_res *res,
420					    struct kvm_cpu_context *ctxt)
421{
422	DECLARE_REG(u32, len, ctxt, 1);
423	DECLARE_REG(u32, fraglen, ctxt, 2);
424	DECLARE_REG(u64, addr_mbz, ctxt, 3);
425	DECLARE_REG(u32, npages_mbz, ctxt, 4);
426	struct ffa_composite_mem_region *reg;
427	struct ffa_mem_region *buf;
428	u32 offset, nr_ranges;
429	int ret = 0;
430
431	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
432		     func_id != FFA_FN64_MEM_LEND);
433
434	if (addr_mbz || npages_mbz || fraglen > len ||
435	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
436		ret = FFA_RET_INVALID_PARAMETERS;
437		goto out;
438	}
439
440	if (fraglen < sizeof(struct ffa_mem_region) +
441		      sizeof(struct ffa_mem_region_attributes)) {
442		ret = FFA_RET_INVALID_PARAMETERS;
443		goto out;
444	}
445
446	hyp_spin_lock(&host_buffers.lock);
447	if (!host_buffers.tx) {
448		ret = FFA_RET_INVALID_PARAMETERS;
449		goto out_unlock;
450	}
451
452	buf = hyp_buffers.tx;
453	memcpy(buf, host_buffers.tx, fraglen);
454
455	offset = buf->ep_mem_access[0].composite_off;
456	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
457		ret = FFA_RET_INVALID_PARAMETERS;
458		goto out_unlock;
459	}
460
461	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
462		ret = FFA_RET_INVALID_PARAMETERS;
463		goto out_unlock;
464	}
465
466	reg = (void *)buf + offset;
467	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
468	if (nr_ranges % sizeof(reg->constituents[0])) {
469		ret = FFA_RET_INVALID_PARAMETERS;
470		goto out_unlock;
471	}
472
473	nr_ranges /= sizeof(reg->constituents[0]);
474	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
475	if (ret)
476		goto out_unlock;
477
478	ffa_mem_xfer(res, func_id, len, fraglen);
479	if (fraglen != len) {
480		if (res->a0 != FFA_MEM_FRAG_RX)
481			goto err_unshare;
482
483		if (res->a3 != fraglen)
484			goto err_unshare;
485	} else if (res->a0 != FFA_SUCCESS) {
486		goto err_unshare;
487	}
488
489out_unlock:
490	hyp_spin_unlock(&host_buffers.lock);
491out:
492	if (ret)
493		ffa_to_smccc_res(res, ret);
494	return;
495
496err_unshare:
497	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
498	goto out_unlock;
499}
500
501static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
502			       struct kvm_cpu_context *ctxt)
503{
504	DECLARE_REG(u32, handle_lo, ctxt, 1);
505	DECLARE_REG(u32, handle_hi, ctxt, 2);
506	DECLARE_REG(u32, flags, ctxt, 3);
507	struct ffa_composite_mem_region *reg;
508	u32 offset, len, fraglen, fragoff;
509	struct ffa_mem_region *buf;
510	int ret = 0;
511	u64 handle;
512
513	handle = PACK_HANDLE(handle_lo, handle_hi);
514
515	hyp_spin_lock(&host_buffers.lock);
516
517	buf = hyp_buffers.tx;
518	*buf = (struct ffa_mem_region) {
519		.sender_id	= HOST_FFA_ID,
520		.handle		= handle,
521	};
522
523	ffa_retrieve_req(res, sizeof(*buf));
524	buf = hyp_buffers.rx;
525	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
526		goto out_unlock;
527
528	len = res->a1;
529	fraglen = res->a2;
530
531	offset = buf->ep_mem_access[0].composite_off;
532	/*
533	 * We can trust the SPMD to get this right, but let's at least
534	 * check that we end up with something that doesn't look _completely_
535	 * bogus.
536	 */
537	if (WARN_ON(offset > len ||
538		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
539		ret = FFA_RET_ABORTED;
540		goto out_unlock;
541	}
542
543	if (len > ffa_desc_buf.len) {
544		ret = FFA_RET_NO_MEMORY;
545		goto out_unlock;
546	}
547
548	buf = ffa_desc_buf.buf;
549	memcpy(buf, hyp_buffers.rx, fraglen);
550
551	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
552		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
553		if (res->a0 != FFA_MEM_FRAG_TX) {
554			ret = FFA_RET_INVALID_PARAMETERS;
555			goto out_unlock;
556		}
557
558		fraglen = res->a3;
559		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
560	}
561
562	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
563	if (res->a0 != FFA_SUCCESS)
564		goto out_unlock;
565
566	reg = (void *)buf + offset;
567	/* If the SPMD was happy, then we should be too. */
568	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
569					reg->addr_range_cnt));
570out_unlock:
571	hyp_spin_unlock(&host_buffers.lock);
572
573	if (ret)
574		ffa_to_smccc_res(res, ret);
575}
576
/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_REQ:
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return false;
	}

	/* Everything else is handled at EL2 or forwarded to the SPMD. */
	return true;
}
607
608static bool do_ffa_features(struct arm_smccc_res *res,
609			    struct kvm_cpu_context *ctxt)
610{
611	DECLARE_REG(u32, id, ctxt, 1);
612	u64 prop = 0;
613	int ret = 0;
614
615	if (!ffa_call_supported(id)) {
616		ret = FFA_RET_NOT_SUPPORTED;
617		goto out_handled;
618	}
619
620	switch (id) {
621	case FFA_MEM_SHARE:
622	case FFA_FN64_MEM_SHARE:
623	case FFA_MEM_LEND:
624	case FFA_FN64_MEM_LEND:
625		ret = FFA_RET_SUCCESS;
626		prop = 0; /* No support for dynamic buffers */
627		goto out_handled;
628	default:
629		return false;
630	}
631
632out_handled:
633	ffa_to_smccc_res_prop(res, ret, prop);
634	return true;
635}
636
/*
 * Entry point for host SMCs in the FF-A range. Returns true if the call
 * was handled here (result placed in the host's x0-x3), false if it
 * should be forwarded on to EL3 unmodified.
 */
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	struct arm_smccc_res res;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	}

	/* Anything else in the FF-A range goes straight through to EL3. */
	if (ffa_call_supported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}
693
/*
 * One-time initialisation of the FF-A proxy. Probes the SPMD for a
 * compatible FF-A version, our endpoint ID and the minimum RXTX buffer
 * size, then carves the @pages donation up into the hyp TX/RX mailboxes
 * and the descriptor reassembly buffer.
 *
 * Returns 0 both on success and when FF-A is simply absent (the proxy is
 * then inert), or a negative errno on an incompatible implementation.
 */
int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
	 * Firmware returns the maximum supported version of the FF-A
	 * implementation. Check that the returned version is
	 * backwards-compatible with the hyp according to the rules in DEN0077A
	 * v1.1 REL0 13.2.1.
	 *
	 * Of course, things are never simple when dealing with firmware. v1.1
	 * broke ABI with v1.0 on several structures, which is itself
	 * incompatible with the aforementioned versioning scheme. The
	 * expectation is that v1.x implementations that do not support the v1.0
	 * ABI return NOT_SUPPORTED rather than a version number, according to
	 * DEN0077A v1.1 REL0 18.6.4.
	 */
	if (FFA_MAJOR_VERSION(res.a0) != 1)
		return -EOPNOTSUPP;

	/* We share ID 0 with the host; check the SPMD agrees. */
	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	/* Query the minimum buffer size the SPMD will accept for RXTX_MAP. */
	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	/* Our mailboxes are PAGE_SIZE; we can't satisfy a larger minimum. */
	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	/* Carve up the donated pages: TX mailbox, RX mailbox, then scratch. */
	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}
775