// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2023 - Cornelis Networks, Inc.
 */

#include <linux/types.h>

#include "hfi.h"
#include "common.h"
#include "device.h"
#include "pinning.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "trace.h"

struct sdma_mmu_node {
	struct mmu_rb_node rb;
	struct hfi1_user_sdma_pkt_q *pq;
	struct page **pages;
	unsigned int npages;
};

static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode, void *arg2,
			 bool *stop);
static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
};

int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
{
	struct hfi1_devdata *dd = pq->dd;
	int ret;

	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret)
		dd_dev_err(dd,
			   "[%u:%u] Failed to register system memory DMA support with MMU: %d\n",
			   pq->ctxt, pq->subctxt, ret);
	return ret;
}

void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
{
	if (pq->handler)
		hfi1_mmu_rb_unregister(pq->handler);
}

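/*
 * Evict cached pinnings, aiming to release at least @npages pages; returns
 * the number of pages actually cleared, which may be less if the cache does
 * not hold that many.
 */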
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	struct evict_data evict_data;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(pq->handler, &evict_data);
	return evict_data.cleared;
}

static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned int start, unsigned int npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, false);
	kfree(pages);
}

static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
{
	return node->rb.handler->mn.mm;
}

static void free_system_node(struct sdma_mmu_node *node)
{
	if (node->npages) {
		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
				   node->npages);
		atomic_sub(node->npages, &node->pq->n_locked);
	}
	kfree(node);
}

/*
 * Takes an additional kref (via kref_get()) on the returned rb_node to
 * prevent it from being released until after it has been assigned to an
 * SDMA descriptor (struct sdma_desc) in add_system_iovec_to_sdma_packet(),
 * even if the virtual address range for rb_node is invalidated between now
 * and then.
 */
static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
					      unsigned long start,
					      unsigned long end)
{
	struct mmu_rb_node *rb_node;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
	if (!rb_node) {
		spin_unlock_irqrestore(&handler->lock, flags);
		return NULL;
	}

	/* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
	kref_get(&rb_node->refcount);
	spin_unlock_irqrestore(&handler->lock, flags);

	return container_of(rb_node, struct sdma_mmu_node, rb);
}

static int pin_system_pages(struct user_sdma_request *req,
			    uintptr_t start_address, size_t length,
			    struct sdma_mmu_node *node, int npages)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	int pinned, cleared;
	struct page **pages;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

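	/*
	 * If pinning npages more pages would exceed what hfi1_can_pin_pages()
	 * allows, try to evict cached pinnings first. When eviction frees
	 * enough pages, re-check; otherwise fall through and let
	 * hfi1_acquire_user_pages() make the final call.
	 */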
retry:
	if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
				npages)) {
		SDMA_DBG(req, "Evicting: nlocked %u npages %u",
			 atomic_read(&pq->n_locked), npages);
		cleared = sdma_cache_evict(pq, npages);
		if (cleared >= npages)
			goto retry;
	}

	SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
		 start_address, node->npages, npages);
	pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
					 pages);

	if (pinned < 0) {
		kfree(pages);
		SDMA_DBG(req, "pinned %d", pinned);
		return pinned;
	}
	if (pinned != npages) {
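		/*
		 * node->npages is still 0 here (the node was just
		 * kzalloc()'d by the caller), so this releases the
		 * partially pinned pages starting at index 0.
		 */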
		unpin_vector_pages(current->mm, pages, node->npages, pinned);
		SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
		return -EFAULT;
	}
	node->rb.addr = start_address;
	node->rb.len = length;
	node->pages = pages;
	node->npages = npages;
	atomic_add(pinned, &pq->n_locked);
	SDMA_DBG(req, "done. pinned %d", pinned);
	return 0;
}

/*
 * kref refcount on *node_p will be 2 on successful addition: one kref from
 * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
 * released until after *node_p is assigned to an SDMA descriptor (struct
 * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
 * address range for *node_p is invalidated between now and then.
 */
static int add_system_pinning(struct user_sdma_request *req,
			      struct sdma_mmu_node **node_p,
			      unsigned long start, unsigned long len)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* First kref "moves" to mmu_rb_handler */
	kref_init(&node->rb.refcount);

	/* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
	kref_get(&node->rb.refcount);

	node->pq = pq;
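	/*
	 * start and len are page-aligned by get_system_cache_entry(), the
	 * only caller, so PFN_DOWN(len) is the exact number of pages to pin.
	 */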
	ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
	if (ret == 0) {
		ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
		if (ret)
			free_system_node(node);
		else
			*node_p = node;

		return ret;
	}

	kfree(node);
	return ret;
}

static int get_system_cache_entry(struct user_sdma_request *req,
				  struct sdma_mmu_node **node_p,
				  size_t req_start, size_t req_len)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
	u64 end = PFN_ALIGN(req_start + req_len);
	int ret;

	if ((end - start) == 0) {
		SDMA_DBG(req,
			 "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
			 req_start, req_len, start, end);
		return -EINVAL;
	}

	SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);

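	/*
	 * Look for a cached pinning covering the start of the page-aligned
	 * range. If none exists, pin the whole range. If an entry exists but
	 * begins after start, prepend a new pinning covering the gap and
	 * return that instead. -EEXIST from an insert means another context
	 * raced us, so retry the lookup.
	 */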
	while (1) {
		struct sdma_mmu_node *node =
			find_system_node(pq->handler, start, end);
		u64 prepend_len = 0;

		SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
		if (!node) {
			ret = add_system_pinning(req, node_p, start,
						 end - start);
			if (ret == -EEXIST) {
				/*
				 * Another execution context has inserted a
				 * conflicting entry first.
				 */
				continue;
			}
			return ret;
		}

		if (node->rb.addr <= start) {
			/*
			 * This entry covers at least part of the region. If it doesn't extend
			 * to the end, then this will be called again for the next segment.
			 */
			*node_p = node;
			return 0;
		}

		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
			 node->rb.addr, kref_read(&node->rb.refcount));
		prepend_len = node->rb.addr - start;

		/*
		 * This node will not be returned; a new node will be added
		 * instead. Release the reference taken on it in
		 * find_system_node().
		 */
		kref_put(&node->rb.refcount, hfi1_mmu_rb_release);

		/* Prepend a node to cover the beginning of the allocation */
		ret = add_system_pinning(req, node_p, start, prepend_len);
		if (ret == -EEXIST) {
			/* Another execution context has inserted a conflicting entry first. */
			continue;
		}
		return ret;
	}
}

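/*
 * Reference callbacks handed to sdma_txadd_page(). The SDMA layer is expected
 * to invoke them as a descriptor takes and later drops its reference on the
 * cached pinning, keeping the pages pinned until the descriptor completes.
 */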
static void sdma_mmu_rb_node_get(void *ctx)
{
	struct mmu_rb_node *node = ctx;

	kref_get(&node->refcount);
}

static void sdma_mmu_rb_node_put(void *ctx)
{
	struct sdma_mmu_node *node = ctx;

	kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
}

static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx,
				      struct sdma_mmu_node *cache_entry,
				      size_t start,
				      size_t from_this_cache_entry)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	unsigned int page_offset;
	unsigned int from_this_page;
	size_t page_index;
	void *ctx;
	int ret;

	/*
	 * Because the cache may be more fragmented than the memory that is being accessed,
	 * it's not strictly necessary to have a descriptor per cache entry.
	 */

	while (from_this_cache_entry) {
		page_index = PFN_DOWN(start - cache_entry->rb.addr);

		if (page_index >= cache_entry->npages) {
			SDMA_DBG(req,
				 "Request for page_index %zu >= cache_entry->npages %u",
				 page_index, cache_entry->npages);
			return -EINVAL;
		}

		page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
		from_this_page = PAGE_SIZE - page_offset;

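		/*
		 * Only the last descriptor built from this cache entry is
		 * given a non-NULL ctx, so that at most one extra reference
		 * per mapping is taken via the get/put callbacks passed to
		 * sdma_txadd_page() below; intermediate pages pass NULL.
		 */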
		if (from_this_page < from_this_cache_entry) {
			ctx = NULL;
		} else {
			/*
			 * In the case they are equal the next line has no practical effect,
			 * but it's better to do a register to register copy than a conditional
			 * branch.
			 */
			from_this_page = from_this_cache_entry;
			ctx = cache_entry;
		}

		ret = sdma_txadd_page(pq->dd, &tx->txreq,
				      cache_entry->pages[page_index],
				      page_offset, from_this_page,
				      ctx,
				      sdma_mmu_rb_node_get,
				      sdma_mmu_rb_node_put);
		if (ret) {
			/*
			 * When there's a failure, the entire request is freed by
			 * user_sdma_send_pkts().
			 */
			SDMA_DBG(req,
				 "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
				 ret, page_index, page_offset, from_this_page);
			return ret;
		}
		start += from_this_page;
		from_this_cache_entry -= from_this_page;
	}
	return 0;
}

static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
					   struct user_sdma_txreq *tx,
					   struct user_sdma_iovec *iovec,
					   size_t from_this_iovec)
{
	while (from_this_iovec > 0) {
		struct sdma_mmu_node *cache_entry;
		size_t from_this_cache_entry;
		size_t start;
		int ret;

		start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
		ret = get_system_cache_entry(req, &cache_entry, start,
					     from_this_iovec);
		if (ret) {
			SDMA_DBG(req, "pin system segment failed %d", ret);
			return ret;
		}

		from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
		if (from_this_cache_entry > from_this_iovec)
			from_this_cache_entry = from_this_iovec;

		ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
						 from_this_cache_entry);

		/*
		 * Done adding cache_entry to zero or more sdma_desc. Can
		 * kref_put() the "safety" kref taken under
		 * get_system_cache_entry().
		 */
		kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);

		if (ret) {
			SDMA_DBG(req, "add system segment failed %d", ret);
			return ret;
		}

		iovec->offset += from_this_cache_entry;
		from_this_iovec -= from_this_cache_entry;
	}

	return 0;
}

/*
 * Add up to pkt_data_remaining bytes to the txreq, starting at the current
 * offset in the given iovec entry and continuing until all of that data has
 * been added or the iovec entry type changes.
 *
 * On success, prior to returning, adjust pkt_data_remaining, req->iov_idx, and
 * the offset value in req->iov[req->iov_idx] to reflect the data that has been
 * consumed.
 */
int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req,
				  struct user_sdma_txreq *tx,
				  struct user_sdma_iovec *iovec,
				  u32 *pkt_data_remaining)
{
	size_t remaining_to_add = *pkt_data_remaining;
	/*
	 * Walk through iovec entries, ensure the associated pages
	 * are pinned and mapped, add data to the packet until no more
	 * data remains to be added or the iovec entry type changes.
	 */
	while (remaining_to_add > 0) {
		struct user_sdma_iovec *cur_iovec;
		size_t from_this_iovec;
		int ret;

		cur_iovec = iovec;
		from_this_iovec = iovec->iov.iov_len - iovec->offset;

		if (from_this_iovec > remaining_to_add) {
			from_this_iovec = remaining_to_add;
		} else {
			/* The current iovec entry will be consumed by this pass. */
			req->iov_idx++;
			iovec++;
		}

		ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
						      from_this_iovec);
		if (ret)
			return ret;

		remaining_to_add -= from_this_iovec;
	}
	*pkt_data_remaining = remaining_to_add;

	return 0;
}

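/* Match on the exact starting address only; the length is not considered. */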
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return (bool)(node->addr == addr);
}

/*
 * Return 1 to remove the node from the rb tree and call the remove op.
 *
 * Called with the rb tree lock held.
 */
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *evict_arg, bool *stop)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
	struct evict_data *evict_data = evict_arg;

	/* this node will be evicted, add its pages to our count */
	evict_data->cleared += node->npages;

	/* have enough pages been cleared? */
	if (evict_data->cleared >= evict_data->target)
		*stop = true;

	return 1; /* remove this node */
}

static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	free_system_node(node);
}