Lines Matching defs:pfault

445 struct mlx5_pagefault *pfault,
448 int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
449 pfault->wqe.wq_num : pfault->token;
454 MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
455 MLX5_SET(page_fault_resume_in, in, token, pfault->token);
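
The matches at 445-455 all fall inside mlx5_ib_page_fault_resume(), which acks the fault back to the HCA. A minimal sketch of how they likely fit together, assuming the mlx5_ifc page_fault_resume_in layout and the core driver's mlx5_cmd_exec_in() helper; the error-logging text is illustrative, not the driver's exact message:

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
	/* WQE faults identify the queue by WQ number; RDMA faults only
	 * carry the fault token, so resume with that instead. */
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
	MLX5_SET(page_fault_resume_in, in, token, pfault->token);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, error, !!error);

	if (mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in))
		mlx5_ib_err(dev, "Failed to resume page fault on WQ 0x%x\n",
			    wq_num);
}
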
1041 * @pfault contains page fault information.
1056 struct mlx5_pagefault *pfault,
1097 pfault->bytes_committed);
1104 if (inline_segment || bcnt <= pfault->bytes_committed) {
1105 pfault->bytes_committed -=
1107 pfault->bytes_committed);
1113 &pfault->bytes_committed,
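
The matches at 1041-1113 are the bytes_committed accounting in pagefault_data_segments(): segments the HCA has already committed are skipped, and only the remainder is faulted in. A sketch of that loop step; the min_t() clamp on the elided line 1106, the locals (bcnt, inline_segment, key, io_virt, bytes_mapped), and the single-segment helper's argument order are assumptions about the unshown surrounding code:

	/* The HCA reports how many bytes it had already committed before
	 * faulting; burn that credit against whole segments first. */
	if (inline_segment || bcnt <= pfault->bytes_committed) {
		pfault->bytes_committed -= min_t(size_t, bcnt,
						 pfault->bytes_committed);
		continue;
	}

	/* Any remaining credit is consumed inside the helper through
	 * &pfault->bytes_committed before the actual page-in. */
	ret = pagefault_single_data_segment(dev, NULL, key, io_virt, bcnt,
					    &pfault->bytes_committed,
					    bytes_mapped);
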
1128 struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
1132 u16 wqe_index = pfault->wqe.wqe_index;
1267 struct mlx5_pagefault *pfault)
1269 bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
1270 u16 wqe_index = pfault->wqe.wqe_index;
1279 res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1281 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1288 pfault->type);
1306 dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
1328 ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
1345 ret, wqe_index, pfault->token);
1348 mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1350 pfault->wqe.wq_num, resume_with_error,
1351 pfault->type);
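
Lines 1267-1351 trace mlx5_ib_mr_wqe_pfault_handler(): resolve the WQ from pfault->wqe.wq_num, read and parse the faulting WQE, fault its data segments in, then resume the queue, with error if anything failed. A condensed, hedged sketch of that control flow; the WQE read/parse step is only summarized in a comment, the receive-queue flag (!sq) in the last pagefault_data_segments() argument is an assumption about the unshown continuation of line 1328, and the debug message text is illustrative:

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
{
	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
	int resume_with_error = 1;
	u32 bytes_mapped, total_wqe_bytes;
	struct mlx5_core_rsc_common *res;
	void *wqe, *wqe_end;
	int ret;

	res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
	if (!res) {
		mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n",
			    pfault->wqe.wq_num);
		return;
	}

	/* Elided here: copy WQE pfault->wqe.wqe_index out of the SQ or RQ
	 * buffer, then let mlx5_ib_mr_initiator/responder_pfault_handler()
	 * bound the parsed segment list as [wqe, wqe_end). */

	ret = pagefault_data_segments(dev, pfault, wqe, wqe_end,
				      &bytes_mapped, &total_wqe_bytes, !sq);
	if (ret >= 0)
		resume_with_error = 0;	/* real check is stricter */

	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "Page fault completed: QP 0x%x, error %d, type 0x%x\n",
		    pfault->wqe.wq_num, resume_with_error, pfault->type);
}
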
1363 struct mlx5_pagefault *pfault)
1367 u32 prefetch_len = pfault->bytes_committed;
1369 u32 rkey = pfault->rdma.r_key;
1374 * (and uses the pfault context), and then (after resuming the QP)
1375 * prefetches more pages. The second operation cannot use the pfault
1378 pfault->rdma.rdma_va += pfault->bytes_committed;
1379 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1380 pfault->rdma.rdma_op_len);
1381 pfault->bytes_committed = 0;
1383 address = pfault->rdma.rdma_va;
1384 length = pfault->rdma.rdma_op_len;
1391 length = pfault->rdma.packet_size;
1396 &pfault->bytes_committed, NULL);
1401 mlx5_ib_page_fault_resume(dev, pfault, 1);
1404 ret, pfault->token, pfault->type);
1408 mlx5_ib_page_fault_resume(dev, pfault, 0);
1410 pfault->token, pfault->type,
1426 ret, pfault->token, address, prefetch_len);
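
The RDMA-side matches (1363-1426) belong to mlx5_ib_mr_rdma_pfault_handler(). The fragment at 1378-1381 is the key step: the already-committed prefix is trimmed off the faulting RDMA range before bytes_committed is zeroed, because the resumed QP may kick off further prefetching that must not reuse this pfault context. A sketch of that trimming and of the fallback to the packet size, assuming the surrounding locals:

	u32 rkey = pfault->rdma.r_key;
	u32 prefetch_len = pfault->bytes_committed;
	u64 address;
	u32 length;

	/* Drop the prefix the HCA already committed, then clear the
	 * counter so later prefetch work cannot double-count it. */
	pfault->rdma.rdma_va += pfault->bytes_committed;
	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
					pfault->rdma.rdma_op_len);
	pfault->bytes_committed = 0;

	address = pfault->rdma.rdma_va;
	length  = pfault->rdma.rdma_op_len;

	/* A zero op length means only the stalled packet is known;
	 * fault just that much so the QP can make forward progress. */
	if (length == 0)
		length = pfault->rdma.packet_size;

	/* rkey/address/length/prefetch_len feed the fault, resume and
	 * prefetch calls matched at 1391-1426, elided here. */
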
1431 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1433 u8 event_subtype = pfault->event_subtype;
1437 mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1440 mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1445 mlx5_ib_page_fault_resume(dev, pfault, 1);
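
The matches at 1431-1445 form the top-level dispatcher, mlx5_ib_pfault(), which fans out on the event subtype and resumes with an error for anything it does not recognize. A sketch consistent with those lines; the error message text is illustrative:

static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
{
	u8 event_subtype = pfault->event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
		break;
	default:
		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
			    event_subtype);
		/* Unknown subtype: tell the HCA to resume with error. */
		mlx5_ib_page_fault_resume(dev, pfault, 1);
	}
}
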
1451 struct mlx5_pagefault *pfault = container_of(work,
1454 struct mlx5_ib_pf_eq *eq = pfault->eq;
1456 mlx5_ib_pfault(eq->dev, pfault);
1457 mempool_free(pfault, eq->pool);
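
Lines 1451-1457 are the deferred-work side: each fault is handled on a workqueue and its mempool entry is released afterwards. These matches reconstruct nearly the whole handler; only the brace lines are missing:

static void mlx5_ib_eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_ib_pf_eq *eq = pfault->eq;

	/* Resolve the fault, then return the descriptor to the pool the
	 * EQ handler allocated it from. */
	mlx5_ib_pfault(eq->dev, pfault);
	mempool_free(pfault, eq->pool);
}
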
1463 struct mlx5_pagefault *pfault;
1468 pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
1469 if (!pfault) {
1475 pfault->event_subtype = eqe->sub_type;
1476 pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
1480 eqe->sub_type, pfault->bytes_committed);
1485 pfault->type =
1487 pfault->token =
1490 pfault->rdma.r_key =
1492 pfault->rdma.packet_size =
1494 pfault->rdma.rdma_op_len =
1496 pfault->rdma.rdma_va =
1500 pfault->type, pfault->token,
1501 pfault->rdma.r_key);
1504 pfault->rdma.rdma_op_len,
1505 pfault->rdma.rdma_va);
1510 pfault->type =
1512 pfault->token =
1514 pfault->wqe.wq_num =
1517 pfault->wqe.wqe_index =
1519 pfault->wqe.packet_size =
1523 pfault->type, pfault->token,
1524 pfault->wqe.wq_num,
1525 pfault->wqe.wqe_index);
1537 pfault->eq = eq;
1538 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
1539 queue_work(eq->wq, &pfault->work);
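
Finally, the field accesses scattered through the EQE-parsing matches (1463-1539) imply the shape of struct mlx5_pagefault itself. The reconstruction below is inferred only from those accesses; the real definition elsewhere in the driver may order the members differently or carry additional ones:

struct mlx5_pagefault {
	u32			bytes_committed;
	u32			token;
	u8			event_subtype;	/* WQE vs. RDMA fault */
	u8			type;		/* requestor/responder flags */
	union {
		struct {			/* WQE faults */
			u32	packet_size;
			u32	wq_num;
			u16	wqe_index;
		} wqe;
		struct {			/* RDMA responder faults */
			u32	r_key;
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
	struct mlx5_ib_pf_eq	*eq;		/* set before queue_work() */
	struct work_struct	work;
};
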