Lines Matching defs:cqp
356 * i40iw_get_cqp_request - get a cqp request struct
357 * @cqp: device cqp ptr
358 * @wait: true if the request will be used in wait mode
360 struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
365 spin_lock_irqsave(&cqp->req_lock, flags);
366 if (!list_empty(&cqp->cqp_avail_reqs)) {
367 cqp_request = list_entry(cqp->cqp_avail_reqs.next,
371 spin_unlock_irqrestore(&cqp->req_lock, flags);
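The allocation path above pops a preallocated request off the cqp_avail_reqs free list while holding req_lock. A minimal reconstruction of the elided middle of that path, assuming the usual kernel free-list idiom (the "list" member name and the list_del_init() call are assumptions, not visible in the listing):

    spin_lock_irqsave(&cqp->req_lock, flags);
    if (!list_empty(&cqp->cqp_avail_reqs)) {
            /* Pop the first preallocated request off the free list. */
            cqp_request = list_entry(cqp->cqp_avail_reqs.next,
                                     struct i40iw_cqp_request, list);
            list_del_init(&cqp_request->list);
    }
    spin_unlock_irqrestore(&cqp->req_lock, flags);

When wait is false and the free list is empty, the driver presumably falls back to allocating a one-off request; that branch is not shown in the fragments.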
395 * i40iw_free_cqp_request - free cqp request
396 * @cqp: cqp ptr
397 * @cqp_request: to be put back in cqp list
399 void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
401 struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
411 spin_lock_irqsave(&cqp->req_lock, flags);
412 list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
413 spin_unlock_irqrestore(&cqp->req_lock, flags);
420 * @cqp: cqp ptr
421 * @cqp_request: to be put back in cqp list
423 void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
427 i40iw_free_cqp_request(cqp, cqp_request);
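The put path above is a thin wrapper that ends in i40iw_free_cqp_request(), which re-links the request onto cqp_avail_reqs under req_lock (lines 411-413). A plausible shape for the wrapper, assuming the request carries an atomic reference count (the "refcount" member name and the atomic_dec_and_test() guard are assumptions):

    void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
                               struct i40iw_cqp_request *cqp_request)
    {
            /* Drop one reference; only the last holder returns the
             * request to the free list. */
            if (atomic_dec_and_test(&cqp_request->refcount))
                    i40iw_free_cqp_request(cqp, cqp_request);
    }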
431 * i40iw_free_pending_cqp_request - free pending cqp request objs
432 * @cqp: cqp ptr
433 * @cqp_request: to be put back in cqp list
435 static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
438 struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
445 i40iw_put_cqp_request(cqp, cqp_request);
452 * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
458 struct i40iw_cqp *cqp = &iwdev->cqp;
463 pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
464 wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
466 cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
468 i40iw_free_pending_cqp_request(cqp, cqp_request);
469 wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
476 i40iw_free_pending_cqp_request(cqp, cqp_request);
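The cleanup path walks the CQP send queue from its current tail and frees whatever request is still parked in scratch_array at each slot. A reconstruction of that loop, assuming pending_work bounds the walk and that idle slots hold NULL:

    while (pending_work--) {
            cqp_request = (struct i40iw_cqp_request *)(unsigned long)
                          cqp->scratch_array[wqe_idx];
            if (cqp_request)
                    i40iw_free_pending_cqp_request(cqp, cqp_request);
            wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
    }

The second i40iw_free_pending_cqp_request() call at line 476 suggests a follow-up pass over requests that never reached the ring, but that list is not visible in these fragments.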
483 * @cqp_request: cqp request to wait on
489 struct i40iw_cqp *iwcqp = &iwdev->cqp;
505 i40iw_pr_err("error cqp command 0x%x timed out\n", info->cqp_cmd);
515 i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
527 * i40iw_handle_cqp_op - process cqp command
529 * @cqp_request: cqp request to process
541 i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
547 i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
548 i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
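Read together, the wait and error-path fragments imply the blocking calling convention: allocate a request with wait set to true, hand it to i40iw_handle_cqp_op(), sleep until the CCQ completion arrives (or the timeout reported at line 505 fires), and rely on the error paths above to return the request to the free list. An illustrative caller, assuming i40iw_handle_cqp_op(iwdev, cqp_request) returns a nonzero status on failure and that the command descriptor lives in cqp_request->info:

    struct i40iw_cqp_request *cqp_request;
    struct cqp_commands_info *cqp_info;      /* type name assumed */
    enum i40iw_status_code status;           /* status type assumed */

    cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
    if (!cqp_request)
            return I40IW_ERR_NO_MEMORY;      /* error code assumed */

    cqp_info = &cqp_request->info;
    /* ... fill cqp_info->cqp_cmd and the matching cqp_info->in.u member ... */

    status = i40iw_handle_cqp_op(iwdev, cqp_request);
    if (status)
            i40iw_pr_err("cqp command 0x%x failed\n", cqp_info->cqp_cmd);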
785 * i40iw_cqp_sds_cmd - create cqp command for sd
787 * @sd_info: information for sd cqp
798 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
815 * i40iw_qp_suspend_resume - cqp command for suspend/resume
824 struct i40iw_sc_cqp *cqp = dev->cqp;
828 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
834 cqp_info->in.u.suspend_resume.cqp = cqp;
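The suspend/resume fragments show the non-blocking variant of the same pattern: wait is false, the command-specific member of cqp_info->in.u is filled in, and the caller does not sleep on completion. A sketch of that flow (the OP_SUSPEND opcode name and any suspend_resume fields other than .cqp are assumptions):

    cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
    if (!cqp_request)
            return;

    cqp_info = &cqp_request->info;
    cqp_info->cqp_cmd = OP_SUSPEND;                  /* opcode name assumed */
    cqp_info->in.u.suspend_resume.cqp = cqp;         /* as in the fragment above */
    /* ... set the remaining suspend_resume fields ... */

    if (i40iw_handle_cqp_op(iwdev, cqp_request))
            i40iw_pr_err("CQP-OP suspend/resume failed\n");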
932 * i40iw_cqp_generic_worker - generic worker for cqp
977 ccq_cqe_info.cqp = NULL;
986 i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
990 * i40iw_cqp_manage_hmc_fcn_callback - callback invoked after cqp completion
991 * @cqp_request: cqp request info struct for the hmc function
1018 * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
1031 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
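The worker and callback fragments cover the asynchronous completion side: the completion is handed off with ccq_cqe_info.cqp cleared (line 977), the registered callback runs, and the final reference is dropped with i40iw_put_cqp_request() (line 986). A sketch of how a command such as the manage-HMC-function request might arm that path (the callback_fcn and param member names are assumptions, and hmcfcninfo is a hypothetical cookie):

    cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
    if (!cqp_request)
            return I40IW_ERR_NO_MEMORY;              /* error code assumed */

    /* Completion is deferred: the generic worker invokes the callback
     * and then performs the final i40iw_put_cqp_request(). */
    cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
    cqp_request->param = hmcfcninfo;

    cqp_info = &cqp_request->info;
    /* ... fill the manage-HMC-function command ... */
    if (i40iw_handle_cqp_op(iwdev, cqp_request))
            i40iw_pr_err("CQP-OP manage HMC function failed\n");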
1050 * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
1064 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1069 cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
1097 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1102 cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
1143 * i40iw_cqp_cq_create_cmd - create a cq for the cqp
1151 struct i40iw_cqp *iwcqp = &iwdev->cqp;
1173 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
1181 struct i40iw_cqp *iwcqp = &iwdev->cqp;
1210 * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
1222 * i40iw_cqp_qp_destroy_cmd - destroy a qp for the cqp
1229 struct i40iw_cqp *iwcqp = &iwdev->cqp;