Lines Matching defs:hdev
43 struct hl_device *hdev = cs->ctx->hdev;
47 if (hdev->disabled)
50 q = &hdev->kernel_queues[0];
51 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
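The fragment at lines 43-51 is the completion path: it bails out if the device is disabled, then walks every kernel queue up to asic_prop.max_queues and advances each queue's consumer index. A minimal user-space sketch of that pattern follows; the struct layouts, the jobs_done argument and the update_ci name are simplifications for illustration, not the driver's real definitions.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_QUEUES 4

	struct hw_queue {                  /* stand-in for struct hl_hw_queue */
		atomic_int ci;             /* consumer index, advanced on completion */
	};

	struct device {                    /* stand-in for struct hl_device */
		bool disabled;
		unsigned int max_queues;
		struct hw_queue kernel_queues[MAX_QUEUES];
	};

	/* Advance the CI of every queue that had jobs in the finished submission. */
	static void update_ci(struct device *hdev, const int *jobs_done)
	{
		struct hw_queue *q;
		unsigned int i;

		if (hdev->disabled)
			return;

		q = &hdev->kernel_queues[0];
		for (i = 0; i < hdev->max_queues; i++, q++)
			atomic_fetch_add(&q->ci, jobs_done[i]);
	}

	int main(void)
	{
		struct device dev = { .disabled = false, .max_queues = MAX_QUEUES };
		int jobs[MAX_QUEUES] = { 2, 0, 1, 0 };

		update_ci(&dev, jobs);
		printf("queue 0 ci = %d\n", atomic_load(&dev.kernel_queues[0].ci));
		return 0;
	}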
60 * @hdev: pointer to habanalabs device structure
73 static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
85 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
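ext_and_hw_queue_submit_bd (line 73) ends by ringing the queue's doorbell with the new producer index (line 85). The usual shape of such a helper is: write one bus descriptor at the slot addressed by the producer index, advance the index with wraparound, then notify the hardware. A compressed, self-contained model of that sequence; the bd fields mirror the ctl/len/ptr arguments visible later in the listing, while QUEUE_LEN, the ring layout and the printf doorbell are stand-ins for the driver's real definitions and ASIC callback.

	#include <stdint.h>
	#include <stdio.h>

	#define QUEUE_LEN 256                   /* entries; power of two for masking */

	struct bd {                             /* stand-in for struct hl_bd */
		uint32_t ctl;
		uint32_t len;
		uint64_t ptr;                   /* bus address of the command buffer */
	};

	struct hw_queue {
		struct bd ring[QUEUE_LEN];
		uint32_t pi;                    /* producer index, free running */
		uint32_t hw_queue_id;
	};

	static void ring_doorbell(uint32_t qid, uint32_t pi)
	{
		/* in the driver this is an ASIC callback writing a doorbell register */
		printf("doorbell: queue %u pi %u\n", (unsigned)qid, (unsigned)pi);
	}

	/* Write one descriptor at the current PI, advance PI, notify the device. */
	static void submit_bd(struct hw_queue *q, uint32_t ctl, uint32_t len, uint64_t ptr)
	{
		struct bd *bd = &q->ring[q->pi & (QUEUE_LEN - 1)];

		bd->ctl = ctl;
		bd->len = len;
		bd->ptr = ptr;

		q->pi++;
		ring_doorbell(q->hw_queue_id, q->pi);
	}

	int main(void)
	{
		static struct hw_queue q = { .hw_queue_id = 3 };

		submit_bd(&q, 0, 64, 0x1000);
		return 0;
	}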
91 * @hdev: pointer to hl_device structure
106 static int ext_queue_sanity_checks(struct hl_device *hdev,
111 &hdev->completion_queue[q->cq_id].free_slots_cnt;
118 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
132 dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
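ext_queue_sanity_checks (line 106) must find room in two places before a job is queued: the submission queue itself and the completion queue it reports to, whose spare entries are tracked by the free_slots_cnt counter taken on line 111 (the two dev_dbg messages on lines 118 and 132 correspond to the two failure cases). A self-contained model of that double check; the reservation-by-decrement policy, the error value and the names are assumptions made for the sketch.

	#include <errno.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define QUEUE_LEN 256

	struct hw_queue {
		unsigned int pi;              /* producer index, free running */
		atomic_uint ci;               /* consumer index, free running */
	};

	struct cq {
		atomic_int free_slots_cnt;    /* entries still free on the CQ */
	};

	/* Room left on the submission queue, tolerant of PI/CI wraparound. */
	static unsigned int queue_free_slots(struct hw_queue *q)
	{
		return QUEUE_LEN - (q->pi - atomic_load(&q->ci));
	}

	/*
	 * Fail if the queue cannot hold num_of_entries more descriptors, or if
	 * the completion queue cannot absorb that many completions; on success
	 * the CQ slots are reserved by decrementing the counter.
	 */
	static int sanity_checks(struct hw_queue *q, struct cq *cq, int num_of_entries)
	{
		if (queue_free_slots(q) < (unsigned int)num_of_entries) {
			printf("queue doesn't have room for %d CBs\n", num_of_entries);
			return -EAGAIN;
		}

		if (atomic_fetch_sub(&cq->free_slots_cnt, num_of_entries) < num_of_entries) {
			printf("no space for %d on the CQ\n", num_of_entries);
			atomic_fetch_add(&cq->free_slots_cnt, num_of_entries);
			return -EAGAIN;
		}

		return 0;
	}

	int main(void)
	{
		struct hw_queue q = { .pi = 10 };
		struct cq cq;

		atomic_store(&q.ci, 10);
		atomic_init(&cq.free_slots_cnt, QUEUE_LEN);
		printf("rc = %d\n", sanity_checks(&q, &cq, 4));
		return 0;
	}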
145 * @hdev: pointer to hl_device structure
155 static int int_queue_sanity_checks(struct hl_device *hdev,
162 dev_err(hdev->dev,
172 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
182 * @hdev: Pointer to hl_device structure.
190 static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
199 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
210 * @hdev: pointer to hl_device structure
218 int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
221 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
233 hdev->asic_funcs->hw_queues_lock(hdev);
235 if (hdev->disabled) {
246 rc = ext_queue_sanity_checks(hdev, q, 1, false);
251 ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
255 hdev->asic_funcs->hw_queues_unlock(hdev);
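hl_hw_queue_send_cb_no_cmpl (line 218) shows the locking discipline used throughout this path: take the ASIC's hw_queues lock (line 233), refuse work if the device is disabled (line 235), run the sanity checks, submit the bus descriptor and leave through a single unlock point (line 255). A skeleton of that structure, with a pthread mutex and placeholder helpers standing in for the driver's ASIC callbacks; the error codes are assumptions.

	#include <errno.h>
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t hw_queues_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool device_disabled;

	static int sanity_checks(void) { return 0; }     /* placeholder */
	static void submit_bd(void)    { }               /* placeholder */

	/*
	 * Submission under the per-device queues lock: bail out early if the
	 * device was disabled, validate, submit, and exit via one unlock point.
	 */
	static int send_cb_no_cmpl(void)
	{
		int rc = 0;

		pthread_mutex_lock(&hw_queues_lock);

		if (device_disabled) {
			rc = -EPERM;
			goto out;
		}

		rc = sanity_checks();
		if (rc)
			goto out;

		submit_bd();

	out:
		pthread_mutex_unlock(&hw_queues_lock);
		return rc;
	}

	int main(void)
	{
		int rc = send_cb_no_cmpl();

		printf("rc = %d\n", rc);
		return rc ? 1 : 0;
	}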
270 struct hl_device *hdev = job->cs->ctx->hdev;
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
303 cq = &hdev->completion_queue[q->cq_id];
306 hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
316 ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
329 struct hl_device *hdev = job->cs->ctx->hdev;
330 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
343 hdev->asic_funcs->pqe_write(hdev, pi, &bd);
345 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
358 struct hl_device *hdev = job->cs->ctx->hdev;
359 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
369 offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
388 ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
400 struct hl_device *hdev = ctx->hdev;
413 hw_queue = &hdev->kernel_queues[q_idx];
421 dev_dbg(hdev->dev,
425 hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
444 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
458 dev_dbg(hdev->dev,
463 hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb,
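Lines 400-463 belong to the signal/wait submission path: a signal job is generated against a sync object (SOB) via gen_signal_cb, and when the current SOB is exhausted the code moves on to another one (the "switched to SOB %d" message on line 444) so that waiters built with gen_wait_cb always target a SOB with headroom. A rough, purely illustrative model of that rotation; the group size, the counter limit and all naming here are guesses, not the driver's actual bookkeeping.

	#include <stdio.h>

	#define RSVD_SOBS      2        /* assumed: SOBs reserved per sync-stream queue */
	#define MAX_SOB_VAL 1023        /* assumed: max value a sync object can count to */

	struct sync_stream_q {
		int base_sob_id;        /* first SOB owned by this queue */
		int cur_sob_offset;     /* which of the reserved SOBs is in use */
		int next_sob_val;       /* value the next signal will write */
	};

	/* Pick the SOB the next signal should target, rotating when one fills up. */
	static int next_signal_sob(struct sync_stream_q *q)
	{
		if (q->next_sob_val + 1 > MAX_SOB_VAL) {
			q->cur_sob_offset = (q->cur_sob_offset + 1) % RSVD_SOBS;
			q->next_sob_val = 1;
			printf("switched to SOB %d\n",
			       q->base_sob_id + q->cur_sob_offset);
		} else {
			q->next_sob_val++;
		}

		return q->base_sob_id + q->cur_sob_offset;
	}

	int main(void)
	{
		struct sync_stream_q q = { .base_sob_id = 8, .next_sob_val = MAX_SOB_VAL };

		next_signal_sob(&q);    /* forces the rotation path */
		return 0;
	}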
488 struct hl_device *hdev = ctx->hdev;
494 hdev->asic_funcs->hw_queues_lock(hdev);
496 if (hl_device_disabled_or_in_reset(hdev)) {
498 dev_err(hdev->dev,
504 max_queues = hdev->asic_prop.max_queues;
506 q = &hdev->kernel_queues[0];
511 rc = ext_queue_sanity_checks(hdev, q,
515 rc = int_queue_sanity_checks(hdev, q,
519 rc = hw_queue_sanity_checks(hdev, q,
523 dev_err(hdev->dev, "Queue type %d is invalid\n",
542 spin_lock(&hdev->hw_queues_mirror_lock);
543 list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
546 if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
547 (list_first_entry(&hdev->hw_queues_mirror_list,
550 schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
551 spin_unlock(&hdev->hw_queues_mirror_lock);
553 spin_unlock(&hdev->hw_queues_mirror_lock);
556 if (!hdev->cs_active_cnt++) {
559 ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
584 q = &hdev->kernel_queues[0];
589 &hdev->completion_queue[i].free_slots_cnt;
596 hdev->asic_funcs->hw_queues_unlock(hdev);
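hl_hw_queue_schedule_cs (around lines 488-596) validates every queue touched by the command submission before anything is written: under the hw_queues lock it dispatches per-queue sanity checks by queue type (lines 511-523), and if any queue fails it gives back the completion-queue slots already reserved for the external queues (lines 584-589) before unlocking. A sketch of that validate-all-or-roll-back pattern; the queue types follow the listing, but the counters and rollback bookkeeping are simplified stand-ins.

	#include <stdatomic.h>
	#include <stdio.h>

	enum queue_type { QUEUE_TYPE_EXT, QUEUE_TYPE_INT, QUEUE_TYPE_HW };

	struct hw_queue {
		enum queue_type queue_type;
		int jobs_cnt;                    /* jobs this CS wants on the queue */
		atomic_int cq_free_slots;        /* CQ slots, only meaningful for EXT */
	};

	/* Reserve CQ slots for an external queue; fail if not enough are free. */
	static int ext_checks(struct hw_queue *q)
	{
		if (atomic_fetch_sub(&q->cq_free_slots, q->jobs_cnt) < q->jobs_cnt) {
			atomic_fetch_add(&q->cq_free_slots, q->jobs_cnt);
			return -1;
		}
		return 0;
	}

	static int schedule_cs(struct hw_queue *queues, int max_queues)
	{
		int i, rc = 0;

		for (i = 0; i < max_queues; i++) {
			if (!queues[i].jobs_cnt)
				continue;

			switch (queues[i].queue_type) {
			case QUEUE_TYPE_EXT:
				rc = ext_checks(&queues[i]);
				break;
			case QUEUE_TYPE_INT:
			case QUEUE_TYPE_HW:
				rc = 0;          /* room checks elided in this model */
				break;
			}
			if (rc)
				goto unroll;
		}
		return 0;

	unroll:
		/* give back the CQ slots reserved for queues that already passed */
		for (i = i - 1; i >= 0; i--)
			if (queues[i].jobs_cnt &&
			    queues[i].queue_type == QUEUE_TYPE_EXT)
				atomic_fetch_add(&queues[i].cq_free_slots,
						 queues[i].jobs_cnt);
		return rc;
	}

	int main(void)
	{
		struct hw_queue queues[2] = {
			{ .queue_type = QUEUE_TYPE_EXT, .jobs_cnt = 1 },
			{ .queue_type = QUEUE_TYPE_INT, .jobs_cnt = 1 },
		};

		atomic_init(&queues[0].cq_free_slots, 4);
		printf("rc = %d\n", schedule_cs(queues, 2));
		return 0;
	}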
604 * @hdev: pointer to hl_device structure
607 void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
609 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
614 static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
621 p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
625 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
638 dev_err(hdev->dev,
653 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
657 hdev->asic_funcs->asic_dma_free_coherent(hdev,
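ext_and_cpu_queue_init (line 614) allocates the queue's ring from one of two places: the CPU-accessible DMA pool when the queue belongs to the CPU (line 621), or a regular coherent DMA allocation otherwise (line 625), and the error path on lines 653-657 must free through whichever allocator was used. A small sketch of that pick-the-allocator-and-unwind pattern, with malloc/free standing in for both DMA allocators and setup_shadow as a hypothetical later step that can fail:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	#define QUEUE_SIZE 4096

	struct hw_queue {
		void *kernel_address;            /* CPU mapping of the ring buffer */
		unsigned int pi, ci;
	};

	/* The driver picks one of two DMA allocators; malloc/free model both. */
	static void *ring_alloc(bool is_cpu_queue) { (void)is_cpu_queue; return malloc(QUEUE_SIZE); }
	static void ring_free(bool is_cpu_queue, void *p) { (void)is_cpu_queue; free(p); }

	static int setup_shadow(struct hw_queue *q) { (void)q; return 0; }  /* placeholder */

	static int queue_ring_init(struct hw_queue *q, bool is_cpu_queue)
	{
		void *p = ring_alloc(is_cpu_queue);
		int rc;

		if (!p)
			return -ENOMEM;

		memset(p, 0, QUEUE_SIZE);

		rc = setup_shadow(q);
		if (rc)
			goto free_ring;          /* unwind through the same allocator */

		q->kernel_address = p;
		q->pi = q->ci = 0;
		return 0;

	free_ring:
		ring_free(is_cpu_queue, p);
		return rc;
	}

	int main(void)
	{
		struct hw_queue q;

		return queue_ring_init(&q, false);
	}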
665 static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
669 p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
672 dev_err(hdev->dev,
685 static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
687 return ext_and_cpu_queue_init(hdev, q, true);
690 static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
692 return ext_and_cpu_queue_init(hdev, q, false);
695 static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
699 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
715 static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
717 struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
718 struct asic_fixed_properties *prop = &hdev->asic_prop;
720 int sob, queue_idx = hdev->sync_stream_queue_idx++;
731 hw_sob->hdev = hdev;
738 static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
740 struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
754 * @hdev: pointer to hl_device structure
761 static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
770 rc = ext_queue_init(hdev, q);
773 rc = int_queue_init(hdev, q);
776 rc = cpu_queue_init(hdev, q);
779 rc = hw_queue_init(hdev, q);
785 dev_crit(hdev->dev, "wrong queue type %d during init\n",
792 sync_stream_queue_init(hdev, q->hw_queue_id);
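queue_init (line 761) is a plain dispatcher: it switches on the queue type and calls the matching ext/int/cpu/hw initializer (lines 770-785), then sets up the sync-stream sync objects when the queue supports them (line 792). A minimal sketch of that dispatch; the type names, initializer names and the supports_sync_stream flag follow the fragments above, the rest is filled in for illustration.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum queue_type {
		QUEUE_TYPE_EXT, QUEUE_TYPE_INT, QUEUE_TYPE_CPU, QUEUE_TYPE_HW
	};

	struct hw_queue {
		enum queue_type queue_type;
		unsigned int hw_queue_id;
		bool supports_sync_stream;
	};

	static int ext_queue_init(struct hw_queue *q) { return 0; }   /* placeholders */
	static int int_queue_init(struct hw_queue *q) { return 0; }
	static int cpu_queue_init(struct hw_queue *q) { return 0; }
	static int hw_queue_init(struct hw_queue *q)  { return 0; }
	static void sync_stream_queue_init(unsigned int q_idx) { (void)q_idx; }

	static int queue_init(struct hw_queue *q, unsigned int hw_queue_id)
	{
		int rc;

		q->hw_queue_id = hw_queue_id;

		switch (q->queue_type) {
		case QUEUE_TYPE_EXT:
			rc = ext_queue_init(q);
			break;
		case QUEUE_TYPE_INT:
			rc = int_queue_init(q);
			break;
		case QUEUE_TYPE_CPU:
			rc = cpu_queue_init(q);
			break;
		case QUEUE_TYPE_HW:
			rc = hw_queue_init(q);
			break;
		default:
			fprintf(stderr, "wrong queue type %d during init\n",
				q->queue_type);
			return -EINVAL;
		}

		if (!rc && q->supports_sync_stream)
			sync_stream_queue_init(q->hw_queue_id);

		return rc;
	}

	int main(void)
	{
		struct hw_queue q = { .queue_type = QUEUE_TYPE_EXT };

		return queue_init(&q, 0);
	}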
805 * @hdev: pointer to hl_device structure
810 static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
839 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
843 hdev->asic_funcs->asic_dma_free_coherent(hdev,
849 int hl_hw_queues_create(struct hl_device *hdev)
851 struct asic_fixed_properties *asic = &hdev->asic_prop;
855 hdev->kernel_queues = kcalloc(asic->max_queues,
856 sizeof(*hdev->kernel_queues), GFP_KERNEL);
858 if (!hdev->kernel_queues) {
859 dev_err(hdev->dev, "Not enough memory for H/W queues\n");
864 for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
870 rc = queue_init(hdev, q, i);
872 dev_err(hdev->dev,
881 for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
882 queue_fini(hdev, q);
884 kfree(hdev->kernel_queues);
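hl_hw_queues_create (line 849) allocates the whole queue array with kcalloc, initializes the queues one by one, and if any initialization fails it tears down only the q_ready_cnt queues already brought up (lines 881-884) before freeing the array. That partial-init rollback is the part worth noting; a compact user-space sketch, with calloc standing in for kcalloc and trivial init/fini helpers:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct hw_queue { int hw_queue_id; };

	static int queue_init(struct hw_queue *q, int id) { q->hw_queue_id = id; return 0; }
	static void queue_fini(struct hw_queue *q) { (void)q; }

	static int queues_create(struct hw_queue **out, unsigned int max_queues)
	{
		struct hw_queue *queues, *q;
		unsigned int i, q_ready_cnt;
		int rc;

		queues = calloc(max_queues, sizeof(*queues));
		if (!queues)
			return -ENOMEM;

		for (i = 0, q_ready_cnt = 0, q = queues;
				i < max_queues; i++, q_ready_cnt++, q++) {
			rc = queue_init(q, i);
			if (rc) {
				fprintf(stderr, "failed to initialize queue %u\n", i);
				goto release_queues;
			}
		}

		*out = queues;
		return 0;

	release_queues:
		/* undo only the queues that were successfully initialized */
		for (i = 0, q = queues; i < q_ready_cnt; i++, q++)
			queue_fini(q);

		free(queues);
		return rc;
	}

	int main(void)
	{
		struct hw_queue *queues;
		int rc = queues_create(&queues, 8);

		if (!rc)
			free(queues);
		return rc ? 1 : 0;
	}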
889 void hl_hw_queues_destroy(struct hl_device *hdev)
892 u32 max_queues = hdev->asic_prop.max_queues;
895 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
896 queue_fini(hdev, q);
898 kfree(hdev->kernel_queues);
901 void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
904 u32 max_queues = hdev->asic_prop.max_queues;
907 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
915 sync_stream_queue_reset(hdev, q->hw_queue_id);