Lines Matching defs:hdev

43 struct hl_device *hdev = cs->ctx->hdev;
47 if (hdev->disabled)
50 q = &hdev->kernel_queues[0];
53 if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
61 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
70 * @hdev: pointer to habanalabs device structure
83 void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
95 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
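The hl_hw_queue_submit_bd() lines above show the producer side of a queue: the driver fills a bus descriptor (BD) at the current producer index and then rings the queue's doorbell through the ASIC callback. A minimal userspace model of that flow, with an assumed BD layout and a printf standing in for the doorbell write (none of these names are the driver's), could look like this:

        #include <stdint.h>
        #include <stdio.h>

        #define QUEUE_LEN 256                       /* assumed power-of-two ring depth */

        struct bd {                                 /* simplified stand-in for the driver's BD */
                uint32_t ctl;
                uint32_t len;
                uint64_t ptr;
        };

        struct hw_queue {
                struct bd ring[QUEUE_LEN];
                uint32_t pi;                        /* producer index */
        };

        /* Fill the BD at the producer index, advance pi, then "ring the doorbell"
         * (a printf here; the driver calls asic_funcs->ring_doorbell()). */
        static void submit_bd(struct hw_queue *q, uint32_t ctl, uint32_t len, uint64_t ptr)
        {
                struct bd *bd = &q->ring[q->pi & (QUEUE_LEN - 1)];

                bd->ctl = ctl;
                bd->len = len;
                bd->ptr = ptr;

                q->pi++;
                printf("doorbell: pi=%u\n", q->pi);
        }

        int main(void)
        {
                struct hw_queue q = { 0 };

                submit_bd(&q, 0, 64, 0x1000);
                return 0;
        }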
101 * @hdev: pointer to hl_device structure
116 static int ext_queue_sanity_checks(struct hl_device *hdev,
121 &hdev->completion_queue[q->cq_id].free_slots_cnt;
128 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
142 dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
155 * @hdev: pointer to hl_device structure
165 static int int_queue_sanity_checks(struct hl_device *hdev,
172 dev_err(hdev->dev,
182 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
192 * @hdev: Pointer to hl_device structure.
200 static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
209 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
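ext_queue_sanity_checks(), int_queue_sanity_checks() and hw_queue_sanity_checks() all answer the same question before anything is written: does the queue (and, for external queues, the matching completion queue) still have room for the requested number of command buffers? A hedged sketch of that free-slot arithmetic for a power-of-two ring, using illustrative names and values rather than the driver's:

        #include <stdint.h>
        #include <stdio.h>

        #define QUEUE_LEN 256                       /* assumed power-of-two ring depth */

        /* Free slots = ring depth minus the entries in flight between the consumer
         * index (ci) and the producer index (pi); unsigned subtraction handles wrap. */
        static int queue_has_room(uint32_t pi, uint32_t ci, uint32_t num_entries)
        {
                uint32_t in_flight = pi - ci;
                uint32_t free_slots = QUEUE_LEN - in_flight;

                return free_slots >= num_entries;
        }

        int main(void)
        {
                printf("%d\n", queue_has_room(10, 8, 4));    /* 1: plenty of room */
                printf("%d\n", queue_has_room(300, 45, 4));  /* 0: ring almost full */
                return 0;
        }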
220 * @hdev: pointer to hl_device structure
228 int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
231 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
234 hdev->asic_funcs->hw_queues_lock(hdev);
236 if (hdev->disabled) {
247 rc = ext_queue_sanity_checks(hdev, q, 1, false);
252 hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
255 hdev->asic_funcs->hw_queues_unlock(hdev);
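hl_hw_queue_send_cb_no_cmpl() wraps a single BD submission that does not reserve a completion-queue slot: take the queues lock, reject the call if the device is disabled, sanity-check room for one entry, submit, unlock. A simplified userspace model of that locking pattern, with a pthread mutex and plain counters standing in for the driver's primitives:

        #include <errno.h>
        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* Stand-ins for the driver's state; names are illustrative. */
        static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;
        static bool device_disabled;
        static int queue_free_slots = 1;

        static int send_cb_no_cmpl(unsigned long cb_addr, unsigned int cb_size)
        {
                int rc = 0;

                pthread_mutex_lock(&queues_lock);           /* hw_queues_lock() */

                if (device_disabled) {                      /* hdev->disabled check */
                        rc = -EPERM;
                        goto out;
                }

                if (queue_free_slots < 1) {                 /* sanity check for one entry */
                        rc = -EAGAIN;
                        goto out;
                }

                queue_free_slots--;
                printf("submit bd: ptr=0x%lx len=%u\n", cb_addr, cb_size);

        out:
                pthread_mutex_unlock(&queues_lock);         /* hw_queues_unlock() */
                return rc;
        }

        int main(void)
        {
                return send_cb_no_cmpl(0x2000, 64) ? 1 : 0;
        }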
270 struct hl_device *hdev = job->cs->ctx->hdev;
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
307 cq = &hdev->completion_queue[q->cq_id];
310 hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
322 hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
335 struct hl_device *hdev = job->cs->ctx->hdev;
336 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
356 hdev->asic_funcs->pqe_write(hdev, pi, &bd);
358 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
371 struct hl_device *hdev = job->cs->ctx->hdev;
372 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
382 offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
401 hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
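The offset computation above maps a monotonically increasing CS sequence number onto a slot index: because max_pending_cs is a power of two, masking with (max_pending_cs - 1) is equivalent to a modulo. A tiny illustration (the value 64 is assumed, not taken from the driver):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t max_pending_cs = 64;               /* assumed power-of-two value */
                uint64_t seq;

                /* Masking with (max_pending_cs - 1) wraps the sequence number onto a
                 * small slot index, same result as seq % max_pending_cs. */
                for (seq = 62; seq < 67; seq++)
                        printf("seq %llu -> slot %llu\n",
                               (unsigned long long)seq,
                               (unsigned long long)(seq & (max_pending_cs - 1)));
                return 0;
        }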
404 static int init_signal_cs(struct hl_device *hdev,
413 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
419 dev_dbg(hdev->dev,
427 hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
430 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
439 void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
463 static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
472 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
483 hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl);
485 dev_dbg(hdev->dev, "Wait for encaps signals handle, qidx(%u), CS sequence(%llu), sob val: 0x%x, offset: %u\n",
519 dev_dbg(hdev->dev,
532 hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);
550 struct hl_device *hdev = ctx->hdev;
561 rc = init_signal_cs(hdev, job, cs_cmpl);
563 rc = init_wait_cs(hdev, cs, job, cs_cmpl);
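init_signal_cs() and init_wait_cs() build the two halves of stream synchronization: a signal CS increments a sync object (SOB), and a wait CS arms a monitor that fires once the SOB reaches its target value. A very small userspace model of that counter semantics, kept deliberately abstract since the real driver does this in hardware:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct sob {                                        /* stand-in for a hardware SOB */
                uint32_t val;
        };

        static void signal_cs(struct sob *sob)              /* what a signal CS does */
        {
                sob->val++;
        }

        static bool wait_cs_done(const struct sob *sob, uint32_t target)
        {
                return sob->val >= target;                  /* what an armed monitor checks */
        }

        int main(void)
        {
                struct sob sob = { 0 };

                signal_cs(&sob);
                signal_cs(&sob);
                printf("wait(target=2) done: %d\n", wait_cs_done(&sob, 2));
                return 0;
        }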
569 (struct hl_device *hdev, struct hl_cs *cs)
603 dev_dbg(hdev->dev, "CS seq (%llu) added to encaps signal handler id (%u), count(%u), qidx(%u), sob(%u), val(%u)\n",
611 dev_err(hdev->dev, "encaps handle id(%u) wasn't found!\n",
630 struct hl_device *hdev = ctx->hdev;
637 cntr = &hdev->aggregated_cs_counters;
639 hdev->asic_funcs->hw_queues_lock(hdev);
641 if (!hl_device_operational(hdev, &status)) {
644 dev_err(hdev->dev,
645 "device is %s, CS rejected!\n", hdev->status[status]);
650 max_queues = hdev->asic_prop.max_queues;
652 q = &hdev->kernel_queues[0];
657 rc = ext_queue_sanity_checks(hdev, q,
663 rc = int_queue_sanity_checks(hdev, q,
667 rc = hw_queue_sanity_checks(hdev, q,
671 dev_err(hdev->dev, "Queue type %d is invalid\n",
694 rc = hdev->asic_funcs->collective_wait_init_cs(cs);
699 rc = hdev->asic_funcs->pre_schedule_cs(cs);
701 dev_err(hdev->dev,
707 hdev->shadow_cs_queue[cs->sequence &
708 (hdev->asic_prop.max_pending_cs - 1)] = cs;
711 rc = encaps_sig_first_staged_cs_handler(hdev, cs);
716 spin_lock(&hdev->cs_mirror_lock);
722 staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
724 dev_err(hdev->dev,
731 if (is_staged_cs_last_exists(hdev, staged_cs)) {
732 dev_err(hdev->dev,
742 if (hdev->supports_wait_for_multi_cs)
747 list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
750 first_entry = list_first_entry(&hdev->cs_mirror_list,
752 if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
759 spin_unlock(&hdev->cs_mirror_lock);
781 spin_unlock(&hdev->cs_mirror_lock);
783 q = &hdev->kernel_queues[0];
788 &hdev->completion_queue[i].free_slots_cnt;
795 hdev->asic_funcs->hw_queues_unlock(hdev);
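The hl_hw_queue_schedule_cs() lines show a reserve-then-roll-back pattern: under the queues lock, each queue used by the CS is sanity-checked and, for external queues, a completion-queue slot is reserved; if any check fails, only the slots already taken are returned before the lock is dropped. A hedged sketch of that pattern with illustrative counts:

        #include <errno.h>
        #include <stdio.h>

        #define NUM_QUEUES 4

        /* Illustrative per-queue completion-queue slot counts; queue 2 is full. */
        static int cq_free_slots[NUM_QUEUES] = { 2, 2, 0, 2 };

        static int schedule_cs(void)
        {
                int i, rc = 0;

                for (i = 0; i < NUM_QUEUES; i++) {
                        if (cq_free_slots[i] < 1) {
                                rc = -EAGAIN;
                                goto unroll;
                        }
                        cq_free_slots[i]--;                 /* reserve one CQ slot */
                }
                return 0;

        unroll:
                while (--i >= 0)
                        cq_free_slots[i]++;                 /* give back what was taken */
                return rc;
        }

        int main(void)
        {
                printf("rc=%d\n", schedule_cs());
                return 0;
        }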
803 * @hdev: pointer to hl_device structure
806 void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
808 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
813 static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
820 p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
822 p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
831 dev_err(hdev->dev,
846 hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
848 hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
854 static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
858 p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
861 dev_err(hdev->dev,
874 static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
876 return ext_and_cpu_queue_init(hdev, q, true);
879 static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
881 return ext_and_cpu_queue_init(hdev, q, false);
884 static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
888 p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
902 static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
905 struct asic_fixed_properties *prop = &hdev->asic_prop;
909 sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
916 if (hdev->kernel_queues[q_idx].collective_mode ==
918 reserved_mon_idx = hdev->collective_mon_idx;
928 hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS;
929 } else if (hdev->kernel_queues[q_idx].collective_mode ==
931 reserved_mon_idx = hdev->collective_mon_idx++;
938 if (!hdev->kernel_queues[q_idx].supports_sync_stream)
941 queue_idx = hdev->sync_stream_queue_idx++;
952 hw_sob->hdev = hdev;
955 hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
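sync_stream_queue_init() hands each sync-stream queue its own block of SOBs and, for collective queues, a block of monitors, simply by bumping shared counters (sync_stream_queue_idx, collective_mon_idx). An illustrative model of that reservation-by-counter scheme; the per-queue SOB count here is assumed, the real one comes from the ASIC properties:

        #include <stdio.h>

        #define SOBS_PER_QUEUE 4            /* assumed; the real count is an ASIC property */

        static int sync_stream_queue_idx;   /* shared counter, as in the driver */

        /* Each sync-stream queue claims the next contiguous block of SOB ids. */
        static int reserve_sob_base(void)
        {
                int queue_idx = sync_stream_queue_idx++;

                return queue_idx * SOBS_PER_QUEUE;
        }

        int main(void)
        {
                printf("queue 0 base sob: %d\n", reserve_sob_base());
                printf("queue 1 base sob: %d\n", reserve_sob_base());
                return 0;
        }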
961 static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
964 &hdev->kernel_queues[q_idx].sync_stream_prop;
978 * @hdev: pointer to hl_device structure
985 static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
994 rc = ext_queue_init(hdev, q);
997 rc = int_queue_init(hdev, q);
1000 rc = cpu_queue_init(hdev, q);
1003 rc = hw_queue_init(hdev, q);
1009 dev_crit(hdev->dev, "wrong queue type %d during init\n",
1015 sync_stream_queue_init(hdev, q->hw_queue_id);
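queue_init() itself is a plain dispatch on the queue type: external and CPU queues go through ext_and_cpu_queue_init(), internal queues take their base address from the ASIC, HW queues get their own DMA ring, and an unknown type is reported as a critical error. A compilable sketch of that dispatch with stand-in helpers:

        #include <stdio.h>

        enum queue_type { QUEUE_EXT, QUEUE_INT, QUEUE_CPU, QUEUE_HW };

        /* Stand-in helpers; the driver's versions allocate DMA memory or query the
         * ASIC for an on-device base address. */
        static int ext_queue_init(void)  { puts("ext: DMA ring paired with a CQ");   return 0; }
        static int int_queue_init(void)  { puts("int: base address from the ASIC");  return 0; }
        static int cpu_queue_init(void)  { puts("cpu: CPU-accessible DMA pool");     return 0; }
        static int hw_queue_init(void)   { puts("hw:  DMA ring, no CQ");             return 0; }

        static int queue_init(enum queue_type type)
        {
                switch (type) {
                case QUEUE_EXT:
                        return ext_queue_init();
                case QUEUE_INT:
                        return int_queue_init();
                case QUEUE_CPU:
                        return cpu_queue_init();
                case QUEUE_HW:
                        return hw_queue_init();
                default:
                        fprintf(stderr, "wrong queue type %d during init\n", type);
                        return -1;
                }
        }

        int main(void)
        {
                return queue_init(QUEUE_EXT);
        }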
1028 * @hdev: pointer to hl_device structure
1033 static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
1062 hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
1064 hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
1068 int hl_hw_queues_create(struct hl_device *hdev)
1070 struct asic_fixed_properties *asic = &hdev->asic_prop;
1074 hdev->kernel_queues = kcalloc(asic->max_queues,
1075 sizeof(*hdev->kernel_queues), GFP_KERNEL);
1077 if (!hdev->kernel_queues) {
1078 dev_err(hdev->dev, "Not enough memory for H/W queues\n");
1083 for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
1090 rc = queue_init(hdev, q, i);
1092 dev_err(hdev->dev,
1101 for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
1102 queue_fini(hdev, q);
1104 kfree(hdev->kernel_queues);
1109 void hl_hw_queues_destroy(struct hl_device *hdev)
1112 u32 max_queues = hdev->asic_prop.max_queues;
1115 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
1116 queue_fini(hdev, q);
1118 kfree(hdev->kernel_queues);
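hl_hw_queues_create() allocates the kernel_queues array with kcalloc(), initializes the queues one by one while counting them in q_ready_cnt, and on failure tears down only the queues that were actually initialized; hl_hw_queues_destroy() finalizes every queue and frees the array. A userspace model of that partial-rollback lifecycle, with a deliberately failing queue for illustration:

        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define MAX_QUEUES 8

        struct queue { int ready; };

        static int queue_init(struct queue *q, int i)
        {
                if (i == 5)                                 /* pretend queue 5 fails */
                        return -ENOMEM;
                q->ready = 1;
                return 0;
        }

        static void queue_fini(struct queue *q)
        {
                q->ready = 0;
        }

        int main(void)
        {
                struct queue *queues = calloc(MAX_QUEUES, sizeof(*queues));
                int i, q_ready_cnt, rc = 0;

                if (!queues)
                        return 1;

                for (i = 0, q_ready_cnt = 0; i < MAX_QUEUES; i++, q_ready_cnt++) {
                        rc = queue_init(&queues[i], i);
                        if (rc)
                                break;
                }

                if (rc) {
                        /* tear down only the queues that were fully initialized */
                        for (i = 0; i < q_ready_cnt; i++)
                                queue_fini(&queues[i]);
                }

                free(queues);
                return rc ? 1 : 0;
        }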
1121 void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
1124 u32 max_queues = hdev->asic_prop.max_queues;
1127 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
1135 sync_stream_queue_reset(hdev, q->hw_queue_id);