Lines Matching defs:hdev
17 static long _hl_cs_wait_ioctl(struct hl_device *hdev,
25 struct hl_device *hdev = hw_sob->hdev;
27 hdev->asic_funcs->reset_sob(hdev, hw_sob);
34 struct hl_device *hdev = hw_sob->hdev;
36 dev_crit(hdev->dev,
47 struct hl_device *hdev = hl_cs_cmpl->hdev;
58 dev_dbg(hdev->dev,
123 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
132 !hdev->mmu_enable));
148 struct hl_device *hdev = hpriv->hdev;
165 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
167 if (is_cb_patched(hdev, job)) {
195 static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
199 if (is_cb_patched(hdev, job)) {
200 hl_userptr_delete_list(hdev, &job->userptr_list);
220 job->is_kernel_allocated_cb && hdev->mmu_enable) {
236 hl_debugfs_remove_job(hdev, job);
245 static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
247 hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
249 hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
251 hdev->aggregated_cs_counters.parsing_drop_cnt +=
253 hdev->aggregated_cs_counters.queue_full_drop_cnt +=
255 hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt +=
263 struct hl_device *hdev = cs->ctx->hdev;
277 free_job(hdev, job);
281 hdev->asic_funcs->hw_queues_lock(hdev);
283 hdev->cs_active_cnt--;
284 if (!hdev->cs_active_cnt) {
287 ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
290 if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
291 hdev->idle_busy_ts_idx = 0;
292 } else if (hdev->cs_active_cnt < 0) {
293 dev_crit(hdev->dev, "CS active cnt %d is negative\n",
294 hdev->cs_active_cnt);
297 hdev->asic_funcs->hw_queues_unlock(hdev);
301 spin_lock(&hdev->hw_queues_mirror_lock);
304 spin_unlock(&hdev->hw_queues_mirror_lock);
311 (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
317 spin_lock(&hdev->hw_queues_mirror_lock);
321 &hdev->hw_queues_mirror_list,
327 hdev->timeout_jiffies);
330 spin_unlock(&hdev->hw_queues_mirror_lock);
361 cs_counters_aggregate(hdev, cs->ctx);
369 struct hl_device *hdev;
385 hdev = cs->ctx->hdev;
387 dev_err(hdev->dev,
393 if (hdev->reset_on_lockup)
394 hl_device_reset(hdev, false, false);
397 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
424 cs_cmpl->hdev = hdev;
433 (hdev->asic_prop.max_pending_cs - 1)];
436 dev_dbg_ratelimited(hdev->dev,
443 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
456 (hdev->asic_prop.max_pending_cs - 1)] =
478 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
483 free_job(hdev, job);
486 void hl_cs_rollback_all(struct hl_device *hdev)
492 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
493 flush_workqueue(hdev->cq_wq[i]);
496 list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
500 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
502 cs_rollback(hdev, cs);
512 struct hl_device *hdev = cs->ctx->hdev;
515 free_job(hdev, job);
518 static int validate_queue_index(struct hl_device *hdev,
523 struct asic_fixed_properties *asic = &hdev->asic_prop;
530 dev_err(hdev->dev, "Queue index %d is invalid\n",
538 dev_err(hdev->dev, "Queue index %d is invalid\n",
544 dev_err(hdev->dev,
556 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
565 cb = hl_cb_get(hdev, cb_mgr, cb_handle);
567 dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
572 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
587 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
599 if (is_cb_patched(hdev, job))
611 struct hl_device *hdev = hpriv->hdev;
623 dev_err(hdev->dev,
639 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
645 hl_ctx_get(hdev, hpriv->ctx);
647 rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
663 rc = validate_queue_index(hdev, chunk, &queue_type,
671 cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
684 job = hl_cs_allocate_job(hdev, queue_type,
688 dev_err(hdev->dev, "Failed to allocate a new job\n");
716 hl_debugfs_add_job(hdev, job);
721 dev_err(hdev->dev,
730 dev_err(hdev->dev,
740 dev_err(hdev->dev,
755 cs_rollback(hdev, cs);
771 struct hl_device *hdev = hpriv->hdev;
787 dev_err(hdev->dev,
803 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
811 if (chunk->queue_index >= hdev->asic_prop.max_queues) {
812 dev_err(hdev->dev, "Queue index %d is invalid\n",
819 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
822 if ((q_idx >= hdev->asic_prop.max_queues) ||
824 dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
836 dev_err(hdev->dev,
855 dev_err(hdev->dev,
865 dev_err(hdev->dev,
882 dev_err(hdev->dev,
899 hl_ctx_get(hdev, ctx);
901 rc = allocate_cs(hdev, ctx, cs_type, &cs);
920 job = hl_cs_allocate_job(hdev, q_type, true);
923 dev_err(hdev->dev, "Failed to allocate a new job\n");
929 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
931 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
933 cb = hl_cb_kernel_create(hdev, cb_size,
934 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
957 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
966 hl_debugfs_add_job(hdev, job);
971 dev_err(hdev->dev,
981 cs_rollback(hdev, cs);
998 struct hl_device *hdev = hpriv->hdev;
1008 if (hl_device_disabled_or_in_reset(hdev)) {
1009 dev_warn_ratelimited(hdev->dev,
1011 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
1019 dev_err(hdev->dev,
1027 (!hdev->supports_sync_stream))) {
1028 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1045 dev_err(hdev->dev,
1052 dev_err(hdev->dev,
1071 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1073 dev_err_ratelimited(hdev->dev,
1092 hdev->asic_funcs->restore_phase_topology(hdev);
1095 dev_dbg(hdev->dev,
1106 dev_err(hdev->dev,
1114 ret = _hl_cs_wait_ioctl(hdev, ctx,
1115 jiffies_to_usecs(hdev->timeout_jiffies),
1118 dev_err(hdev->dev,
1130 rc = hl_poll_timeout_memory(hdev,
1132 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1135 dev_err(hdev->dev,
1156 hl_device_reset(hdev, false, false);
1161 static long _hl_cs_wait_ioctl(struct hl_device *hdev,
1173 hl_ctx_get(hdev, ctx);
1179 dev_notice_ratelimited(hdev->dev,
1196 dev_dbg(hdev->dev,
1209 struct hl_device *hdev = hpriv->hdev;
1214 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);
1220 dev_err_ratelimited(hdev->dev,
1226 dev_err_ratelimited(hdev->dev,
1231 dev_err_ratelimited(hdev->dev,
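
The lines above show two recurring ways this file reaches the device: IOCTL entry points take it from the per-file private data (hdev = hpriv->hdev), while completion and rollback paths take it from the submission's context (hdev = cs->ctx->hdev), and ASIC-specific behaviour is always dispatched through the hdev->asic_funcs table. The following is a minimal user-space C sketch of that ownership chain; the struct layouts and helper names here are simplified stand-ins for illustration only, not the real habanalabs kernel definitions.

    /*
     * Simplified, illustrative stand-ins for the habanalabs structures.
     * Field names follow the listing (hpriv->hdev, cs->ctx->hdev,
     * hdev->asic_funcs); the layouts are NOT the real kernel definitions.
     */
    #include <stdio.h>

    struct hl_device;

    struct hl_asic_funcs {
            /* ASIC-specific hooks dispatched through hdev->asic_funcs */
            void (*hw_queues_lock)(struct hl_device *hdev);
            void (*hw_queues_unlock)(struct hl_device *hdev);
    };

    struct hl_device {
            const struct hl_asic_funcs *asic_funcs;
            int cs_active_cnt;
    };

    struct hl_ctx {
            struct hl_device *hdev;         /* context keeps a device back-pointer */
    };

    struct hl_cs {
            struct hl_ctx *ctx;             /* completion paths: cs->ctx->hdev */
    };

    struct hl_fpriv {
            struct hl_device *hdev;         /* IOCTL paths: hpriv->hdev */
    };

    static void dummy_lock(struct hl_device *hdev)   { (void)hdev; }
    static void dummy_unlock(struct hl_device *hdev) { (void)hdev; }

    /* Completion-side pattern from the listing: recover hdev from the CS,
     * then bracket the queue bookkeeping with the ASIC lock callbacks. */
    static void cs_release_sketch(struct hl_cs *cs)
    {
            struct hl_device *hdev = cs->ctx->hdev;

            hdev->asic_funcs->hw_queues_lock(hdev);
            hdev->cs_active_cnt--;
            hdev->asic_funcs->hw_queues_unlock(hdev);
    }

    /* IOCTL-side pattern from the listing: the device comes from the
     * per-file private data rather than from the CS. */
    static void cs_ioctl_sketch(struct hl_fpriv *hpriv)
    {
            struct hl_device *hdev = hpriv->hdev;

            printf("active CS count: %d\n", hdev->cs_active_cnt);
    }

    int main(void)
    {
            struct hl_asic_funcs funcs = {
                    .hw_queues_lock = dummy_lock,
                    .hw_queues_unlock = dummy_unlock,
            };
            struct hl_device hdev = { .asic_funcs = &funcs, .cs_active_cnt = 1 };
            struct hl_ctx ctx = { .hdev = &hdev };
            struct hl_cs cs = { .ctx = &ctx };
            struct hl_fpriv hpriv = { .hdev = &hdev };

            cs_release_sketch(&cs);
            cs_ioctl_sketch(&hpriv);
            return 0;
    }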