Lines matching references to ctx (struct hl_ctx). Simplified sketches of the recurring ctx patterns seen in these matches follow the listing.

35 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
304 parser.ctx_id = job->cs->ctx->asid;
679 struct hl_device *hdev = cs->ctx->hdev;
754 /* Must be called before hl_ctx_put because inside we use ctx to get
781 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
785 hl_ctx_put(cs->ctx);
818 hdev = cs->ctx->hdev;
880 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
897 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
903 hl_ctx_get(ctx);
905 cs->ctx = ctx;
926 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
939 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
950 spin_lock(&ctx->cs_lock);
952 cs_cmpl->cs_seq = ctx->cs_sequence;
953 other = ctx->cs_pending[cs_cmpl->cs_seq &
971 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
982 ctx->cs_pending[cs_cmpl->cs_seq &
985 ctx->cs_sequence++;
991 spin_unlock(&ctx->cs_lock);
998 spin_unlock(&ctx->cs_lock);
1004 hl_ctx_put(ctx);
1029 struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
1034 if (!ctx)
1037 mgr = &ctx->sig_mgr;
1043 hl_ctx_put(ctx);
1068 cs->ctx->asid, cs->sequence);
1150 struct hl_device *hdev = cs->ctx->hdev;
1159 struct hl_device *hdev = cs->ctx->hdev;
1329 struct hl_ctx *ctx = hpriv->ctx;
1357 ctx->asid);
1374 dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
1380 ctx->asid);
1390 struct hl_ctx *ctx)
1395 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1409 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1416 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1478 struct hl_ctx *ctx = hpriv->ctx;
1491 hpriv->ctx);
1501 rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1531 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1540 &ctx->cs_counters.validation_drop_cnt);
1569 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1605 atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1609 cs->ctx->asid, cs->sequence, job->id, rc);
1618 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1622 cs->ctx->asid, cs->sequence);
1642 cs->ctx->asid, cs->sequence, rc);
1671 struct hl_ctx *ctx = hpriv->ctx;
1680 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1686 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1690 ctx->asid, rc);
1726 ctx->asid, rc);
1734 ret = _hl_cs_wait_ioctl(hdev, ctx,
1745 ctx->asid, ret);
1752 ctx->thread_ctx_switch_wait_token = 1;
1754 } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
1756 &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1866 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1882 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1897 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1906 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1924 struct hl_ctx *ctx, struct hl_cs *cs,
1936 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1949 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
2036 hl_ctx_get(hpriv->ctx);
2037 handle->ctx = hpriv->ctx;
2038 mgr = &hpriv->ctx->sig_mgr;
2103 hl_ctx_put(handle->ctx);
2120 mgr = &hpriv->ctx->sig_mgr;
2168 hl_ctx_put(encaps_sig_hdl->ctx);
2200 struct hl_ctx *ctx = hpriv->ctx;
2210 ctx);
2218 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2231 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2242 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2251 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2269 ctx, cs_encaps_signals);
2280 spin_lock(&ctx->sig_mgr.lock);
2281 idp = &ctx->sig_mgr.handles;
2297 spin_unlock(&ctx->sig_mgr.lock);
2318 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2320 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2344 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2362 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2391 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2394 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2398 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2420 ctx->asid, cs->sequence, rc);
2642 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2645 struct hl_device *hdev = ctx->hdev;
2655 seq, ctx->cs_sequence);
2660 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
2663 seq, ctx->cs_sequence);
2722 struct hl_device *hdev = mcs_data->ctx->hdev;
2731 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2773 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
2843 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
2852 hl_ctx_get(ctx);
2854 fence = hl_ctx_get_fence(ctx, seq);
2856 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2858 hl_ctx_put(ctx);
2997 struct hl_ctx *ctx = hpriv->ctx;
3047 mcs_data.ctx = ctx;
3052 hl_ctx_get(ctx);
3106 hl_ctx_put(ctx);
3153 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
3281 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3297 hl_ctx_get(ctx);
3436 hl_ctx_put(ctx);
3445 hl_ctx_put(ctx);
3450 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
3464 hl_ctx_get(ctx);
3468 hl_ctx_put(ctx);
3561 hl_ctx_put(ctx);
3615 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
3623 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
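
The most frequent pattern in these matches is the balanced context reference: hl_ctx_get(ctx) is taken before the context is used (lines 903, 2852, 3052, 3297, 3464) and hl_ctx_put(...) is dropped on the way out (e.g. lines 785, 1004, 2858, 3106, 3436). The sketch below is a minimal userspace analogue of that get/wait/put shape, not the driver code; every demo_* name is hypothetical and the fence lookup is reduced to a sequence comparison.

```c
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	atomic_int refcount;		/* lifetime of the context */
	unsigned long cs_sequence;	/* next command-submission sequence */
};

static void demo_ctx_get(struct demo_ctx *ctx)
{
	atomic_fetch_add(&ctx->refcount, 1);
}

static void demo_ctx_put(struct demo_ctx *ctx)
{
	/* free on the last put, which is what dropping the final
	 * context reference ultimately allows in the driver */
	if (atomic_fetch_sub(&ctx->refcount, 1) == 1)
		free(ctx);
}

/* Hypothetical stand-in for the fence lookup and wait: a sequence that
 * was already assigned counts as "complete" here. */
static int demo_wait_for_seq(struct demo_ctx *ctx, unsigned long seq)
{
	return seq < ctx->cs_sequence ? 0 : -EINVAL;
}

/* Shape of _hl_cs_wait_ioctl as visible in the matches: get, wait, put. */
static int demo_cs_wait(struct demo_ctx *ctx, unsigned long seq)
{
	int rc;

	demo_ctx_get(ctx);		/* mirrors hl_ctx_get(ctx) */
	rc = demo_wait_for_seq(ctx, seq);
	demo_ctx_put(ctx);		/* mirrors hl_ctx_put(ctx) */

	return rc;
}

int main(void)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return 1;

	atomic_init(&ctx->refcount, 1);	/* caller's initial reference */
	ctx->cs_sequence = 4;

	printf("wait seq 2 -> %d\n", demo_cs_wait(ctx, 2));
	printf("wait seq 9 -> %d\n", demo_cs_wait(ctx, 9));

	demo_ctx_put(ctx);		/* drop the initial reference */
	return 0;
}
```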
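
Lines 950-998 show allocate_cs reserving a slot for the new submission under ctx->cs_lock: the sequence comes from ctx->cs_sequence, the completion is stored in ctx->cs_pending[] at the sequence masked with a power-of-two ring size (the mask expression itself is truncated in the matches above), the previous occupant of that slot is inspected first, and max_cs_in_flight_drop_cnt is bumped when the ring is full (line 971). Below is a simplified, self-contained sketch of that bookkeeping with hypothetical demo_* names and an assumed ring size of 64.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_PENDING 64			/* assumed power-of-two ring size */

struct demo_cmpl {
	unsigned long cs_seq;
	bool completed;
};

struct demo_ctx {
	pthread_mutex_t cs_lock;
	unsigned long cs_sequence;		/* next sequence to hand out */
	struct demo_cmpl *cs_pending[DEMO_MAX_PENDING];
	unsigned long max_cs_in_flight_drop_cnt;
};

/* Reserve a pending slot for a new submission; fail if the slot this
 * sequence maps to is still occupied by an unfinished submission. */
static int demo_allocate_cs(struct demo_ctx *ctx, struct demo_cmpl *cmpl)
{
	struct demo_cmpl *other;
	int rc = 0;

	pthread_mutex_lock(&ctx->cs_lock);

	cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cmpl->cs_seq & (DEMO_MAX_PENDING - 1)];

	if (other && !other->completed) {
		ctx->max_cs_in_flight_drop_cnt++;	/* ring is full */
		rc = -1;
		goto out;
	}

	ctx->cs_pending[cmpl->cs_seq & (DEMO_MAX_PENDING - 1)] = cmpl;
	ctx->cs_sequence++;

out:
	pthread_mutex_unlock(&ctx->cs_lock);
	return rc;
}

int main(void)
{
	struct demo_ctx ctx = { .cs_lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_cmpl cmpl = { 0 };

	if (!demo_allocate_cs(&ctx, &cmpl))
		printf("reserved slot for seq %lu\n", cmpl.cs_seq);

	return 0;
}
```

The power-of-two ring lets the slot be picked with a cheap mask instead of a modulo, which is presumably why the listing shows the index computed as "cs_seq &" a mask rather than a division.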
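
Lines 1680-1760 show the one-shot context switch: the first thread to win ctx->thread_ctx_switch_token via atomic_cmpxchg performs the switch, then publishes ctx->thread_ctx_switch_wait_token = 1 (line 1752), and later threads wait on that token instead of switching again (lines 1754-1756). The following is a compilable analogue of just that token handshake, with hypothetical names and the actual switch replaced by a print and a sleep.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct demo_ctx {
	atomic_int thread_ctx_switch_token;		/* 1 until one thread claims it */
	atomic_int thread_ctx_switch_wait_token;	/* set once the switch is done */
};

static void *demo_submit(void *arg)
{
	struct demo_ctx *ctx = arg;
	int expected = 1;

	/* mirrors: do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0) */
	if (atomic_compare_exchange_strong(&ctx->thread_ctx_switch_token,
					   &expected, 0)) {
		printf("thread %lu performs the context switch\n",
		       (unsigned long)pthread_self());
		usleep(1000);				/* pretend the switch takes time */
		atomic_store(&ctx->thread_ctx_switch_wait_token, 1);
	} else {
		/* mirrors the wait for ctx->thread_ctx_switch_wait_token == 1 */
		while (!atomic_load(&ctx->thread_ctx_switch_wait_token))
			usleep(100);
		printf("thread %lu proceeds without switching\n",
		       (unsigned long)pthread_self());
	}

	return NULL;
}

int main(void)
{
	struct demo_ctx ctx;
	pthread_t threads[4];

	atomic_init(&ctx.thread_ctx_switch_token, 1);
	atomic_init(&ctx.thread_ctx_switch_wait_token, 0);

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, demo_submit, &ctx);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	return 0;
}
```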