Lines Matching refs:ctx
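/* afu_open path: allocate a cxl_context, initialise it and stash it in file->private_data */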

47 struct cxl_context *ctx;
82 if (!(ctx = cxl_context_alloc())) {
87 rc = cxl_context_init(ctx, afu, master);
91 cxl_context_set_mapping(ctx, inode->i_mapping);
93 pr_devel("afu_open pe: %i\n", ctx->pe);
94 file->private_data = ctx;
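
/* file release path: detach the context, clear its user mapping and free it */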
119 struct cxl_context *ctx = file->private_data;
122 __func__, ctx->pe);
123 cxl_context_detach(ctx);
130 if (!ctx->kernelapi) {
131 mutex_lock(&ctx->mapping_lock);
132 ctx->mapping = NULL;
133 mutex_unlock(&ctx->mapping_lock);
142 cxl_context_free(ctx);
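
/* afu_ioctl_start_work(): validate the request, register AFU interrupts, take PID/mm references and attach the process */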
147 static long afu_ioctl_start_work(struct cxl_context *ctx,
154 pr_devel("%s: pe: %i\n", __func__, ctx->pe);
161 mutex_lock(&ctx->status_mutex);
162 if (ctx->status != OPENED) {
179 work.num_interrupts = ctx->afu->pp_irqs;
180 else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
181 (work.num_interrupts > ctx->afu->irqs_max)) {
186 if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
193 ctx->assign_tidr = true;
195 ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
201 rc = cxl_adapter_context_get(ctx->afu->adapter);
203 afu_release_irqs(ctx, ctx);
216 ctx->pid = get_task_pid(current, PIDTYPE_PID);
219 ctx->mm = get_task_mm(current);
222 cxl_context_mm_count_get(ctx);
224 if (ctx->mm) {
226 mmput(ctx->mm);
228 mm_context_add_copro(ctx->mm);
251 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
253 if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
255 afu_release_irqs(ctx, ctx);
256 cxl_adapter_context_put(ctx->afu->adapter);
257 put_pid(ctx->pid);
258 ctx->pid = NULL;
260 cxl_context_mm_count_put(ctx);
261 if (ctx->mm)
262 mm_context_remove_copro(ctx->mm);
268 work.tid = ctx->tidr;
273 ctx->status = STARTED;
276 mutex_unlock(&ctx->status_mutex);
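
/* afu_ioctl_process_element(): copy the context's external PE number to userspace */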
280 static long afu_ioctl_process_element(struct cxl_context *ctx,
283 pr_devel("%s: pe: %i\n", __func__, ctx->pe);
285 if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
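
/* afu_ioctl_get_afu_id(): report the card number, AFU slice and current mode */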
291 static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
296 afuid.card_id = ctx->afu->adapter->adapter_num;
297 afuid.afu_offset = ctx->afu->slice;
298 afuid.afu_mode = ctx->afu->current_mode;
301 if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
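
/* ioctl entry point: reject closed contexts and dead links, then dispatch to the handlers above */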
312 struct cxl_context *ctx = file->private_data;
314 if (ctx->status == CLOSED)
317 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
323 return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
325 return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
327 return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
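
/* mmap path: refuse until the context is STARTED, then map it via cxl_context_iomap() */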
341 struct cxl_context *ctx = file->private_data;
344 if (ctx->status != STARTED)
347 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
350 return cxl_context_iomap(ctx, vm);
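
/* ctx_event_pending(): any IRQ, fault, AFU error or driver-specific event queued? */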
353 static inline bool ctx_event_pending(struct cxl_context *ctx)
355 if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
358 if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
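
/* poll handler: wait on ctx->wq, reporting pending events or a closed context */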
366 struct cxl_context *ctx = file->private_data;
371 poll_wait(file, &ctx->wq, poll);
373 pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
375 spin_lock_irqsave(&ctx->lock, flags);
376 if (ctx_event_pending(ctx))
378 else if (ctx->status == CLOSED)
382 spin_unlock_irqrestore(&ctx->lock, flags);
384 pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
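
/* afu_driver_event_copy(): copy a driver-specific event to userspace and tell the AFU driver whether delivery succeeded */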
389 static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
396 ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
403 ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
409 ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
416 ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
420 ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
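
/* read path: wait for an event, then return one driver event, AFU interrupt, fault or AFU error to userspace */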
427 struct cxl_context *ctx = file->private_data;
434 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
440 spin_lock_irqsave(&ctx->lock, flags);
443 prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
444 if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
447 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
462 spin_unlock_irqrestore(&ctx->lock, flags);
466 spin_lock_irqsave(&ctx->lock, flags);
469 finish_wait(&ctx->wq, &wait);
472 event.header.process_element = ctx->pe;
474 if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
476 pl = ctx->afu_driver_ops->fetch_event(ctx);
477 atomic_dec(&ctx->afu_driver_events);
479 } else if (ctx->pending_irq) {
483 event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
484 clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
485 if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
486 ctx->pending_irq = false;
487 } else if (ctx->pending_fault) {
491 event.fault.addr = ctx->fault_addr;
492 event.fault.dsisr = ctx->fault_dsisr;
493 ctx->pending_fault = false;
494 } else if (ctx->pending_afu_err) {
498 event.afu_error.error = ctx->afu_err;
499 ctx->pending_afu_err = false;
500 } else if (ctx->status == CLOSED) {
502 spin_unlock_irqrestore(&ctx->lock, flags);
507 spin_unlock_irqrestore(&ctx->lock, flags);
510 return afu_driver_event_copy(ctx, buf, &event, pl);
517 finish_wait(&ctx->wq, &wait);
518 spin_unlock_irqrestore(&ctx->lock, flags);
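
Taken together, the lines above cover the whole character-device flow for a context: open, CXL_IOCTL_START_WORK, CXL_IOCTL_GET_PROCESS_ELEMENT, mmap, poll and read. A minimal userspace sketch of that flow, under stated assumptions, follows; the device path and the zero work-element descriptor are placeholders (both are AFU-specific), the uapi header location may differ on your system, and error handling is abbreviated.

/*
 * Hedged sketch: drive one context through start-work, query its
 * process element, then block for a single AFU event via read().
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <misc/cxl.h>	/* uapi header for struct cxl_ioctl_start_work etc.; path is an assumption */

int main(void)
{
	struct cxl_ioctl_start_work work;
	struct cxl_event event;
	__u32 pe;
	int fd;

	fd = open("/dev/cxl/afu0.0m", O_RDWR);	/* placeholder device node */
	if (fd < 0)
		return 1;

	memset(&work, 0, sizeof(work));
	work.work_element_descriptor = 0;	/* AFU-specific WED goes here */
	/* flags == 0: the driver picks the default number of interrupts */
	if (ioctl(fd, CXL_IOCTL_START_WORK, &work))
		return 1;

	if (ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe))
		return 1;
	printf("process element: %u\n", pe);

	/* Block until the AFU raises an interrupt, fault or error */
	if (read(fd, &event, sizeof(event)) > 0)
		printf("event type %u for pe %u\n",
		       event.header.type, event.header.process_element);

	close(fd);
	return 0;
}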