Lines Matching refs:gts
59 * Find and lock the gts that contains the specified user vaddr.
62 * - *gts with the mmap_lock locked for read and the GTS locked.
70 struct gru_thread_state *gts = NULL;
75 gts = gru_find_thread_state(vma, TSID(vaddr, vma));
76 if (gts)
77 	mutex_lock(&gts->ts_ctxlock);
80 return gts;
87 struct gru_thread_state *gts = ERR_PTR(-EINVAL);
94 gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
95 if (IS_ERR(gts))
97 	mutex_lock(&gts->ts_ctxlock);
99 return gts;
103 return gts;
109 static void gru_unlock_gts(struct gru_thread_state *gts)
111 	mutex_unlock(&gts->ts_ctxlock);
252 static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
255 struct mm_struct *mm = gts->ts_mm;
310 struct gru_thread_state *gts, int atomic,
332 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
337 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
338 atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
356 struct gru_thread_state *gts,
361 unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
410 	if (atomic_read(&gts->ts_gms->ms_range_active))
413 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
419 if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
420 gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
421 if (atomic || !gru_update_cch(gts)) {
422 gts->ts_force_cch_reload = 1;
428 gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
433 gts->ustats.tlbdropin++;
437 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
439 atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
523 struct gru_thread_state *gts;
560 * The gts cannot change until a TFH start/writestart command
564 gts = gru->gs_gts[ctxnum];
567 if (!gts) {
576 gts->ustats.fmm_tlbmiss++;
577 if (!gts->ts_force_cch_reload &&
578 mmap_read_trylock(gts->ts_mm)) {
579 gru_try_dropin(gru, gts, tfh, NULL);
580 mmap_read_unlock(gts->ts_mm);
613 static int gru_user_dropin(struct gru_thread_state *gts,
617 struct gru_mm_struct *gms = gts->ts_gms;
620 gts->ustats.upm_tlbmiss++;
625 ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
640 struct gru_thread_state *gts;
652 gts = gru_find_lock_gts(cb);
653 if (!gts)
655 gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
657 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
660 if (gru_check_context_placement(gts)) {
661 gru_unlock_gts(gts);
662 gru_unload_context(gts, 1);
669 if (gts->ts_gru && gts->ts_force_cch_reload) {
670 gts->ts_force_cch_reload = 0;
671 gru_update_cch(gts);
675 cbrnum = thread_cbr_number(gts, ucbnum);
676 if (gts->ts_gru) {
677 tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
678 cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
679 gts->ts_ctxnum, ucbnum);
680 ret = gru_user_dropin(gts, tfh, cbk);
683 gru_unlock_gts(gts);
695 struct gru_thread_state *gts;
702 gts = gru_find_lock_gts(excdet.cb);
703 if (!gts)
706 gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
708 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
710 } else if (gts->ts_gru) {
711 cbrnum = thread_cbr_number(gts, ucbnum);
712 cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
727 gru_unlock_gts(gts);
744 struct gru_thread_state *gts;
754 gts = gru->gs_gts[ctxnum];
755 	if (gts && mutex_trylock(&gts->ts_ctxlock)) {
757 gru_unload_context(gts, 1);
758 	mutex_unlock(&gts->ts_ctxlock);
769 struct gru_thread_state *gts;
781 gts = gru_find_lock_gts(req.gseg);
782 if (!gts)
785 if (gts->ts_gru)
786 gru_unload_context(gts, 1);
787 gru_unlock_gts(gts);
798 struct gru_thread_state *gts;
809 gts = gru_find_lock_gts(req.gseg);
810 if (!gts)
813 gms = gts->ts_gms;
814 gru_unlock_gts(gts);
825 struct gru_thread_state *gts;
833 * If no gts exists in the array, the context has never been used & all
836 gts = gru_find_lock_gts(req.gseg);
837 if (gts) {
838 	memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
839 gru_unlock_gts(gts);
841 memset(&req.stats, 0, sizeof(gts->ustats));
856 struct gru_thread_state *gts;
865 gts = gru_find_lock_gts(req.gseg);
866 if (!gts) {
867 gts = gru_alloc_locked_gts(req.gseg);
868 if (IS_ERR(gts))
869 return PTR_ERR(gts);
880 gts->ts_user_blade_id = req.val1;
881 gts->ts_user_chiplet_id = req.val0;
882 if (gru_check_context_placement(gts)) {
883 gru_unlock_gts(gts);
884 gru_unload_context(gts, 1);
891 gts->ts_tgid_owner = current->tgid;
895 gts->ts_cch_req_slice = req.val1 & 3;
900 gru_unlock_gts(gts);