Lines Matching defs:gru
20 #include "gru.h"
27 .name = "gru"
38 * Select a gru fault map to be used by the current cpu. Note that
84 static int gru_wrap_asid(struct gru_state *gru)
86 gru_dbg(grudev, "gid %d\n", gru->gs_gid);
88 gru->gs_asid_gen++;
93 static int gru_reset_asid_limit(struct gru_state *gru, int asid)
97 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
101 asid = gru_wrap_asid(gru);
102 gru_flush_all_tlb(gru);
103 gid = gru->gs_gid;
106 if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
108 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
110 gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
121 asid = gru_wrap_asid(gru);
129 gru->gs_asid_limit = limit;
130 gru->gs_asid = asid;
131 gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
137 static int gru_assign_asid(struct gru_state *gru)
141 gru->gs_asid += ASID_INC;
142 asid = gru->gs_asid;
143 if (asid >= gru->gs_asid_limit)
144 asid = gru_reset_asid_limit(gru, asid);
146 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
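Taken together, gru_assign_asid(), gru_reset_asid_limit() and gru_wrap_asid() implement a generation-stamped ASID counter: the counter advances by ASID_INC, and when it reaches gs_asid_limit the limit is reassessed against the ASIDs still in use, wrapping the counter and bumping gs_asid_gen so stale cached ASIDs can be recognized later. A minimal self-contained model of that counter follows; the constants and the simplified limit handling are assumptions for illustration, not the driver's exact logic.

#include <stdio.h>

/* Illustrative constants; the real driver derives these from hardware. */
#define ASID_START 1
#define ASID_INC   8
#define ASID_LIMIT 64

struct asid_ctr {
	int asid;	/* next ASID to hand out */
	int limit;	/* wrap/reassess when the counter reaches this */
	int gen;	/* bumped on wrap; stale trackers compare against it */
};

/* Model of gru_wrap_asid(): restart the counter, invalidate old gens. */
static int wrap_asid(struct asid_ctr *c)
{
	c->gen++;
	return ASID_START;
}

/* Model of gru_assign_asid(): advance, wrapping when the limit is hit. */
static int assign_asid(struct asid_ctr *c)
{
	c->asid += ASID_INC;
	if (c->asid >= c->limit) {
		c->asid = wrap_asid(c);
		c->limit = ASID_LIMIT;	/* real code rescans in-use ASIDs */
	}
	return c->asid;
}

int main(void)
{
	struct asid_ctr c = { ASID_START, ASID_LIMIT, 0 };
	for (int i = 0; i < 10; i++)
		printf("asid 0x%x gen %d\n", assign_asid(&c), c.gen);
	return 0;
}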
172 unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
175 return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
179 unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
182 return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
186 static void reserve_gru_resources(struct gru_state *gru,
189 gru->gs_active_contexts++;
191 gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
194 gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
197 static void free_gru_resources(struct gru_state *gru,
200 gru->gs_active_contexts--;
201 gru->gs_cbr_map |= gts->ts_cbr_map;
202 gru->gs_dsr_map |= gts->ts_dsr_map;
212 static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
215 return hweight64(gru->gs_cbr_map) >= cbr_au_count
216 && hweight64(gru->gs_dsr_map) >= dsr_au_count
217 && gru->gs_active_contexts < max_active_contexts;
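reserve_gru_resources() and free_gru_resources() treat gs_cbr_map/gs_dsr_map as free-unit bitmaps: reserving pulls bits out of the free map into the context's ts_cbr_map/ts_dsr_map, freeing ORs the saved mask back in, and check_gru_resources() then only needs a popcount of the free maps. A standalone sketch of that idiom, with reserve_units() as a hypothetical stand-in for the driver's reserve_resources():

#include <stdio.h>
#include <stdint.h>

/* Count set bits, standing in for the kernel's hweight64(). */
static int popcount64(uint64_t x)
{
	int n = 0;
	for (; x; x &= x - 1)
		n++;
	return n;
}

/*
 * Hypothetical model of reserve_resources(): take @count free units
 * out of @map and return them as a mask the caller stores
 * (cf. ts_cbr_map/ts_dsr_map). Returns 0 if not enough are free.
 */
static uint64_t reserve_units(uint64_t *map, int count)
{
	uint64_t taken = 0;

	if (popcount64(*map) < count)
		return 0;
	for (int bit = 0; count && bit < 64; bit++) {
		if (*map & (1ULL << bit)) {
			taken |= 1ULL << bit;
			count--;
		}
	}
	*map &= ~taken;
	return taken;
}

/* Freeing is just ORing the saved mask back, as in free_gru_resources(). */
static void free_units(uint64_t *map, uint64_t mask)
{
	*map |= mask;
}

int main(void)
{
	uint64_t free_map = ~0ULL;
	uint64_t mine = reserve_units(&free_map, 3);
	printf("reserved %#llx, %d units left\n",
	       (unsigned long long)mine, popcount64(free_map));
	free_units(&free_map, mine);
	printf("%d units after free\n", popcount64(free_map));
	return 0;
}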
224 static int gru_load_mm_tracker(struct gru_state *gru,
228 struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
235 spin_lock(&gru->gs_asid_lock);
237 gru->gs_asid_gen)) {
238 asid = gru_assign_asid(gru);
240 asids->mt_asid_gen = gru->gs_asid_gen;
245 spin_unlock(&gru->gs_asid_lock);
249 if (!test_bit(gru->gs_gid, gms->ms_asidmap))
250 __set_bit(gru->gs_gid, gms->ms_asidmap);
255 gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
260 static void gru_unload_mm_tracker(struct gru_state *gru,
267 asids = &gms->ms_asids[gru->gs_gid];
270 spin_lock(&gru->gs_asid_lock);
274 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
275 spin_unlock(&gru->gs_asid_lock);
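gru_load_mm_tracker() and gru_unload_mm_tracker() keep one tracker per GRU (indexed by gs_gid) in the mm's gru_mm_struct; a cached ASID is reused only while its recorded generation still matches the GRU's current gs_asid_gen, with all updates done under gs_asid_lock. A simplified single-threaded model of that generation test (struct mm_tracker and its field names here are illustrative):

#include <stdio.h>

/* Illustrative per-(mm, gid) tracker, cf. struct gru_mm_tracker. */
struct mm_tracker {
	int asid;	/* 0 means "never assigned" */
	int asid_gen;	/* generation the ASID was assigned under */
};

/* Reuse the cached ASID only if its generation is still current. */
static int load_tracker(struct mm_tracker *t, int cur_gen, int *next_asid)
{
	if (!t->asid || t->asid_gen != cur_gen) {
		t->asid = (*next_asid)++;	/* cf. gru_assign_asid() */
		t->asid_gen = cur_gen;
	}
	return t->asid;
}

int main(void)
{
	struct mm_tracker t = { 0, 0 };
	int next = 1, gen = 0;

	printf("first load: asid %d\n", load_tracker(&t, gen, &next));
	printf("reload:     asid %d\n", load_tracker(&t, gen, &next));
	gen++;	/* a wrap happened; the cached ASID is now stale */
	printf("after wrap: asid %d\n", load_tracker(&t, gen, &next));
	return 0;
}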
426 struct gru_state *gru;
428 gru = gts->ts_gru;
429 gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
431 spin_lock(&gru->gs_lock);
432 gru->gs_gts[gts->ts_ctxnum] = NULL;
433 free_gru_resources(gru, gts);
434 BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
435 __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
439 spin_unlock(&gru->gs_lock);
544 struct gru_state *gru = gts->ts_gru;
550 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
559 gru_unload_mm_tracker(gru, gts);
561 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
580 struct gru_state *gru = gts->ts_gru;
584 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
613 asid = gru_load_mm_tracker(gru, gts);
628 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
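In both the unload and load paths, every per-context structure is located from gs_gru_base_vaddr plus the context number, so only the base address and ctxnum need to be carried around. A hedged sketch of that addressing scheme; the CCH_BASE/CTX_STRIDE offsets below are invented for the example and do not reflect the real hardware layout behind get_cch():

#include <stdint.h>
#include <stdio.h>

/* Invented layout constants, purely for illustration. */
#define CCH_BASE	0x1000UL
#define CTX_STRIDE	0x100UL

/* Model of get_cch(): handles live at fixed per-ctxnum offsets. */
static inline uintptr_t get_cch_addr(uintptr_t base, int ctxnum)
{
	return base + CCH_BASE + (uintptr_t)ctxnum * CTX_STRIDE;
}

int main(void)
{
	uintptr_t base = 0x100000;
	for (int ctxnum = 0; ctxnum < 3; ctxnum++)
		printf("ctx %d: cch @ %#lx\n", ctxnum,
		       (unsigned long)get_cch_addr(base, ctxnum));
	return 0;
}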
648 struct gru_state *gru = gts->ts_gru;
651 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
655 if (gru->gs_gts[gts->ts_ctxnum] != gts)
699 static int gru_check_chiplet_assignment(struct gru_state *gru,
710 return gru->gs_blade_id == blade_id &&
711 (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
715 * Unload the gru context if it is not assigned to the correct blade or
721 struct gru_state *gru;
729 gru = gts->ts_gru;
731 * If gru or gts->ts_tgid_owner isn't initialized properly, return
733 * gru context. The caller is responsible for their inspection and
736 if (!gru || gts->ts_tgid_owner != current->tgid)
739 if (!gru_check_chiplet_assignment(gru, gts)) {
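gru_check_context_placement() composes the chiplet test above: bail out when the context has no GRU or belongs to another tgid, otherwise flag a context loaded on the wrong blade/chiplet. A condensed standalone model of that control flow (the struct names and the -EINVAL convention here are assumptions for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the driver's state. */
struct ctx { int blade, chiplet, owner; };	/* requested placement */
struct unit { int blade, chiplet; };		/* where the GRU lives */

static bool chiplet_ok(const struct unit *u, const struct ctx *c)
{
	/* cf. gru_check_chiplet_assignment(): chiplet < 0 means "any" */
	return u->blade == c->blade &&
	       (c->chiplet < 0 || c->chiplet == u->chiplet);
}

/* Model of the placement check: 0 = leave loaded, -EINVAL = unload. */
static int check_placement(const struct unit *u, const struct ctx *c,
			   int current_tgid)
{
	if (!u || c->owner != current_tgid)
		return 0;	/* nothing to inspect; caller handles it */
	return chiplet_ok(u, c) ? 0 : -EINVAL;
}

int main(void)
{
	struct unit u = { .blade = 0, .chiplet = 1 };
	struct ctx c = { .blade = 0, .chiplet = 1, .owner = 42 };
	printf("same chiplet:  %d\n", check_placement(&u, &c, 42));
	c.chiplet = 0;
	printf("wrong chiplet: %d\n", check_placement(&u, &c, 42));
	return 0;
}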
782 struct gru_state *gru, *gru0;
797 gru = blade->bs_lru_gru;
799 gru = next_gru(blade, gru);
800 blade->bs_lru_gru = gru;
803 gru0 = gru;
805 if (gru_check_chiplet_assignment(gru, gts)) {
806 if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
808 spin_lock(&gru->gs_lock);
810 if (flag && gru == gru0 && ctxnum == ctxnum0)
812 ngts = gru->gs_gts[ctxnum];
823 spin_unlock(&gru->gs_lock);
824 if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
827 if (flag && gru == gru0)
831 gru = next_gru(blade, gru);
846 gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
847 hweight64(gru->gs_dsr_map));
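The steal path in the fragments above walks the blade's GRUs round-robin: start at bs_lru_gru, advance with next_gru(), and remember the starting GRU (gru0) so that one full lap without finding a victim ends the scan. A compact model of that wrap-detecting walk, with next_unit() and NUNITS as illustrative stand-ins:

#include <stdio.h>
#include <stdbool.h>

#define NUNITS 4

/* Illustrative ring walk, cf. next_gru() over a blade's GRUs. */
static int next_unit(int u)
{
	return (u + 1) % NUNITS;
}

/* Scan from the LRU position; stop after one full lap (u == u0). */
static int find_victim(int lru, bool (*usable)(int))
{
	int u0 = lru, u = lru;

	do {
		if (usable(u))
			return u;
		u = next_unit(u);
	} while (u != u0);
	return -1;	/* wrapped around without finding a candidate */
}

static bool unit_two_only(int u)
{
	return u == 2;
}

int main(void)
{
	printf("victim: %d\n", find_victim(3, unit_two_only));
	return 0;
}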
851 * Assign a gru context.
853 static int gru_assign_context_number(struct gru_state *gru)
857 ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
858 __set_bit(ctxnum, &gru->gs_context_map);
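gru_assign_context_number() allocates a context number as the first clear bit of gs_context_map, and the free path clears the bit again (the __clear_bit() in the fragments further above), making the number reusable. A userspace sketch of the same allocate/free pattern, with first_zero_bit() standing in for the kernel's find_first_zero_bit():

#include <stdio.h>
#include <stdint.h>

#define NUM_CTX 16	/* illustrative; hardware has GRU_NUM_CCH contexts */

/* Userspace stand-in for find_first_zero_bit() on a single word. */
static int first_zero_bit(uint64_t map, int nbits)
{
	for (int i = 0; i < nbits; i++)
		if (!(map & (1ULL << i)))
			return i;
	return nbits;	/* kernel convention: "not found" == size */
}

static int assign_ctxnum(uint64_t *map)
{
	int ctxnum = first_zero_bit(*map, NUM_CTX);

	if (ctxnum == NUM_CTX)
		return -1;	/* all contexts busy */
	*map |= 1ULL << ctxnum;	/* cf. __set_bit() */
	return ctxnum;
}

int main(void)
{
	uint64_t map = 0;
	printf("ctx %d\n", assign_ctxnum(&map));	/* 0 */
	printf("ctx %d\n", assign_ctxnum(&map));	/* 1 */
	map &= ~1ULL;		/* free context 0, cf. __clear_bit() */
	printf("ctx %d\n", assign_ctxnum(&map));	/* 0 again */
	return 0;
}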
867 struct gru_state *gru, *grux;
874 gru = NULL;
882 gru = grux;
889 if (gru) {
890 spin_lock(&gru->gs_lock);
891 if (!check_gru_resources(gru, gts->ts_cbr_au_count,
893 spin_unlock(&gru->gs_lock);
896 reserve_gru_resources(gru, gts);
897 gts->ts_gru = gru;
898 gts->ts_blade = gru->gs_blade_id;
899 gts->ts_ctxnum = gru_assign_context_number(gru);
901 gru->gs_gts[gts->ts_ctxnum] = gts;
902 spin_unlock(&gru->gs_lock);
915 return gru;
923 * Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
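Since every gseg is mapped at a GRU_GSEG_PAGESIZE boundary, the base of the gseg containing an arbitrary user address falls out of a simple mask. A hedged illustration; the 256 KB value below is an assumption for the example, not a claim about the real constant in gru.h:

#include <stdio.h>
#include <stdint.h>

/* Assumed value purely for the example; see GRU_GSEG_PAGESIZE in gru.h. */
#define GSEG_PAGESIZE (256 * 1024UL)

/* Round a user address down to the start of its gseg mapping. */
static uintptr_t gseg_base(uintptr_t vaddr)
{
	return vaddr & ~(GSEG_PAGESIZE - 1);
}

int main(void)
{
	uintptr_t addr = 0x40041234;
	printf("vaddr %#lx -> gseg base %#lx\n",
	       (unsigned long)addr, (unsigned long)gseg_base(addr));
	return 0;
}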