/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvif/
fifo.c
    32  struct nv_device_info_v1_data runlist[64];  in nvif_fifo_runlists() (member)
    37  if (device->runlist)  in nvif_fifo_runlists()
    45  for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++) {  in nvif_fifo_runlists()
    46  a->v.runlist[i].mthd = NV_DEVICE_HOST_RUNLIST_ENGINES;  in nvif_fifo_runlists()
    47  a->v.runlist[i].data = i;  in nvif_fifo_runlists()
    55  device->runlist = kcalloc(device->runlists, sizeof(*device->runlist),  in nvif_fifo_runlists()
    57  if (!device->runlist) {  in nvif_fifo_runlists()
    63  if (a->v.runlist[i].mthd != NV_DEVICE_INFO_INVALID)  in nvif_fifo_runlists()
    64  device->runlist[  in nvif_fifo_runlists()
    [all...]
device.c
    45  kfree(device->runlist);  in nvif_device_dtor()
    46  device->runlist = NULL;  in nvif_device_dtor()
    56  device->runlist = NULL;  in nvif_device_ctor()
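
The nvif matches above sketch a small cache: nvif_fifo_runlists() allocates device->runlist once with kcalloc(), fills one entry per runlist from the device query, and nvif_device_dtor() frees it again. A minimal sketch of that lifecycle, using made-up demo_* types rather than the real nvif structures (the query itself is elided):

    #include <linux/slab.h>    /* kcalloc, kfree */
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_runlist {
            u64 engines;                  /* engine mask reported for this runlist */
    };

    struct demo_device {
            int runlists;                 /* runlist count reported by the device */
            struct demo_runlist *runlist; /* lazily allocated cache, NULL until queried */
    };

    /* Query once and cache; later calls return immediately. */
    static int demo_fifo_runlists(struct demo_device *device)
    {
            if (device->runlist)          /* already queried and cached */
                    return 0;

            device->runlist = kcalloc(device->runlists, sizeof(*device->runlist),
                                      GFP_KERNEL);
            if (!device->runlist)
                    return -ENOMEM;

            /* The real code now copies each runlist's engine mask from the query reply. */
            return 0;
    }

    /* Destructor mirrors device.c above: free the cache and clear the pointer. */
    static void demo_device_dtor(struct demo_device *device)
    {
            kfree(device->runlist);
            device->runlist = NULL;
    }
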
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvif/ |
fifo.c
    32  struct nv_device_info_v1_data runlist[64];  in nvif_fifo_runlists() (member)
    37  if (device->runlist)  in nvif_fifo_runlists()
    45  for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++)  in nvif_fifo_runlists()
    46  a->v.runlist[i].mthd = NV_DEVICE_FIFO_RUNLIST_ENGINES(i);  in nvif_fifo_runlists()
    53  device->runlist = kcalloc(device->runlists, sizeof(*device->runlist),  in nvif_fifo_runlists()
    55  if (!device->runlist) {  in nvif_fifo_runlists()
    62  device->runlist[i].engines = a->v.runlist[i].data;  in nvif_fifo_runlists()
    93  if (device->runlist[  in nvif_fifo_runlist()
    [all...]
device.c
    45  kfree(device->runlist);  in nvif_device_dtor()
    46  device->runlist = NULL;  in nvif_device_dtor()
    56  device->runlist = NULL;  in nvif_device_ctor()
/kernel/linux/linux-5.10/fs/ntfs/ |
runlist.h
    3   * runlist.h - Defines for runlist handling in NTFS Linux kernel driver.
    35  * runlist - in memory vcn to lcn mapping array including a read/write lock
    36  * @rl: pointer to an array of runlist elements
    43  } runlist;  (typedef)
    45  static inline void ntfs_init_runlist(runlist *rl)  in ntfs_init_runlist()
    81  runlist *const runlist, const s64 new_length);
    83  int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
    [all...]
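
Pulling the runlist.h fragments above together: the runlist type is just the element array plus a read/write semaphore, and ntfs_init_runlist() starts it out unmapped. The element fields below are illustrative (they are defined in the header but not visible in these matches), hence the demo_ prefixes:

    #include <linux/rwsem.h>
    #include <linux/types.h>

    /* One mapping run; field names here are illustrative. */
    typedef struct {
            s64 vcn;     /* starting virtual cluster number */
            s64 lcn;     /* starting logical cluster number, or a negative marker (hole/unmapped) */
            s64 length;  /* run length in clusters */
    } demo_runlist_element;

    /* In-memory vcn-to-lcn mapping array plus its read/write lock. */
    typedef struct {
            demo_runlist_element *rl;  /* NULL while the runlist is unmapped */
            struct rw_semaphore lock;  /* serializes access to rl */
    } demo_runlist;

    static inline void demo_init_runlist(demo_runlist *rl)
    {
            rl->rl = NULL;
            init_rwsem(&rl->lock);
    }
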
runlist.c
    3     * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
    16    * ntfs_rl_mm - runlist memmove
    18    * It is up to the caller to serialize access to the runlist @base.
    28    * ntfs_rl_mc - runlist memory copy
    42    * @rl: original runlist
    43    * @old_size: number of runlist elements in the original runlist @rl
    44    * @new_size: number of runlist elements we need space for
    50    * It is up to the caller to serialize access to the runlist
    1485  ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist, const s64 new_length)  in ntfs_rl_truncate_nolock() (argument)
    1630  ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist, const VCN start, const s64 length)  in ntfs_rl_punch_nolock() (argument)
    [all...]
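
The ntfs_rl_mm/ntfs_rl_mc helpers named in the kernel-doc above boil down to memmove/memcpy over element slots, with serialization explicitly left to the caller. A rough equivalent, reusing the demo_runlist_element sketched under runlist.h:

    #include <linux/string.h>

    /* runlist memmove: shift 'size' elements within one array (caller holds the lock). */
    static inline void demo_rl_mm(demo_runlist_element *base, int dst, int src, int size)
    {
            if (dst != src && size > 0)
                    memmove(base + dst, base + src, size * sizeof(*base));
    }

    /* runlist memory copy: copy 'size' elements between two arrays. */
    static inline void demo_rl_mc(demo_runlist_element *dstbase, int dst,
                                  demo_runlist_element *srcbase, int src, int size)
    {
            if (size > 0)
                    memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
    }
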
attrib.c
    25   * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
    26   * @ni: ntfs inode for which to map (part of) a runlist
    27   * @vcn: map runlist part containing this vcn
    30   * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
    34   * runlist fragments and allows their mapping. If you do not have the mft
    51   * of bounds of the runlist.
    53   * Note the runlist can be NULL after this function returns if @vcn is zero and
    54   * the attribute has zero allocated size, i.e. there simply is no runlist.
    63   * Locking: - The runlist described by @ni must be locked for writing on entry
    64   * and is locked on return. Note the runlist wil
    720  load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start, const s64 size, const s64 initialized_size)  in load_attribute_list() (argument)
    [all...]
aops.c
    25   #include "runlist.h"
    185  /* $MFT/$DATA must have its complete runlist in memory at all times. */  in ntfs_read_block()
    186  BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));  in ntfs_read_block()
    208  * on due to the runlist being incomplete and if the page is being  in ntfs_read_block()
    248  down_read(&ni->runlist.lock);  in ntfs_read_block()
    249  rl = ni->runlist.rl;  in ntfs_read_block()
    275  /* If first try and runlist unmapped, map and retry. */  in ntfs_read_block()
    279  * Attempt to map runlist, dropping lock for  in ntfs_read_block()
    282  up_read(&ni->runlist.lock);  in ntfs_read_block()
    288  up_read(&ni->runlist  in ntfs_read_block()
    [all...]
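
The ntfs_read_block() matches show the recurring access pattern: take the runlist read lock, use ni->runlist.rl if it is already mapped, otherwise drop the lock, map the needed fragment, and retry once. A condensed sketch under assumptions: demo_map_runlist() is a stand-in for the real mapping call, and the zero-length-terminated walk is a simplification of the real lookup.

    #include <linux/rwsem.h>
    #include <linux/types.h>

    struct demo_ntfs_inode {
            demo_runlist runlist;   /* the demo_runlist sketched after runlist.h above */
    };

    /* Placeholder standing in for the real mapping routine; pretends it worked. */
    static int demo_map_runlist(struct demo_ntfs_inode *ni, s64 vcn)
    {
            (void)ni; (void)vcn;
            return 0;
    }

    /* Translate @vcn to an LCN under the read lock, mapping the runlist once if needed. */
    static s64 demo_vcn_to_lcn(struct demo_ntfs_inode *ni, s64 vcn)
    {
            demo_runlist_element *rl;
            bool mapped_once = false;
            s64 lcn = -1;                        /* -1: unmapped / not found */

    retry:
            down_read(&ni->runlist.lock);
            rl = ni->runlist.rl;
            if (!rl && !mapped_once) {
                    /* Runlist unmapped: drop the lock while mapping, then retry. */
                    up_read(&ni->runlist.lock);
                    if (demo_map_runlist(ni, vcn))
                            return -1;
                    mapped_once = true;
                    goto retry;
            }
            for (; rl && rl->length; rl++) {     /* walk runs to the one covering @vcn */
                    if (vcn >= rl->vcn && vcn < rl->vcn + rl->length) {
                            /* Negative lcn markers (holes etc.) are passed through as-is. */
                            lcn = rl->lcn >= 0 ? rl->lcn + (vcn - rl->vcn) : rl->lcn;
                            break;
                    }
            }
            up_read(&ni->runlist.lock);
            return lcn;
    }
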
mft.c
    537   runlist.lock);  in ntfs_sync_mft_mirror()
    538   rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;  in ntfs_sync_mft_mirror()
    540   * $MFTMirr always has the whole of its runlist  in ntfs_sync_mft_mirror()
    574   up_read(&NTFS_I(vol->mftmirr_ino)->runlist.lock);  in ntfs_sync_mft_mirror()
    729   down_read(&NTFS_I(vol->mft_ino)->runlist.lock);  in write_mft_record_nolock()
    730   rl = NTFS_I(vol->mft_ino)->runlist.rl;  in write_mft_record_nolock()
    761   up_read(&NTFS_I(vol->mft_ino)->runlist.lock);  in write_mft_record_nolock()
    1268  * - This function takes NTFS_I(vol->mftbmp_ino)->runlist.lock for
    1300  down_write(&mftbmp_ni->runlist.lock);  in ntfs_mft_bitmap_extend_allocation_nolock()
    1307  up_write(&mftbmp_ni->runlist  in ntfs_mft_bitmap_extend_allocation_nolock()
    [all...]
inode.h
    24  #include "runlist.h"
    55  runlist runlist; /* If state has the NI_NonResident bit set,  (member)
    56  the runlist of the unnamed data attribute
    60  If runlist.rl is NULL, the runlist has not
    66  runlist.rl is always NULL.*/
    87  runlist attr_list_rl; /* Run list for the attribute list value. */
file.c
    261  * determined from the runlist. This would greatly reduce the  in ntfs_attr_extend_initialized()
    700  * cache, there is no need to check the runlist again. The  in ntfs_prepare_pages_for_non_resident_write()
    830  down_read(&ni->runlist.lock);  in ntfs_prepare_pages_for_non_resident_write()
    832  rl = ni->runlist.rl;  in ntfs_prepare_pages_for_non_resident_write()
    854  * runlist as the map cache will be used from  in ntfs_prepare_pages_for_non_resident_write()
    859  up_write(&ni->runlist.lock);  in ntfs_prepare_pages_for_non_resident_write()
    862  up_read(&ni->runlist.lock);  in ntfs_prepare_pages_for_non_resident_write()
    870  * If it is not a hole and not out of bounds, the runlist is  in ntfs_prepare_pages_for_non_resident_write()
    875  /* Attempt to map runlist. */  in ntfs_prepare_pages_for_non_resident_write()
    878  * We need the runlist locke  in ntfs_prepare_pages_for_non_resident_write()
    [all...]
/kernel/linux/linux-6.6/fs/ntfs/ |
runlist.h
    3   * runlist.h - Defines for runlist handling in NTFS Linux kernel driver.
    35  * runlist - in memory vcn to lcn mapping array including a read/write lock
    36  * @rl: pointer to an array of runlist elements
    43  } runlist;  (typedef)
    45  static inline void ntfs_init_runlist(runlist *rl)  in ntfs_init_runlist()
    81  runlist *const runlist, const s64 new_length);
    83  int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
    [all...]
runlist.c
    3     * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
    16    * ntfs_rl_mm - runlist memmove
    18    * It is up to the caller to serialize access to the runlist @base.
    28    * ntfs_rl_mc - runlist memory copy
    42    * @rl: original runlist
    43    * @old_size: number of runlist elements in the original runlist @rl
    44    * @new_size: number of runlist elements we need space for
    50    * It is up to the caller to serialize access to the runlist
    1485  ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist, const s64 new_length)  in ntfs_rl_truncate_nolock() (argument)
    1630  ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist, const VCN start, const s64 length)  in ntfs_rl_punch_nolock() (argument)
    [all...]
attrib.c
    25   * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
    26   * @ni: ntfs inode for which to map (part of) a runlist
    27   * @vcn: map runlist part containing this vcn
    30   * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
    34   * runlist fragments and allows their mapping. If you do not have the mft
    51   * of bounds of the runlist.
    53   * Note the runlist can be NULL after this function returns if @vcn is zero and
    54   * the attribute has zero allocated size, i.e. there simply is no runlist.
    63   * Locking: - The runlist described by @ni must be locked for writing on entry
    64   * and is locked on return. Note the runlist wil
    720  load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start, const s64 size, const s64 initialized_size)  in load_attribute_list() (argument)
    [all...]
aops.c
    25   #include "runlist.h"
    185  /* $MFT/$DATA must have its complete runlist in memory at all times. */  in ntfs_read_block()
    186  BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));  in ntfs_read_block()
    208  * on due to the runlist being incomplete and if the page is being  in ntfs_read_block()
    248  down_read(&ni->runlist.lock);  in ntfs_read_block()
    249  rl = ni->runlist.rl;  in ntfs_read_block()
    275  /* If first try and runlist unmapped, map and retry. */  in ntfs_read_block()
    279  * Attempt to map runlist, dropping lock for  in ntfs_read_block()
    282  up_read(&ni->runlist.lock);  in ntfs_read_block()
    288  up_read(&ni->runlist  in ntfs_read_block()
    [all...]
mft.c
    537   runlist.lock);  in ntfs_sync_mft_mirror()
    538   rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;  in ntfs_sync_mft_mirror()
    540   * $MFTMirr always has the whole of its runlist  in ntfs_sync_mft_mirror()
    574   up_read(&NTFS_I(vol->mftmirr_ino)->runlist.lock);  in ntfs_sync_mft_mirror()
    729   down_read(&NTFS_I(vol->mft_ino)->runlist.lock);  in write_mft_record_nolock()
    730   rl = NTFS_I(vol->mft_ino)->runlist.rl;  in write_mft_record_nolock()
    761   up_read(&NTFS_I(vol->mft_ino)->runlist.lock);  in write_mft_record_nolock()
    1268  * - This function takes NTFS_I(vol->mftbmp_ino)->runlist.lock for
    1300  down_write(&mftbmp_ni->runlist.lock);  in ntfs_mft_bitmap_extend_allocation_nolock()
    1307  up_write(&mftbmp_ni->runlist  in ntfs_mft_bitmap_extend_allocation_nolock()
    [all...]
inode.h
    24  #include "runlist.h"
    55  runlist runlist; /* If state has the NI_NonResident bit set,  (member)
    56  the runlist of the unnamed data attribute
    60  If runlist.rl is NULL, the runlist has not
    66  runlist.rl is always NULL.*/
    87  runlist attr_list_rl; /* Run list for the attribute list value. */
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
gpfifogv100.c
    46   /* Block runlist to prevent the channel from being rescheduled. */  in gv100_fifo_gpfifo_engine_valid()
    59   /* Resume runlist. */  in gv100_fifo_gpfifo_engine_valid()
    131  int runlist = ffs(*runlists) -1, ret, i;  in gv100_fifo_gpfifo_new_() (local)
    137  if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)  in gv100_fifo_gpfifo_new_()
    139  *runlists = BIT_ULL(runlist);  in gv100_fifo_gpfifo_new_()
    141  engm = fifo->runlist[runlist].engm;  in gv100_fifo_gpfifo_new_()
    152  chan->runl = runlist;  in gv100_fifo_gpfifo_new_()
    237  "runlist  in gv100_fifo_gpfifo_new()
    [all...]
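
Both gpfifo constructors (here and in gpfifogk104.c below) pick a runlist the same way: the caller passes a bitmask of acceptable runlists, ffs() selects the lowest set bit, the index is validated against the runlist count, and the single chosen runlist is reported back through the mask. A sketch of just that step, mirroring the matched lines; fifo_runlist_nr stands in for fifo->runlist_nr:

    #include <linux/bitops.h>
    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int demo_pick_runlist(u64 *runlists, int fifo_runlist_nr)
    {
            int runlist = ffs(*runlists) - 1;   /* lowest requested runlist, -1 if mask empty */

            if (runlist < 0 || runlist >= fifo_runlist_nr)
                    return -EINVAL;

            *runlists = BIT_ULL(runlist);       /* report back exactly the runlist chosen */
            return runlist;
    }
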
nv50.c
    36   cur = fifo->runlist[fifo->cur_runlist];  in nv50_fifo_runlist_update_locked()
    67   false, &fifo->runlist[0]);  in nv50_fifo_oneinit()
    72   false, &fifo->runlist[1]);  in nv50_fifo_oneinit()
    103  nvkm_memory_unref(&fifo->runlist[1]);  in nv50_fifo_dtor()
    104  nvkm_memory_unref(&fifo->runlist[0]);  in nv50_fifo_dtor()
gk104.c
    175  nvkm_error(subdev, "runlist %d update timeout\n", runl);  in gk104_fifo_runlist_commit()
    181  const struct gk104_fifo_runlist_func *func = fifo->func->runlist;  in gk104_fifo_runlist_update()
    189  mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];  in gk104_fifo_runlist_update()
    190  fifo->runlist[runl].next = !fifo->runlist[runl].next;  in gk104_fifo_runlist_update()
    193  list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {  in gk104_fifo_runlist_update()
    197  list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {  in gk104_fifo_runlist_update()
    229  list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);  in gk104_fifo_runlist_insert()
    232  list_add_tail(&chan->head, &fifo->runlist[cha  in gk104_fifo_runlist_insert()
    [all...]
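
gk104_fifo_runlist_update() is double-buffered: each runlist keeps two backing buffers, the update writes the one indexed by .next, flips .next, and rebuilds the buffer by walking the channel (and channel-group) lists. A stripped-down sketch of that ping-pong, with placeholder types and the hardware commit omitted:

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_chan {
            struct list_head head;
            u32 id;
    };

    struct demo_runl {
            u32 *mem[2];             /* two runlist buffers, written alternately */
            int next;                /* index of the buffer the next update writes */
            struct list_head chan;   /* channels currently attached to this runlist */
    };

    static void demo_runlist_update(struct demo_runl *runl)
    {
            struct demo_chan *chan;
            u32 *mem = runl->mem[runl->next];
            u32 nr = 0;

            runl->next = !runl->next;            /* flip for the following update */

            list_for_each_entry(chan, &runl->chan, head)
                    mem[nr++] = chan->id;        /* stand-in for the hardware entry format */

            /* ...then point the hardware at 'mem'/'nr' and wait for the commit (omitted). */
    }
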
gpfifogk104.c
    249  int runlist = ffs(*runlists) -1, ret, i;  in gk104_fifo_gpfifo_new_() (local)
    254  if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)  in gk104_fifo_gpfifo_new_()
    256  *runlists = BIT_ULL(runlist);  in gk104_fifo_gpfifo_new_()
    258  engm = fifo->runlist[runlist].engm;  in gk104_fifo_gpfifo_new_()
    272  chan->runl = runlist;  in gk104_fifo_gpfifo_new_()
    343  "runlist %016llx priv %d\n",  in gk104_fifo_gpfifo_new()
    345  args->v0.ilength, args->v0.runlist, args->v0.priv);  in gk104_fifo_gpfifo_new()
    349  &args->v0.runlist,  in gk104_fifo_gpfifo_new()
    [all...]
gf100.c
    61   cur = fifo->runlist.mem[fifo->runlist.active];  in gf100_fifo_runlist_commit()
    62   fifo->runlist.active = !fifo->runlist.active;  in gf100_fifo_runlist_commit()
    85   if (wait_event_timeout(fifo->runlist.wait,  in gf100_fifo_runlist_commit()
    88   nvkm_error(subdev, "runlist update timeout\n");  in gf100_fifo_runlist_commit()
    450  wake_up(&fifo->runlist.wait);  in gf100_fifo_intr_runlist()
    589  false, &fifo->runlist.mem[0]);  in gf100_fifo_oneinit()
    594  false, &fifo->runlist.mem[1]);  in gf100_fifo_oneinit()
    598  init_waitqueue_head(&fifo->runlist  in gf100_fifo_oneinit()
    [all...]
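
gf100_fifo_runlist_commit() shows the commit handshake: after pointing the hardware at the new buffer, the update path sleeps on fifo->runlist.wait with wait_event_timeout(), and gf100_fifo_intr_runlist() wakes it. A sketch of that wait/wake pair; the idle check and the 2000 ms timeout here are assumptions for illustration:

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_runlist_wait);

    /* Placeholder for the "runlist idle" status check the real code polls. */
    static bool demo_runlist_idle(void)
    {
            return true;
    }

    /* Update path: kick the hardware, then sleep until the runlist interrupt fires. */
    static int demo_runlist_commit(void)
    {
            /* ...write the new runlist address/length to the hardware (omitted)... */
            if (!wait_event_timeout(demo_runlist_wait, demo_runlist_idle(),
                                    msecs_to_jiffies(2000)))
                    return -ETIMEDOUT;   /* the "runlist update timeout" case above */
            return 0;
    }

    /* Interrupt path: wake anyone waiting for the commit to finish. */
    static void demo_runlist_intr(void)
    {
            wake_up(&demo_runlist_wait);
    }
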
gpfifotu102.c
    65  "runlist %016llx priv %d\n",  in tu102_fifo_gpfifo_new()
    67  args->v0.ilength, args->v0.runlist, args->v0.priv);  in tu102_fifo_gpfifo_new()
    71  &args->v0.runlist,  in tu102_fifo_gpfifo_new()
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/subdev/top/ |
ga100.c
    57  info->runlist = (data & 0x00fffc00);  in ga100_top_parse()
    88  "runlist %6x engine %2d reset %2d\n", type, inst,  in ga100_top_parse()
    90  info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist,  in ga100_top_parse()
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/top/ |
base.c
    35   info->runlist = -1;  in nvkm_top_device_new()
    150  if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {  in nvkm_top_engine()
    151  *runl = info->runlist;  in nvkm_top_engine()
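
base.c fills in the contract the fifo code relies on: every parsed topology entry starts with runlist = -1 ("not on any runlist"), and nvkm_top_engine() only reports entries that have both an engine and a runlist, returning the runlist id through *runl. A sketch of that lookup over a plain array (the real code walks a device list):

    struct demo_top_info {
            int engine;    /* engine id, or -1 if none */
            int runlist;   /* runlist id, initialised to -1: not on any runlist */
    };

    /* Return the engine id of the index'th entry that has both an engine and a runlist. */
    static int demo_top_engine(const struct demo_top_info *info, int count,
                               int index, int *runl)
    {
            int i, n = 0;

            for (i = 0; i < count; i++) {
                    if (info[i].engine >= 0 && info[i].runlist >= 0 && n++ == index) {
                            *runl = info[i].runlist;
                            return info[i].engine;
                    }
            }
            return -1;
    }
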