/kernel/linux/linux-5.10/arch/c6x/ |
H A D | Makefile | 2 # linux/arch/c6x/Makefile 30 head-y := arch/c6x/kernel/head.o 31 core-y += arch/c6x/kernel/ arch/c6x/mm/ arch/c6x/platforms/ 32 libs-y += arch/c6x/lib/ 37 boot := arch/$(ARCH)/boot 57 @echo ' vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)' 58 @echo ' dtbImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
|
/kernel/linux/linux-5.10/arch/powerpc/ |
H A D | Makefile | 49 KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o 53 # There is a corresponding test in arch/powerpc/lib/Makefile 56 KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o 189 KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr) 204 -T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds 240 head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o 241 head-$(CONFIG_PPC_BOOK3S_32) := arch/powerpc/kernel/head_book3s_32.o 242 head-$(CONFIG_PPC_8xx) := arch/powerpc/kernel/head_8xx.o 243 head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o 244 head-$(CONFIG_44x) := arch/powerp [all...] |
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/ |
H A D | mmu.c | 252 .efer = vcpu->arch.efer, in vcpu_to_role_regs() 646 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; in is_tdp_mmu_active() 688 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches() 692 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache, in mmu_topup_memory_caches() 697 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache, in mmu_topup_memory_caches() 702 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_topup_memory_caches() 708 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache); in mmu_free_memory_caches() 709 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache); in mmu_free_memory_caches() 710 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache); in mmu_free_memory_caches() 711 kvm_mmu_free_memory_cache(&vcpu->arch in mmu_free_memory_caches() 4224 struct kvm_arch_async_pf arch; kvm_arch_setup_async_pf() local [all...] |
/kernel/linux/linux-6.6/arch/arm64/kvm/vgic/ |
H A D | vgic-kvm-device.c | 38 if (kvm->arch.vgic.vgic_model != type_needed) in vgic_check_type() 46 struct vgic_dist *vgic = &kvm->arch.vgic; in kvm_set_legacy_vgic_v2_addr() 49 mutex_lock(&kvm->arch.config_lock); in kvm_set_legacy_vgic_v2_addr() 71 mutex_unlock(&kvm->arch.config_lock); in kvm_set_legacy_vgic_v2_addr() 94 struct vgic_dist *vgic = &kvm->arch.vgic; in kvm_vgic_addr() 189 mutex_lock(&kvm->arch.config_lock); in kvm_vgic_addr() 197 mutex_unlock(&kvm->arch.config_lock); in kvm_vgic_addr() 236 mutex_lock(&dev->kvm->arch.config_lock); in vgic_set_common_attr() 238 if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis) in vgic_set_common_attr() 241 dev->kvm->arch in vgic_set_common_attr() [all...] |
H A D | vgic-v4.c | 98 raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); in vgic_v4_doorbell_handler() 99 vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true; in vgic_v4_doorbell_handler() 100 raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); in vgic_v4_doorbell_handler() 117 struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in vgic_v4_enable_vsgis() 189 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_v4_configure_vsgis() 193 lockdep_assert_held(&kvm->arch.config_lock); in vgic_v4_configure_vsgis() 215 struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in vgic_v4_get_vlpi_state() 241 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_v4_init() 246 lockdep_assert_held(&kvm->arch.config_lock); in vgic_v4_init() 264 dist->its_vm.vpes[i] = &vcpu->arch in vgic_v4_init() [all...] |
/third_party/mesa3d/src/gallium/drivers/panfrost/ |
H A D | pan_screen.c | 105 bool has_mrt = (dev->arch >= 5); in panfrost_get_param() 202 return dev->arch >= 6 ? 320 : 310; in panfrost_get_param() 242 return dev->arch >= 6; in panfrost_get_param() 292 return dev->arch <= 5; in panfrost_get_param() 298 return dev->arch <= 7; in panfrost_get_param() 328 if (dev->arch <= 6) { in panfrost_get_param() 333 if (dev->arch >= 9) { in panfrost_get_param() 378 (dev->arch <= 5); in panfrost_get_shader_param() 416 return dev->arch >= 6; in panfrost_get_shader_param() 439 return dev->arch > in panfrost_get_shader_param() [all...] |
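The pan_screen.c hits above all follow one pattern: panfrost_get_param() answers capability queries by comparing the device's Mali architecture generation (dev->arch) against the generation that introduced or dropped a feature. Below is a minimal standalone C sketch of that style of version-gated capability reporting; the device struct, parameter names, and thresholds here are invented for illustration and are not Mesa's API.

#include <stdio.h>

/* Hypothetical device descriptor; Mesa's real struct is far larger. */
struct device {
        unsigned arch;   /* architecture generation, e.g. 5, 6, 7, 9 */
};

enum param { PARAM_MRT, PARAM_GLSL_LEVEL, PARAM_LEGACY_FORMATS };

/* Version-gated capability lookup in the style of panfrost_get_param(). */
static int get_param(const struct device *dev, enum param p)
{
        switch (p) {
        case PARAM_MRT:            return dev->arch >= 5;             /* boolean cap */
        case PARAM_GLSL_LEVEL:     return dev->arch >= 6 ? 320 : 310; /* valued cap  */
        case PARAM_LEGACY_FORMATS: return dev->arch <= 7;             /* dropped later */
        }
        return 0;
}

int main(void)
{
        struct device dev = { .arch = 6 };
        printf("MRT=%d GLSL=%d legacy=%d\n",
               get_param(&dev, PARAM_MRT),
               get_param(&dev, PARAM_GLSL_LEVEL),
               get_param(&dev, PARAM_LEGACY_FORMATS));
        return 0;
}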
/kernel/linux/linux-5.10/arch/x86/kvm/ |
H A D | hyperv.h | 5 * derived from arch/x86/kvm/x86.c 55 return &vcpu->arch.hyperv; in vcpu_to_hv_vcpu() 60 struct kvm_vcpu_arch *arch; in hv_vcpu_to_vcpu() local 62 arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv); in hv_vcpu_to_vcpu() 63 return container_of(arch, struct kvm_vcpu, arch); in hv_vcpu_to_vcpu() 68 return &vcpu->arch.hyperv.synic; in vcpu_to_synic() 78 return &vcpu->kvm->arch.hyperv.hv_syndbg; in vcpu_to_hv_syndbg() 117 return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap, in kvm_hv_has_stimer_pending()
|
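The hyperv.h hit above shows the usual KVM embedding trick: the Hyper-V state lives inside struct kvm_vcpu_arch, which itself lives inside struct kvm_vcpu, so hv_vcpu_to_vcpu() recovers the outer vcpu with two container_of() steps. Here is a self-contained sketch of that pattern, with made-up struct names standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(), in the spirit of the kernel macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for kvm_vcpu_hv / kvm_vcpu_arch / kvm_vcpu. */
struct hv_state  { int synic_active; };
struct vcpu_arch { struct hv_state hyperv; };
struct vcpu      { int id; struct vcpu_arch arch; };

/* Same shape as hv_vcpu_to_vcpu(): walk back out through both containers. */
static struct vcpu *hv_to_vcpu(struct hv_state *hv)
{
        struct vcpu_arch *arch = container_of(hv, struct vcpu_arch, hyperv);
        return container_of(arch, struct vcpu, arch);
}

int main(void)
{
        struct vcpu v = { .id = 3 };
        printf("recovered vcpu id = %d\n", hv_to_vcpu(&v.arch.hyperv)->id);
        return 0;
}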
/kernel/linux/linux-6.6/arch/s390/ |
H A D | Makefile | 63 cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include 84 KBUILD_LDFLAGS_MODULE += arch/s390/lib/expoline/expoline.o 121 libs-y += arch/s390/lib/ 123 boot := arch/s390/boot 124 syscalls := arch/s390/kernel/syscalls 125 tools := arch/s390/tools 142 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ 159 $(Q)$(MAKE) $(build)=arch/s390/kernel/vdso64 include/generated/vdso64-offsets.h 161 $(build)=arch/s390/kernel/vdso32 include/generated/vdso32-offsets.h) 166 $(Q)$(MAKE) $(build)=arch/s39 [all...] |
/third_party/vk-gl-cts/scripts/build/ |
H A D | config.py | 130 def __init__(self, version, arch): 137 if arch == self.ARCH_64BIT: 139 elif arch == self.ARCH_32BIT: 142 if arch == self.ARCH_64BIT: 147 self.arch = arch 164 def registryKeyAvailable (root, arch, name): 166 key = _winreg.OpenKey(root, name, 0, _winreg.KEY_READ | arch) 175 if nativeArch == self.ARCH_32BIT and self.arch == self.ARCH_64BIT: 178 arch [all...] |
/third_party/vk-gl-cts/scripts/ctsbuild/ |
H A D | config.py | 130 def __init__(self, version, arch): 137 if arch == self.ARCH_64BIT: 139 elif arch == self.ARCH_32BIT: 142 if arch == self.ARCH_64BIT: 147 self.arch = arch 164 def registryKeyAvailable (root, arch, name): 166 key = _winreg.OpenKey(root, name, 0, _winreg.KEY_READ | arch) 175 if nativeArch == self.ARCH_32BIT and self.arch == self.ARCH_64BIT: 178 arch [all...] |
/kernel/linux/linux-5.10/arch/s390/kvm/ |
H A D | gaccess.c | 266 if (vcpu->arch.sie_block->eca & ECA_SII) { in ipte_lock_held() 269 read_lock(&vcpu->kvm->arch.sca_lock); in ipte_lock_held() 271 read_unlock(&vcpu->kvm->arch.sca_lock); in ipte_lock_held() 274 return vcpu->kvm->arch.ipte_lock_count != 0; in ipte_lock_held() 281 mutex_lock(&vcpu->kvm->arch.ipte_mutex); in ipte_lock_simple() 282 vcpu->kvm->arch.ipte_lock_count++; in ipte_lock_simple() 283 if (vcpu->kvm->arch.ipte_lock_count > 1) in ipte_lock_simple() 286 read_lock(&vcpu->kvm->arch.sca_lock); in ipte_lock_simple() 291 read_unlock(&vcpu->kvm->arch.sca_lock); in ipte_lock_simple() 298 read_unlock(&vcpu->kvm->arch in ipte_lock_simple() [all...] |
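The gaccess.c excerpts show the IPTE lock convention: callers serialize on kvm->arch.ipte_mutex and bump kvm->arch.ipte_lock_count, and only the first locker actually flips the IPTE-control state; nested lockers see a count above one and return early. A rough userspace analogue of that "only the outermost caller touches the hardware" counting scheme, using a pthread mutex in place of the kernel primitives:

#include <pthread.h>
#include <stdio.h>

/* Toy analogue of kvm->arch.{ipte_mutex, ipte_lock_count}. */
static pthread_mutex_t ipte_mutex = PTHREAD_MUTEX_INITIALIZER;
static int ipte_lock_count;

static void set_control_bit(int on)    /* stands in for the real SCA update */
{
        printf("control bit -> %d\n", on);
}

static void ipte_lock(void)
{
        pthread_mutex_lock(&ipte_mutex);
        if (++ipte_lock_count == 1)     /* only the first locker acts */
                set_control_bit(1);
        pthread_mutex_unlock(&ipte_mutex);
}

static void ipte_unlock(void)
{
        pthread_mutex_lock(&ipte_mutex);
        if (--ipte_lock_count == 0)     /* only the last unlocker acts */
                set_control_bit(0);
        pthread_mutex_unlock(&ipte_mutex);
}

int main(void)
{
        ipte_lock();    /* sets the bit   */
        ipte_lock();    /* count only     */
        ipte_unlock();  /* count only     */
        ipte_unlock();  /* clears the bit */
        return 0;
}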
/kernel/linux/linux-5.10/arch/arm64/kvm/vgic/ |
H A D | vgic-v4.c | 98 raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); in vgic_v4_doorbell_handler() 99 vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true; in vgic_v4_doorbell_handler() 100 raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); in vgic_v4_doorbell_handler() 117 struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in vgic_v4_enable_vsgis() 190 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_v4_configure_vsgis() 217 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_v4_init() 237 dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in vgic_v4_init() 293 struct its_vm *its_vm = &kvm->arch.vgic.its_vm; in vgic_v4_teardown() 315 struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in vgic_v4_put() 325 struct its_vpe *vpe = &vcpu->arch in vgic_v4_load() [all...] |
/kernel/linux/linux-5.10/tools/perf/arch/x86/annotate/ |
H A D | instructions.c | 147 static bool x86__ins_is_fused(struct arch *arch, const char *ins1, in x86__ins_is_fused() argument 150 if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp")) in x86__ins_is_fused() 153 if (arch->model == 0x1e) { in x86__ins_is_fused() 175 static int x86__cpuid_parse(struct arch *arch, char *cpuid) in x86__cpuid_parse() argument 185 arch->family = family; in x86__cpuid_parse() 186 arch->model = model; in x86__cpuid_parse() 193 static int x86__annotate_init(struct arch *arc argument [all...] |
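The perf hits show two cooperating helpers: x86__cpuid_parse() pulls the family and model numbers out of perf's textual cpuid string into struct arch, and x86__ins_is_fused() then consults those numbers to decide whether an instruction pair macro-fuses on that core. Below is a hedged sketch of the parsing half only; the comma-separated "vendor,family,model,stepping" format is assumed, and the struct is trimmed down to the two fields the excerpt uses.

#include <stdio.h>

struct arch_info {
        unsigned int family;
        unsigned int model;
};

/*
 * Parse a cpuid string such as "GenuineIntel,6,158,10"
 * (vendor,family,model,stepping). Returns 0 on success, -1 otherwise.
 */
static int cpuid_parse(struct arch_info *a, const char *cpuid)
{
        unsigned int family, model, stepping;

        if (sscanf(cpuid, "%*[^,],%u,%u,%u", &family, &model, &stepping) != 3)
                return -1;

        a->family = family;
        a->model = model;
        return 0;
}

int main(void)
{
        struct arch_info a;

        if (cpuid_parse(&a, "GenuineIntel,6,158,10") == 0)
                printf("family %u, model 0x%x\n", a.family, a.model);
        return 0;
}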
/third_party/gn/src/gn/ |
H A D | args.cc | 352 const char* arch = nullptr; 358 arch = kX86; 360 arch = kX64; 362 arch = kArm64; 364 arch = kArm; 366 arch = kMips; 368 arch = kMips64; 370 arch = kS390X; 375 arch = kPPC64; 377 arch [all...] |
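The args.cc excerpt is GN choosing a canonical architecture string ("x86", "x64", "arm", "arm64", "mips64", "s390x", "ppc64", ...) for the current_cpu/host_cpu build arguments. The same kind of mapping can be done at compile time from predefined compiler macros; here is a minimal sketch, where the macro names are the usual GCC/Clang/MSVC ones and the fallback string is my own choice.

#include <stdio.h>

/* Map predefined compiler macros to a GN-style architecture name. */
static const char *host_arch(void)
{
#if defined(__x86_64__) || defined(_M_X64)
        return "x64";
#elif defined(__i386__) || defined(_M_IX86)
        return "x86";
#elif defined(__aarch64__) || defined(_M_ARM64)
        return "arm64";
#elif defined(__arm__) || defined(_M_ARM)
        return "arm";
#elif defined(__s390x__)
        return "s390x";
#elif defined(__powerpc64__)
        return "ppc64";
#else
        return "unknown";
#endif
}

int main(void)
{
        printf("host_cpu = \"%s\"\n", host_arch());
        return 0;
}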
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/scripts/ |
H A D | update.py | 174 for arch, defs in LLVM_TARGETS: 176 targets.add(arch) 247 ] + [path.join('lib', 'Target', arch) for arch, defs in LLVM_TARGETS] 268 for arch, defs in LLVM_TARGETS: 269 conds[arch] = ' || '.join('defined(' + v + ')' for v in defs) 294 arch = match.group(1) 295 print('#if ' + conds[arch], file=dst_file) 304 arch = match.group(3) 306 if arch [all...] |
/kernel/linux/linux-5.10/arch/powerpc/kexec/ |
H A D | ima.c | 132 * arch_ima_add_kexec_buffer - do arch-specific steps to add the IMA buffer 142 image->arch.ima_buffer_addr = load_addr; in arch_ima_add_kexec_buffer() 143 image->arch.ima_buffer_size = size; in arch_ima_add_kexec_buffer() 183 if (!image->arch.ima_buffer_size) in setup_ima_buffer() 195 ret = write_number(value, image->arch.ima_buffer_addr, addr_cells); in setup_ima_buffer() 199 ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size, in setup_ima_buffer() 209 ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr, in setup_ima_buffer() 210 image->arch.ima_buffer_size); in setup_ima_buffer() 215 image->arch.ima_buffer_addr, image->arch in setup_ima_buffer() [all...] |
/kernel/linux/linux-6.6/arch/powerpc/kvm/ |
H A D | book3s_hv_ras.c | 46 slb = vcpu->arch.slb_shadow.pinned_addr; in reload_slb() 52 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) in reload_slb() 71 unsigned long srr1 = vcpu->arch.shregs.msr; in kvmppc_realmode_mc_power7() 76 unsigned long dsisr = vcpu->arch.shregs.dsisr; in kvmppc_realmode_mc_power7() 86 tlbiel_all_lpid(vcpu->kvm->arch.radix); in kvmppc_realmode_mc_power7() 103 tlbiel_all_lpid(vcpu->kvm->arch.radix); in kvmppc_realmode_mc_power7() 117 if (vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_realmode_machine_check() 137 vcpu->arch.mce_evt = mce_evt; in kvmppc_realmode_machine_check() 143 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_p9_realmode_hmi_handler()
|
/kernel/linux/linux-6.6/arch/riscv/kvm/ |
H A D | vcpu_vector.c | 21 unsigned long *isa = vcpu->arch.isa; in kvm_riscv_vcpu_vector_reset() 22 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_vector_reset() 80 vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL); in kvm_riscv_vcpu_alloc_vector_context() 81 if (!vcpu->arch.host_context.vector.datap) in kvm_riscv_vcpu_alloc_vector_context() 89 kfree(vcpu->arch.guest_reset_context.vector.datap); in kvm_riscv_vcpu_free_vector_context() 90 kfree(vcpu->arch.host_context.vector.datap); in kvm_riscv_vcpu_free_vector_context() 99 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_vreg_addr() 137 unsigned long *isa = vcpu->arch.isa; in kvm_riscv_vcpu_get_reg_vector() 163 unsigned long *isa = vcpu->arch.isa; in kvm_riscv_vcpu_set_reg_vector()
|
H A D | vcpu_fp.c | 19 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_fp_reset() 22 if (riscv_isa_extension_available(vcpu->arch.isa, f) || in kvm_riscv_vcpu_fp_reset() 23 riscv_isa_extension_available(vcpu->arch.isa, d)) in kvm_riscv_vcpu_fp_reset() 81 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_get_reg_fp() 90 riscv_isa_extension_available(vcpu->arch.isa, f)) { in kvm_riscv_vcpu_get_reg_fp() 101 riscv_isa_extension_available(vcpu->arch.isa, d)) { in kvm_riscv_vcpu_get_reg_fp() 126 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_set_reg_fp() 135 riscv_isa_extension_available(vcpu->arch.isa, f)) { in kvm_riscv_vcpu_set_reg_fp() 146 riscv_isa_extension_available(vcpu->arch.isa, d)) { in kvm_riscv_vcpu_set_reg_fp()
|
/third_party/python/PCbuild/ |
H A D | prepare_ssl.py | 115 def prep(arch): 118 if arch == "x86": 122 elif arch == "amd64": 127 raise ValueError('Unrecognized platform: %s' % arch) 160 arch = sys.argv[2] if len(sys.argv) >= 3 else None 194 if arch: 195 prep(arch) 197 for arch in ['amd64', 'x86']: 198 prep(arch)
|
/kernel/linux/linux-6.6/arch/mips/ |
H A D | Makefile | 15 $(Q)$(MAKE) $(build)=arch/mips/tools elf-entry 17 $(Q)$(MAKE) $(build)=arch/mips/tools loongson3-llsc-check 19 $(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs 266 libs-$(CONFIG_FW_ARC) += arch/mips/fw/arc/ 267 libs-$(CONFIG_FW_CFE) += arch/mips/fw/cfe/ 268 libs-$(CONFIG_FW_SNIPROM) += arch/mips/fw/sni/ 269 libs-y += arch/mips/fw/lib/ 283 include $(srctree)/arch/mips/Kbuild.platforms 289 entry-y = $(shell $(objtree)/arch/mips/tools/elf-entry vmlinux) 290 cflags-y += -I$(srctree)/arch/mip [all...] |
/kernel/linux/linux-6.6/arch/s390/kvm/ |
H A D | gaccess.c | 270 read_lock(&kvm->arch.sca_lock); in ipte_lock_held() 272 read_unlock(&kvm->arch.sca_lock); in ipte_lock_held() 275 return kvm->arch.ipte_lock_count != 0; in ipte_lock_held() 282 mutex_lock(&kvm->arch.ipte_mutex); in ipte_lock_simple() 283 kvm->arch.ipte_lock_count++; in ipte_lock_simple() 284 if (kvm->arch.ipte_lock_count > 1) in ipte_lock_simple() 287 read_lock(&kvm->arch.sca_lock); in ipte_lock_simple() 292 read_unlock(&kvm->arch.sca_lock); in ipte_lock_simple() 299 read_unlock(&kvm->arch.sca_lock); in ipte_lock_simple() 301 mutex_unlock(&kvm->arch in ipte_lock_simple() [all...] |
/kernel/linux/linux-5.10/arch/loongarch/kvm/ |
H A D | csr.c | 25 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_emu_read_csr() 49 csrid, vcpu->arch.pc); in _kvm_emu_read_csr() 65 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_emu_write_csr() 90 csrid, vcpu->arch.pc); in _kvm_emu_write_csr() 104 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_emu_xchg_csr() 123 csrid, vcpu->arch.pc); in _kvm_emu_xchg_csr() 128 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr() 208 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr() 309 if (addr == kvm->arch.iocsr[i].addr) in _kvm_find_iocsr() 310 return &kvm->arch in _kvm_find_iocsr() [all...] |
/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/include/hyp/ |
H A D | switch.h | 44 return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED; in guest_owns_fp_regs() 197 vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); in __activate_traps_common() 198 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); in __activate_traps_common() 219 write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2); in __deactivate_traps_common() 238 u64 hcr = vcpu->arch.hcr_el2; in ___activate_traps() 246 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); in ___activate_traps() 257 if (vcpu->arch.hcr_el2 & HCR_VSE) { in ___deactivate_traps() 258 vcpu->arch.hcr_el2 &= ~HCR_VSE; in ___deactivate_traps() 259 vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE; in ___deactivate_traps() 265 return __get_fault_info(vcpu->arch in __populate_fault_info() [all...] |
/kernel/linux/linux-5.10/arch/powerpc/kvm/ |
H A D | book3s_xive.c | 75 if (!tima || !vcpu->arch.xive_cam_word) in kvmppc_xive_push_vcpu() 79 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); in kvmppc_xive_push_vcpu() 80 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); in kvmppc_xive_push_vcpu() 81 vcpu->arch.xive_pushed = 1; in kvmppc_xive_push_vcpu() 91 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu() 97 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_push_vcpu() 98 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + in kvmppc_xive_push_vcpu() 125 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu() 153 vcpu->arch.irq_pending = 1; in xive_esc_irq() 155 if (vcpu->arch in xive_esc_irq() [all...] |