/third_party/mesa3d/src/gallium/drivers/radeonsi/
  si_sdma_copy_image.c
    113 bool si_sdma_v4_v5_copy_texture(struct si_context *sctx, struct si_texture *sdst, struct si_texture *ssrc, bool is_v5) in si_sdma_v4_v5_copy_texture() argument
    115 unsigned bpp = sdst->surface.bpe; in si_sdma_v4_v5_copy_texture()
    116 uint64_t dst_address = sdst->buffer.gpu_address + sdst->surface.u.gfx9.surf_offset; in si_sdma_v4_v5_copy_texture()
    118 unsigned dst_pitch = sdst->surface.u.gfx9.surf_pitch; in si_sdma_v4_v5_copy_texture()
    124 assert (!tmz || (sdst->buffer.flags & RADEON_FLAG_ENCRYPTED)); in si_sdma_v4_v5_copy_texture()
    127 if (ssrc->surface.is_linear && sdst->surface.is_linear) { in si_sdma_v4_v5_copy_texture()
    136 dst_address += sdst->surface.u.gfx9.offset[0]; in si_sdma_v4_v5_copy_texture()
    153 if (ssrc->surface.is_linear != sdst->surface.is_linear) { in si_sdma_v4_v5_copy_texture()
    154 struct si_texture *tiled = ssrc->surface.is_linear ? sdst in si_sdma_v4_v5_copy_texture()
    225 cik_sdma_copy_texture(struct si_context *sctx, struct si_texture *sdst, struct si_texture *ssrc) in cik_sdma_copy_texture() argument
    [all...]
  si_cp_dma.c
    194 struct si_resource *sdst = si_resource(dst); in si_cp_dma_clear_buffer() local
    195 uint64_t va = (sdst ? sdst->gpu_address : 0) + offset; in si_cp_dma_clear_buffer()
    212 if (sdst) { in si_cp_dma_clear_buffer()
    213 util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size); in si_cp_dma_clear_buffer()
    221 unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS); in si_cp_dma_clear_buffer()
    233 if (sdst && cache_policy != L2_BYPASS) in si_cp_dma_clear_buffer()
    234 sdst->TC_L2_dirty = true; in si_cp_dma_clear_buffer()
  si_fence.c
    204 struct si_fence **sdst = (struct si_fence **)dst; in si_fence_reference() local
    207 if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) { in si_fence_reference()
    208 ws->fence_reference(&(*sdst)->gfx, NULL); in si_fence_reference()
    209 tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL); in si_fence_reference()
    210 si_resource_reference(&(*sdst)->fine.buf, NULL); in si_fence_reference()
    211 FREE(*sdst); in si_fence_reference()
    213 *sdst = ssrc; in si_fence_reference()
  si_buffer.c
    285 struct si_resource *sdst = si_resource(dst); in si_replace_buffer_storage() local
    288 radeon_bo_reference(sctx->screen->ws, &sdst->buf, ssrc->buf); in si_replace_buffer_storage()
    289 sdst->gpu_address = ssrc->gpu_address; in si_replace_buffer_storage()
    290 sdst->b.b.bind = ssrc->b.b.bind; in si_replace_buffer_storage()
    291 sdst->flags = ssrc->flags; in si_replace_buffer_storage()
    293 assert(sdst->memory_usage_kb == ssrc->memory_usage_kb); in si_replace_buffer_storage()
    294 assert(sdst->bo_size == ssrc->bo_size); in si_replace_buffer_storage()
    295 assert(sdst->bo_alignment_log2 == ssrc->bo_alignment_log2); in si_replace_buffer_storage()
    296 assert(sdst->domains == ssrc->domains); in si_replace_buffer_storage()
  si_test_image_copy_region.c
    506 struct si_texture *sdst; in si_test_image_copy_region() local
    538 sdst = (struct si_texture *)dst; in si_test_image_copy_region()
    542 print_image_attrs(sscreen, sdst); in si_test_image_copy_region()
    559 si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4, SI_OP_SYNC_BEFORE_AFTER, in si_test_image_copy_region()
  si_compute_blit.c
    613 struct si_texture *sdst = (struct si_texture*)dst; in si_compute_copy_image() local
    627 vi_dcc_enabled(sdst, dst_level)) || in si_compute_copy_image()
    634 bool is_linear = ssrc->surface.is_linear || sdst->surface.is_linear; in si_compute_copy_image()
    640 !vi_dcc_enabled(sdst, dst_level) && in si_compute_copy_image()
  si_blit.c
    1166 struct si_texture *sdst = (struct si_texture *)info->dst.resource; in si_blit() local
    1169 (info->dst.resource->bind & PIPE_BIND_PRIME_BLIT_DST) && sdst->surface.is_linear && in si_blit()
    1181 if (si_sdma_copy_image(sctx, sdst, ssrc)) in si_blit()
  si_clear.c
    1264 struct si_texture *sdst = (struct si_texture *)dst->texture; in si_clear_render_target() local
    1272 (sctx->gfx_level >= GFX10 || !vi_dcc_enabled(sdst, dst->u.tex.level))) { in si_clear_render_target()

/third_party/node/deps/openssl/openssl/providers/implementations/macs/
  siphash_prov.c
    76 struct siphash_data_st *sdst; in siphash_dup() local
    80 sdst = OPENSSL_malloc(sizeof(*sdst)); in siphash_dup()
    81 if (sdst == NULL) in siphash_dup()
    84 *sdst = *ssrc; in siphash_dup()
    85 return sdst; in siphash_dup()

/third_party/openssl/providers/implementations/macs/
  siphash_prov.c
    76 struct siphash_data_st *sdst; in siphash_dup() local
    80 sdst = OPENSSL_malloc(sizeof(*sdst)); in siphash_dup()
    81 if (sdst == NULL) in siphash_dup()
    84 *sdst = *ssrc; in siphash_dup()
    85 return sdst; in siphash_dup()

/third_party/icu/icu4j/main/classes/localespi/src/com/ibm/icu/impl/javaspi/util/
  TimeZoneNameProviderICU.java
    35 String sdst = tznames.getDisplayName(canonicalID, NameType.SHORT_DAYLIGHT, date); in getDisplayName()
    37 if (lstd != null && ldst != null && sstd != null && sdst != null) { in getDisplayName()
    43 dispName = daylight ? sdst : sstd; in getDisplayName()

/third_party/ffmpeg/libavfilter/
  af_crossfeed.c
    203 double *sdst = s->side[0] + s->block_samples; in filter_frame() local
    211 sdst[n] = (src[0] - src[1]) * level_in * .5; in filter_frame()
    214 sdst = s->side[1]; in filter_frame()
    215 filter_samples(sdst, ssrc, s->block_samples, in filter_frame()
    222 sdst = s->side[1] + s->block_samples; in filter_frame()
    223 filter_samples(sdst, ssrc, s->block_samples, in filter_frame()

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/
  SIPeepholeSDWA.cpp
    894 const MachineOperand *Sdst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst); in pseudoOpConvertToVOP2()
    909 MachineOperand *CarryOut = TII->getNamedOperand(MISucc, AMDGPU::OpName::sdst); in pseudoOpConvertToVOP2()
    959 const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst); in isConvertibleToSDWA()
    970 } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) || in isConvertibleToSDWA()
    1020 } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) { in convertToSDWA()
    1022 AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1); in convertToSDWA()
    1025 assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1); in convertToSDWA()
    1132 // We also expect a vdst, since sdst can't preserve. in convertToSDWA()
  GCNHazardRecognizer.cpp
    960 SDSTName = AMDGPU::OpName::sdst; in fixSMEMtoVectorWriteHazards()
    1052 if (TII->getNamedOperand(*MI, AMDGPU::OpName::sdst)) in fixVcmpxExecWARHazard()
  SIShrinkInstructions.cpp
    757 AMDGPU::OpName::sdst); in runOnMachineFunction()
  SILoadStoreOptimizer.cpp
    1204 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst); in mergeSBufferLoadImmPair()
    1205 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::sdst); in mergeSBufferLoadImmPair()
  SIInstrInfo.cpp
    462 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst); in shouldClusterMemOps()
    463 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst); in shouldClusterMemOps()
    3017 // Additional verification is needed for sdst/src2.

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/Disassembler/
  AMDGPUDisassembler.cpp
    425 if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1) in convertSDWAInst()
    429 int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst); in convertSDWAInst()
    431 // VOPC - insert VCC register as sdst in convertSDWAInst()
    433 AMDGPU::OpName::sdst); in convertSDWAInst()

/third_party/mesa3d/src/gallium/drivers/nouveau/nv50/
  nv50_surface.c
    974 struct ureg_dst sdst = ureg_writemask(data, TGSI_WRITEMASK_Y); in nv50_blitter_make_fp() local
    999 ureg_I2F(ureg, sdst, ssrc); in nv50_blitter_make_fp()