Lines Matching defs:offset

403 int offset;
853 uint32_t offset =
855 __ Load(tmp, tmp.gp(), no_reg, offset, LoadType::kI32Load, pinned);
891 __ Spill(__ cache_state()->stack_state[local_index].offset(),
958 __ Spill(entry.offset, entry.reg, entry.kind);
974 // {total_frame_size} is the highest offset from the FP that is used to
975 // store a value. The offset of the first spill slot should therefore be
977 // don't start at offset '0' but at offset '-1' (or
1004 __ Fill(entry.reg, entry.offset, entry.kind);
1056 // A single breakpoint at offset 0 indicates stepping.
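The convention on line 1056 can be stated as a predicate: the per-function breakpoint list normally holds code offsets, and a list consisting of the single sentinel entry 0 requests a break before every instruction, i.e. stepping. A minimal sketch (illustrative name, not V8's API):

    #include <vector>

    // A breakpoint list with exactly one entry, offset 0, is the sentinel
    // for stepping; any other contents are ordinary breakpoints at those
    // code offsets.
    bool IsSteppingBreakpointList(const std::vector<int>& breakpoints) {
      return breakpoints.size() == 1 && breakpoints[0] == 0;
    }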
1103 // position exists, and that the offset to the return address is the
2255 uint32_t offset =
2258 __ Store(array.gp(), no_reg, offset, budget, StoreType::kI32Store, pinned);
2288 __ Fill(reg, local_slot.offset(), local_slot.kind());
2300 __ Fill(dst_slot->reg(), src_slot.offset(), kind);
2309 __ Fill(dst_reg, src_slot.offset(), kind);
2310 *dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset());
2356 LiftoffRegList* pinned, uint32_t* offset) {
2363 *offset = 0;
2366 *offset = global->offset;
2373 Register* offset) {
2381 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global->offset),
2384 // For the offset we need the index of the global in the buffer, and
2385 // then calculate the actual offset from the index. Load the index from
2392 *offset = imported_mutable_globals;
2393 __ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg,
2397 __ emit_i32_shli(*offset, *offset, kTaggedSizeLog2);
2398 __ emit_i32_addi(*offset, *offset,
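The shift-and-add pair on lines 2397-2398 is the usual index-to-byte-offset computation for a tagged FixedArray: the global's buffer index is scaled by the tagged-value size and the array header size is added. A hedged sketch of that arithmetic (the constants below are illustrative assumptions, not necessarily V8's values):

    #include <cstdint>

    // Assumed layout constants, for illustration only.
    constexpr uint32_t kTaggedSizeLog2 = 2;        // 4-byte tagged slots
    constexpr uint32_t kFixedArrayHeaderSize = 8;  // map word + length word

    // Byte offset of element {index} in a FixedArray, mirroring
    // emit_i32_shli(offset, offset, kTaggedSizeLog2) followed by
    // emit_i32_addi(offset, offset, <header size>).
    uint32_t ElementOffsetInTaggedFixedArray(uint32_t index) {
      return (index << kTaggedSizeLog2) + kFixedArrayHeaderSize;
    }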
2414 Register offset = no_reg;
2416 &base, &offset);
2417 __ LoadTaggedPointer(base, base, offset, 0, pinned);
2430 imm.global->offset),
2436 uint32_t offset = 0;
2437 Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
2441 __ Load(value, addr, no_reg, offset, type, pinned, nullptr, false);
2458 Register offset = no_reg;
2460 &base, &offset);
2461 __ StoreTaggedPointer(base, offset, 0, value, pinned);
2473 imm.global->offset),
2478 uint32_t offset = 0;
2479 Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
2482 __ Store(addr, no_reg, offset, reg, type, {}, nullptr, false);
2761 slot.offset(), slot.reg(), slot.kind()});
2762 __ RecordUsedSpillOffset(slot.offset());
2798 uint64_t offset, LiftoffRegister index,
2801 !base::IsInBounds<uintptr_t>(offset, access_size,
2848 uintptr_t end_offset = offset + access_size - 1u;
2858 // If the end offset is larger than the smallest memory, dynamically check
2859 // the end offset against the actual memory size, which is not known at
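Lines 2798-2859 implement the two-tier bounds check: line 2801 rejects accesses that are statically out of bounds, line 2848 computes the offset of the last accessed byte, and lines 2858-2859 fall back to a dynamic comparison of that end offset against the actual memory size when the static check is inconclusive. A minimal overflow-safe sketch of the condition being enforced (names assumed, not V8's API):

    #include <cstdint>

    // True iff an access of {access_size} bytes starting at {index + offset}
    // stays inside a memory of {memory_size} bytes.
    bool AccessInBounds(uint64_t index, uint64_t offset, uint64_t access_size,
                        uint64_t memory_size) {
      if (access_size == 0 || access_size > memory_size) return false;
      // offset + access_size - 1 must not wrap around.
      if (offset > UINT64_MAX - (access_size - 1)) return false;
      uint64_t end_offset = offset + access_size - 1;  // cf. line 2848
      if (end_offset >= memory_size) return false;     // dynamic end check
      return index < memory_size - end_offset;         // index check
    }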
2878 uintptr_t offset, Register index,
2886 if ((offset & align_mask) == 0) {
2887 // If {offset} is aligned, we can produce faster code.
2896 // For alignment checks we only look at the lower 32-bits in {offset}.
2897 __ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
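The branch on line 2886 is a small strength reduction: when the static offset is itself aligned, the alignment of index + offset depends only on index, so the addition on line 2897 is only needed on the slow path. A sketch, assuming {access_size} is a power of two (names illustrative):

    #include <cstdint>

    bool EffectiveAddressIsAligned(uint32_t index, uintptr_t offset,
                                   uint32_t access_size) {
      uint32_t align_mask = access_size - 1;
      if ((offset & align_mask) == 0) {
        // Fast path (line 2886): {offset} is aligned, so only the index
        // needs to be tested.
        return (index & align_mask) == 0;
      }
      // Slow path (lines 2896-2897): form the low 32 bits of the sum first.
      uint32_t address = index + static_cast<uint32_t>(offset);
      return (address & align_mask) == 0;
    }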
2904 Register index, uintptr_t offset,
2911 // Get one register for computing the effective offset (offset + index).
2914 DCHECK_GE(kMaxUInt32, offset);
2915 __ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
2932 // Zero-extend the effective offset to u64.
2937 info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data,
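Lines 2904-2937 fill in a memory-tracing record: the static offset is materialized as a 32-bit constant (lines 2914-2915), the dynamic index is added, the result is zero-extended to 64 bits (line 2932), and the store on line 2937 targets the field via offsetof. A hedged C++ analogue (only the offset field is visible above; the remaining fields are assumptions):

    #include <cstdint>

    struct MemoryTracingInfo {
      uint64_t offset;  // effective offset = static offset + dynamic index
      // ... further fields (representation, is-store flag) elided ...
    };

    void RecordEffectiveOffset(MemoryTracingInfo* info, uint32_t index,
                               uintptr_t offset) {
      // Mirrors lines 2914-2915 and 2932: 32-bit add, then zero-extend.
      uint32_t effective = static_cast<uint32_t>(offset) + index;
      info->offset = static_cast<uint64_t>(effective);
    }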
2964 int access_size, uintptr_t* offset) {
2969 const uintptr_t effective_offset = index + *offset;
2977 *offset = effective_offset;
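IndexStaticallyInBounds (lines 2964-2977) folds a compile-time-constant index into the static offset: if index + offset still fits into the smallest possible memory, the later access (see line 3015) can use a constant address and skip the bounds check entirely. A sketch under assumed names:

    #include <cstdint>

    // If a constant {index} plus {*offset} is provably inside the minimum
    // memory size, fold it into {*offset} and report success.
    bool IndexStaticallyInBoundsSketch(uintptr_t index, int access_size,
                                       uintptr_t min_memory_size,
                                       uintptr_t* offset) {
      uintptr_t effective_offset = index + *offset;  // cf. line 2969
      if (effective_offset < index) return false;    // addition overflowed
      if (static_cast<uintptr_t>(access_size) > min_memory_size ||
          effective_offset > min_memory_size - access_size) {
        return false;  // would not fit even the smallest memory
      }
      *offset = effective_offset;                    // cf. line 2977
      return true;
    }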
3002 uintptr_t offset = imm.offset;
3009 if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
3011 CODE_COMMENT("load from memory (constant offset)");
3015 __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
3020 index = BoundsCheckMem(decoder, type.size(), offset, full_index, {},
3033 __ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
3044 offset, decoder->position());
3064 Register index = BoundsCheckMem(decoder, access_size, imm.offset,
3068 uintptr_t offset = imm.offset;
3074 __ LoadTransform(value, addr, index, offset, type, transform,
3089 TraceMemoryOperation(false, mem_rep, index, offset, decoder->position());
3103 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
3107 uintptr_t offset = imm.offset;
3114 __ LoadLane(result, value, addr, index, offset, type, laneidx,
3125 offset, decoder->position());
3138 uintptr_t offset = imm.offset;
3142 if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
3144 CODE_COMMENT("store to memory (constant offset)");
3146 __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true);
3149 index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
3161 __ Store(mem, index, offset, value, type, outer_pinned,
3170 TraceMemoryOperation(true, type.mem_rep(), index, offset,
3182 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
3186 uintptr_t offset = imm.offset;
3191 __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc);
3197 TraceMemoryOperation(true, type.mem_rep(), index, offset,
3322 value.stack_offset = slot.offset();
4463 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4468 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4469 uintptr_t offset = imm.offset;
4474 __ AtomicStore(addr, index, offset, value, type, outer_pinned);
4476 TraceMemoryOperation(true, type.mem_rep(), index, offset,
4485 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4490 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4491 uintptr_t offset = imm.offset;
4496 __ AtomicLoad(value, addr, index, offset, type, pinned);
4501 offset, decoder->position());
4531 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4536 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4539 uintptr_t offset = imm.offset;
4542 (asm_.*emit_fn)(addr, index, offset, value, result, type);
4555 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4559 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4561 uintptr_t offset = imm.offset;
4580 __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
4590 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4594 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4596 uintptr_t offset = imm.offset;
4601 __ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
4635 BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
4639 AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
4642 uintptr_t offset = imm.offset;
4649 if (offset) {
4650 __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
4682 Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
4686 AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
4688 uintptr_t offset = imm.offset;
4695 if (offset) {
4696 __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
5203 int offset = StructFieldOffset(imm.struct_type, i);
5213 StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
5242 int offset = StructFieldOffset(struct_type, field.field_imm.index);
5248 LoadObjectField(value, obj.gp(), no_reg, offset, field_kind, is_signed,
5258 int offset = StructFieldOffset(struct_type, field.field_imm.index);
5263 StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
5308 LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5310 offset,
5317 __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
5320 __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
5322 StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
5323 __ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
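Lines 5308-5323 emit the fill loop for a freshly allocated array: start at the first element's byte offset, compute end_offset = offset + length * elem_size (line 5317), and store-and-advance until offset reaches end_offset (the kUnsignedGreaterEqual jump on line 5320 is the loop exit). The same control flow in plain C++ (names assumed, not V8's API):

    #include <cstdint>
    #include <cstring>

    // Fill {length} elements of {elem_size} bytes each, starting at byte
    // {offset} inside {obj}, with the bytes of {value}.
    void FillArrayElements(uint8_t* obj, uint32_t offset, uint32_t length,
                           uint32_t elem_size, const void* value) {
      uint32_t end_offset = offset + length * elem_size;  // cf. line 5317
      while (offset < end_offset) {                       // cf. line 5320
        std::memcpy(obj + offset, value, elem_size);      // cf. line 5322
        offset += elem_size;                              // cf. line 5323
      }
    }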
5474 const Value& /* offset */, const Value& /* length */,
5487 __ cache_state()->stack_state.end()[-3], // offset
6318 int offset, ValueKind kind, bool is_signed,
6321 __ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
6325 __ Load(dst, src, offset_reg, offset, load_type, pinned);
6329 void StoreObjectField(Register obj, Register offset_reg, int offset,
6333 __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
6337 __ Store(obj, offset_reg, offset, value, store_type, pinned);
6525 // The pc offset of the instructions to reserve the stack frame. Needed to
6530 // A single breakpoint at offset 0 indicates that we should prepare the