// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.

// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(X64F64x2PromoteLowF32x4) \
  V(X64Movb) \
  V(X64Movdqu) \
  V(X64Movl) \
  V(X64Movq) \
  V(X64Movsd) \
  V(X64Movss) \
  V(X64Movsxbl) \
  V(X64Movsxbq) \
  V(X64Movsxlq) \
  V(X64Movsxwl) \
  V(X64Movsxwq) \
  V(X64Movw) \
  V(X64Movzxbl) \
  V(X64Movzxbq) \
  V(X64Movzxwl) \
  V(X64Movzxwq) \
  V(X64Pextrb) \
  V(X64Pextrw) \
  V(X64Pinsrb) \
  V(X64Pinsrd) \
  V(X64Pinsrq) \
  V(X64Pinsrw) \
  V(X64S128Load16Splat) \
  V(X64S128Load16x4S) \
  V(X64S128Load16x4U) \
  V(X64S128Load32Splat) \
  V(X64S128Load32x2S) \
  V(X64S128Load32x2U) \
  V(X64S128Load64Splat) \
  V(X64S128Load8Splat) \
  V(X64S128Load8x8S) \
  V(X64S128Load8x8U) \
  V(X64S128Store32Lane) \
  V(X64S128Store64Lane)
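// A minimal sketch of how the access mode attached to the opcodes above is
// consumed downstream, assuming the MemoryAccessMode enum and AccessModeField
// bit field declared in the architecture-independent instruction-codes.h (the
// enumerator and helper names below vary across V8 revisions and are
// illustrative, not the exact code-generator code):
//
//   if (AccessModeField::decode(instr->opcode()) != kMemoryAccessDirect) {
//     // This load/store may fault: record its pc so the trap handler can
//     // turn the resulting signal into a WebAssembly trap.
//     RecordProtectedInstruction(pc_offset);
//   }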
#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(X64Add) \
  V(X64Add32) \
  V(X64And) \
  V(X64And32) \
  V(X64Cmp) \
  V(X64Cmp32) \
  V(X64Cmp16) \
  V(X64Cmp8) \
  V(X64Test) \
  V(X64Test32) \
  V(X64Test16) \
  V(X64Test8) \
  V(X64Or) \
  V(X64Or32) \
  V(X64Xor) \
  V(X64Xor32) \
  V(X64Sub) \
  V(X64Sub32) \
  V(X64Imul) \
  V(X64Imul32) \
  V(X64ImulHigh32) \
  V(X64UmulHigh32) \
  V(X64Idiv) \
  V(X64Idiv32) \
  V(X64Udiv) \
  V(X64Udiv32) \
  V(X64Not) \
  V(X64Not32) \
  V(X64Neg) \
  V(X64Neg32) \
  V(X64Shl) \
  V(X64Shl32) \
  V(X64Shr) \
  V(X64Shr32) \
  V(X64Sar) \
  V(X64Sar32) \
  V(X64Rol) \
  V(X64Rol32) \
  V(X64Ror) \
  V(X64Ror32) \
  V(X64Lzcnt) \
  V(X64Lzcnt32) \
  V(X64Tzcnt) \
  V(X64Tzcnt32) \
  V(X64Popcnt) \
  V(X64Popcnt32) \
  V(X64Bswap) \
  V(X64Bswap32) \
  V(X64MFence) \
  V(X64LFence) \
  V(SSEFloat32Cmp) \
  V(SSEFloat32Add) \
  V(SSEFloat32Sub) \
  V(SSEFloat32Mul) \
  V(SSEFloat32Div) \
  V(SSEFloat32Sqrt) \
  V(SSEFloat32ToFloat64) \
  V(SSEFloat32ToInt32) \
  V(SSEFloat32ToUint32) \
  V(SSEFloat32Round) \
  V(SSEFloat64Cmp) \
  V(SSEFloat64Add) \
  V(SSEFloat64Sub) \
  V(SSEFloat64Mul) \
  V(SSEFloat64Div) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Round) \
  V(SSEFloat32Max) \
  V(SSEFloat64Max) \
  V(SSEFloat32Min) \
  V(SSEFloat64Min) \
  V(SSEFloat64ToFloat32) \
  V(SSEFloat64ToInt32) \
  V(SSEFloat64ToUint32) \
  V(SSEFloat32ToInt64) \
  V(SSEFloat64ToInt64) \
  V(SSEFloat32ToUint64) \
  V(SSEFloat64ToUint64) \
  V(SSEInt32ToFloat64) \
  V(SSEInt32ToFloat32) \
  V(SSEInt64ToFloat32) \
  V(SSEInt64ToFloat64) \
  V(SSEUint64ToFloat32) \
  V(SSEUint64ToFloat64) \
  V(SSEUint32ToFloat64) \
  V(SSEUint32ToFloat32) \
  V(SSEFloat64ExtractLowWord32) \
  V(SSEFloat64ExtractHighWord32) \
  V(SSEFloat64InsertLowWord32) \
  V(SSEFloat64InsertHighWord32) \
  V(SSEFloat64LoadLowWord32) \
  V(SSEFloat64SilenceNaN) \
  V(AVXFloat32Cmp) \
  V(AVXFloat32Add) \
  V(AVXFloat32Sub) \
  V(AVXFloat32Mul) \
  V(AVXFloat32Div) \
  V(AVXFloat64Cmp) \
  V(AVXFloat64Add) \
  V(AVXFloat64Sub) \
  V(AVXFloat64Mul) \
  V(AVXFloat64Div) \
  V(X64Float64Abs) \
  V(X64Float64Neg) \
  V(X64Float32Abs) \
  V(X64Float32Neg) \
  V(X64MovqDecompressTaggedSigned) \
  V(X64MovqDecompressTaggedPointer) \
  V(X64MovqDecompressAnyTagged) \
  V(X64MovqCompressTagged) \
  V(X64MovqEncodeSandboxedPointer) \
  V(X64MovqDecodeSandboxedPointer) \
  V(X64BitcastFI) \
  V(X64BitcastDL) \
  V(X64BitcastIF) \
  V(X64BitcastLD) \
  V(X64Lea32) \
  V(X64Lea) \
  V(X64Dec32) \
  V(X64Inc32) \
  V(X64Push) \
  V(X64Poke) \
  V(X64Peek) \
  V(X64Cvttps2dq) \
  V(X64Cvttpd2dq) \
  V(X64I32x4TruncF64x2UZero) \
  V(X64I32x4TruncF32x4U) \
  V(X64F64x2Splat) \
  V(X64F64x2ExtractLane) \
  V(X64F64x2ReplaceLane) \
  V(X64F64x2Abs) \
  V(X64F64x2Neg) \
  V(X64F64x2Sqrt) \
  V(X64F64x2Add) \
  V(X64F64x2Sub) \
  V(X64F64x2Mul) \
  V(X64F64x2Div) \
  V(X64F64x2Min) \
  V(X64F64x2Max) \
  V(X64F64x2Eq) \
  V(X64F64x2Ne) \
  V(X64F64x2Lt) \
  V(X64F64x2Le) \
  V(X64F64x2Qfma) \
  V(X64F64x2Qfms) \
  V(X64Minpd) \
  V(X64Maxpd) \
  V(X64F64x2Round) \
  V(X64F64x2ConvertLowI32x4S) \
  V(X64F64x2ConvertLowI32x4U) \
  V(X64F32x4Splat) \
  V(X64F32x4ExtractLane) \
  V(X64F32x4ReplaceLane) \
  V(X64F32x4SConvertI32x4) \
  V(X64F32x4UConvertI32x4) \
  V(X64F32x4Abs) \
  V(X64F32x4Neg) \
  V(X64F32x4Sqrt) \
  V(X64F32x4RecipApprox) \
  V(X64F32x4RecipSqrtApprox) \
  V(X64F32x4Add) \
  V(X64F32x4Sub) \
  V(X64F32x4Mul) \
  V(X64F32x4Div) \
  V(X64F32x4Min) \
  V(X64F32x4Max) \
  V(X64F32x4Eq) \
  V(X64F32x4Ne) \
  V(X64F32x4Lt) \
  V(X64F32x4Le) \
  V(X64F32x4Qfma) \
  V(X64F32x4Qfms) \
  V(X64Minps) \
  V(X64Maxps) \
  V(X64F32x4Round) \
  V(X64F32x4DemoteF64x2Zero) \
  V(X64I64x2Splat) \
  V(X64I64x2ExtractLane) \
  V(X64I64x2Abs) \
  V(X64I64x2Neg) \
  V(X64I64x2BitMask) \
  V(X64I64x2Shl) \
  V(X64I64x2ShrS) \
  V(X64I64x2Add) \
  V(X64I64x2Sub) \
  V(X64I64x2Mul) \
  V(X64I64x2Eq) \
  V(X64I64x2GtS) \
  V(X64I64x2GeS) \
  V(X64I64x2Ne) \
  V(X64I64x2ShrU) \
  V(X64I64x2ExtMulLowI32x4S) \
  V(X64I64x2ExtMulHighI32x4S) \
  V(X64I64x2ExtMulLowI32x4U) \
  V(X64I64x2ExtMulHighI32x4U) \
  V(X64I64x2SConvertI32x4Low) \
  V(X64I64x2SConvertI32x4High) \
  V(X64I64x2UConvertI32x4Low) \
  V(X64I64x2UConvertI32x4High) \
  V(X64I32x4Splat) \
  V(X64I32x4ExtractLane) \
  V(X64I32x4SConvertF32x4) \
  V(X64I32x4SConvertI16x8Low) \
  V(X64I32x4SConvertI16x8High) \
  V(X64I32x4Neg) \
  V(X64I32x4Shl) \
  V(X64I32x4ShrS) \
  V(X64I32x4Add) \
  V(X64I32x4Sub) \
  V(X64I32x4Mul) \
  V(X64I32x4MinS) \
  V(X64I32x4MaxS) \
  V(X64I32x4Eq) \
  V(X64I32x4Ne) \
  V(X64I32x4GtS) \
  V(X64I32x4GeS) \
  V(X64I32x4UConvertF32x4) \
  V(X64I32x4UConvertI16x8Low) \
  V(X64I32x4UConvertI16x8High) \
  V(X64I32x4ShrU) \
  V(X64I32x4MinU) \
  V(X64I32x4MaxU) \
  V(X64I32x4GtU) \
  V(X64I32x4GeU) \
  V(X64I32x4Abs) \
  V(X64I32x4BitMask) \
  V(X64I32x4DotI16x8S) \
  V(X64I32x4ExtMulLowI16x8S) \
  V(X64I32x4ExtMulHighI16x8S) \
  V(X64I32x4ExtMulLowI16x8U) \
  V(X64I32x4ExtMulHighI16x8U) \
  V(X64I32x4ExtAddPairwiseI16x8S) \
  V(X64I32x4ExtAddPairwiseI16x8U) \
  V(X64I32x4TruncSatF64x2SZero) \
  V(X64I32x4TruncSatF64x2UZero) \
  V(X64I16x8Splat) \
  V(X64I16x8ExtractLaneS) \
  V(X64I16x8SConvertI8x16Low) \
  V(X64I16x8SConvertI8x16High) \
  V(X64I16x8Neg) \
  V(X64I16x8Shl) \
  V(X64I16x8ShrS) \
  V(X64I16x8SConvertI32x4) \
  V(X64I16x8Add) \
  V(X64I16x8AddSatS) \
  V(X64I16x8Sub) \
  V(X64I16x8SubSatS) \
  V(X64I16x8Mul) \
  V(X64I16x8MinS) \
  V(X64I16x8MaxS) \
  V(X64I16x8Eq) \
  V(X64I16x8Ne) \
  V(X64I16x8GtS) \
  V(X64I16x8GeS) \
  V(X64I16x8UConvertI8x16Low) \
  V(X64I16x8UConvertI8x16High) \
  V(X64I16x8ShrU) \
  V(X64I16x8UConvertI32x4) \
  V(X64I16x8AddSatU) \
  V(X64I16x8SubSatU) \
  V(X64I16x8MinU) \
  V(X64I16x8MaxU) \
  V(X64I16x8GtU) \
  V(X64I16x8GeU) \
  V(X64I16x8RoundingAverageU) \
  V(X64I16x8Abs) \
  V(X64I16x8BitMask) \
  V(X64I16x8ExtMulLowI8x16S) \
  V(X64I16x8ExtMulHighI8x16S) \
  V(X64I16x8ExtMulLowI8x16U) \
  V(X64I16x8ExtMulHighI8x16U) \
  V(X64I16x8ExtAddPairwiseI8x16S) \
  V(X64I16x8ExtAddPairwiseI8x16U) \
  V(X64I16x8Q15MulRSatS) \
  V(X64I8x16Splat) \
  V(X64I8x16ExtractLaneS) \
  V(X64I8x16SConvertI16x8) \
  V(X64I8x16Neg) \
  V(X64I8x16Shl) \
  V(X64I8x16ShrS) \
  V(X64I8x16Add) \
  V(X64I8x16AddSatS) \
  V(X64I8x16Sub) \
  V(X64I8x16SubSatS) \
  V(X64I8x16MinS) \
  V(X64I8x16MaxS) \
  V(X64I8x16Eq) \
  V(X64I8x16Ne) \
  V(X64I8x16GtS) \
  V(X64I8x16GeS) \
  V(X64I8x16UConvertI16x8) \
  V(X64I8x16AddSatU) \
  V(X64I8x16SubSatU) \
  V(X64I8x16ShrU) \
  V(X64I8x16MinU) \
  V(X64I8x16MaxU) \
  V(X64I8x16GtU) \
  V(X64I8x16GeU) \
  V(X64I8x16RoundingAverageU) \
  V(X64I8x16Abs) \
  V(X64I8x16BitMask) \
  V(X64S128Const) \
  V(X64S128Zero) \
  V(X64S128AllOnes) \
  V(X64S128Not) \
  V(X64S128And) \
  V(X64S128Or) \
  V(X64S128Xor) \
  V(X64S128Select) \
  V(X64S128AndNot) \
  V(X64I8x16Swizzle) \
  V(X64I8x16Shuffle) \
  V(X64I8x16Popcnt) \
  V(X64Shufps) \
  V(X64S32x4Rotate) \
  V(X64S32x4Swizzle) \
  V(X64S32x4Shuffle) \
  V(X64S16x8Blend) \
  V(X64S16x8HalfShuffle1) \
  V(X64S16x8HalfShuffle2) \
  V(X64S8x16Alignr) \
  V(X64S16x8Dup) \
  V(X64S8x16Dup) \
  V(X64S16x8UnzipHigh) \
  V(X64S16x8UnzipLow) \
  V(X64S8x16UnzipHigh) \
  V(X64S8x16UnzipLow) \
  V(X64S64x2UnpackHigh) \
  V(X64S32x4UnpackHigh) \
  V(X64S16x8UnpackHigh) \
  V(X64S8x16UnpackHigh) \
  V(X64S64x2UnpackLow) \
  V(X64S32x4UnpackLow) \
  V(X64S16x8UnpackLow) \
  V(X64S8x16UnpackLow) \
  V(X64S8x16TransposeLow) \
  V(X64S8x16TransposeHigh) \
  V(X64S8x8Reverse) \
  V(X64S8x4Reverse) \
  V(X64S8x2Reverse) \
  V(X64V128AnyTrue) \
  V(X64I64x2AllTrue) \
  V(X64I32x4AllTrue) \
  V(X64I16x8AllTrue) \
  V(X64I8x16AllTrue) \
  V(X64Pblendvb) \
  V(X64Word64AtomicAddUint64) \
  V(X64Word64AtomicSubUint64) \
  V(X64Word64AtomicAndUint64) \
  V(X64Word64AtomicOrUint64) \
  V(X64Word64AtomicXorUint64) \
  V(X64Word64AtomicStoreWord64) \
  V(X64Word64AtomicExchangeUint64) \
  V(X64Word64AtomicCompareExchangeUint64)
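// The architecture-independent instruction-codes.h expands the list above
// into the ArchOpcode enumeration, roughly as follows (a sketch of the
// existing macro machinery, not a definition made in this file):
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode {
//     COMMON_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
//     TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
//   };
//
// so an entry such as V(X64Add32) becomes the enumerator kX64Add32.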
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)

#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */      \
  V(Root) /* [%root       + K] */
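// For example, an instruction encoded with kMode_MR4I denotes the operand
// [%r1 + %r2*4 + K]; after register allocation the code generator builds it
// with the x64 assembler's scaled-index Operand constructor, roughly (a
// sketch; the register and variable names are illustrative):
//
//   Operand(base, index, times_4, displacement);  // [base + index*4 + disp]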
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_