Searched refs: _XFER (Results 1 - 6 of 6) sorted by relevance

/kernel/linux/linux-5.10/arch/x86/crypto/
sha256-avx-asm.S
125 _XFER = _INP + _INP_SIZE (definition)
126 _XMM_SAVE = _XFER + _XFER_SIZE
178 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
213 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
252 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
290 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
308 ## input is [rsp + _XFER + %1 * 4]
327 offset = \round * 4 + _XFER #
396 vmovdqa XFER, _XFER(%rsp)
400 vmovdqa XFER, _XFER(
[all...]
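
Aside: the matches above all follow one stack-frame idiom. Assembler symbols name byte offsets into a scratch area addressed off %rsp, and _XFER is the slot where a block of K[t]+W[t] words is staged before each round's add consumes one 4-byte word. Below is a minimal sketch of that idiom, not the kernel's code: xfer_demo, the .set sizes, and the register use are all illustrative assumptions.

    # sketch.S - illustrative only; every size and name here is assumed.
    .set _INP_SIZE,  8                      # room for a saved input pointer
    .set _XFER_SIZE, 16                     # four 4-byte K+W words
    .set _INP,       0
    .set _XFER,      _INP + _INP_SIZE       # K+W staging slot
    .set _XMM_SAVE,  _XFER + _XFER_SIZE     # XMM spill area above it

    .text
    .globl xfer_demo
    xfer_demo:
        sub     $48, %rsp                   # frame covers all three areas
        movdqa  %xmm0, _XFER(%rsp)          # stage four K+W words at once
        mov     (0*4 + _XFER)(%rsp), %eax   # round 0 reads word 0
        add     (1*4 + _XFER)(%rsp), %eax   # round 1 reads word 1, and so on
        add     $48, %rsp
        ret

The point of the symbolic offsets is that the round macros can say (\round * 4 + _XFER)(%rsp) and stay correct if the frame layout changes in one place.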
sha256-ssse3-asm.S
119 _XFER = _INP + _INP_SIZE (definition)
120 _XMM_SAVE = _XFER + _XFER_SIZE
174 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
213 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
255 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
296 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
315 ## input is [rsp + _XFER + %1 * 4]
334 offset = \round * 4 + _XFER
407 movdqa XFER, _XFER(%rsp)
412 movdqa XFER, _XFER(
[all...]
sha256-avx2-asm.S
122 _XFER = 0 (definition)
123 _XMM_SAVE = _XFER + _XFER_SIZE
595 vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
596 FOUR_ROUNDS_AND_SCHED _XFER + 0*32
599 vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
600 FOUR_ROUNDS_AND_SCHED _XFER + 1*32
603 vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
604 FOUR_ROUNDS_AND_SCHED _XFER + 2*32
607 vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
608 FOUR_ROUNDS_AND_SCHED _XFER
[all...]
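
Aside: the AVX2 file differs from the other two. There _XFER is 0 because the bottom of the frame is the transfer area itself, and SRND, a general-purpose register doubling as the schedule offset, indexes successive 32-byte blocks so the same store-then-round sequence can walk the frame. A sketch of that addressing pattern follows; xfer_avx2_demo, the %rdi assignment, and the frame size are assumptions, not the kernel's choices.

    # sketch, AVX2 flavor - names, sizes, and registers are assumptions.
    .set _XFER, 0                           # transfer area starts at %rsp
    SRND = %rdi                             # schedule-offset register

    .text
    .globl xfer_avx2_demo
    xfer_avx2_demo:
        push    %rbp
        mov     %rsp, %rbp
        sub     $160, %rsp                  # 4 blocks of 32 plus alignment slack
        and     $-32, %rsp                  # vmovdqa needs 32-byte alignment
        xor     SRND, SRND                  # start at schedule block 0
        vmovdqa %ymm0, 0*32+_XFER(%rsp, SRND)  # stage 8 K+W dwords
        vmovdqa %ymm1, 1*32+_XFER(%rsp, SRND)  # next 32-byte block up
        add     $2*32, SRND                 # advance past the stored blocks
        mov     %rbp, %rsp
        pop     %rbp
        ret

Using a register as the base offset is what lets the unrolled loop reuse one FOUR_ROUNDS_AND_SCHED macro per 32-byte block instead of recomputing addresses.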
/kernel/linux/linux-6.6/arch/x86/crypto/
sha256-avx-asm.S
126 _XFER = _INP + _INP_SIZE (definition)
127 _XMM_SAVE = _XFER + _XFER_SIZE
179 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
214 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
253 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
291 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
309 ## input is [rsp + _XFER + %1 * 4]
328 offset = \round * 4 + _XFER #
396 vmovdqa XFER, _XFER(%rsp)
400 vmovdqa XFER, _XFER(
[all...]
sha256-ssse3-asm.S
120 _XFER = _INP + _INP_SIZE (definition)
121 _XMM_SAVE = _XFER + _XFER_SIZE
175 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
214 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
256 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
297 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
316 ## input is [rsp + _XFER + %1 * 4]
335 offset = \round * 4 + _XFER
407 movdqa XFER, _XFER(%rsp)
412 movdqa XFER, _XFER(
[all...]
sha256-avx2-asm.S
122 _XFER = 0 (definition)
123 _XMM_SAVE = _XFER + _XFER_SIZE
594 vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
595 FOUR_ROUNDS_AND_SCHED _XFER + 0*32
599 vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
600 FOUR_ROUNDS_AND_SCHED _XFER + 1*32
604 vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
605 FOUR_ROUNDS_AND_SCHED _XFER + 2*32
609 vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
610 FOUR_ROUNDS_AND_SCHED _XFER
[all...]
