Lines Matching refs:Xi
104 static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
107 const u8 *xi = (const u8 *)Xi + 15;
181 if ((u8 *)Xi == xi)
197 Xi[0] = BSWAP8(Z.hi);
198 Xi[1] = BSWAP8(Z.lo);
200 u8 *p = (u8 *)Xi;
212 Xi[0] = Z.hi;
213 Xi[1] = Z.lo;
217 # define GCM_MUL(ctx) gcm_gmult_8bit(ctx->Xi.u,ctx->Htable)
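The gcm_gmult_8bit lines above walk Xi one byte at a time from offset 15 downward, folding in a 256-entry table of precomputed multiples of H. A minimal byte-serial sketch of that idea follows; block128 and gmult_byte_serial are illustrative names, the eight single-bit reduction steps stand in for the rem_8bit remainder table the real routine uses, and Htable[b] is assumed to hold b's polynomial times H in GCM's reflected bit order.

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } block128;  /* bytes 0..7 big-endian in hi, 8..15 in lo */

    /* Byte-serial GHASH multiply: absorb one byte of Xi per iteration and
     * multiply the accumulator by x^8 between bytes.  Htable[b] is assumed
     * to be b(x)*H with b's most significant bit as the x^0 coefficient. */
    static block128 gmult_byte_serial(const uint8_t Xi[16],
                                      const block128 Htable[256])
    {
        block128 Z = { 0, 0 };
        int i, b;

        for (i = 15; i >= 0; i--) {
            Z.hi ^= Htable[Xi[i]].hi;          /* Z += byte_i(x) * H   */
            Z.lo ^= Htable[Xi[i]].lo;
            if (i == 0)
                break;
            for (b = 0; b < 8; b++) {          /* Z *= x^8, bit by bit */
                uint64_t carry = Z.lo & 1;
                Z.lo = (Z.lo >> 1) | (Z.hi << 63);
                Z.hi = (Z.hi >> 1) ^ (carry ? 0xe100000000000000ULL : 0);
            }
        }
        return Z;
    }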
300 static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
307 nlo = ((const u8 *)Xi)[15];
329 nlo = ((const u8 *)Xi)[cnt];
347 Xi[0] = BSWAP8(Z.hi);
348 Xi[1] = BSWAP8(Z.lo);
350 u8 *p = (u8 *)Xi;
362 Xi[0] = Z.hi;
363 Xi[1] = Z.lo;
375 static void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
386 nlo = ((const u8 *)Xi)[15];
409 nlo = ((const u8 *)Xi)[cnt];
484 nlo = ((const u8 *)Xi)[cnt];
502 nlo = ((const u8 *)Xi)[0];
522 Xi[0] = BSWAP8(Z.hi);
523 Xi[1] = BSWAP8(Z.lo);
525 u8 *p = (u8 *)Xi;
537 Xi[0] = Z.hi;
538 Xi[1] = Z.lo;
544 void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
545 void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
549 # define GCM_MUL(ctx) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable)
551 # define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len)
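The 4-bit routines index a 16-entry table of nibble multiples of H; the routine that fills that table never touches Xi and therefore does not appear in this listing. A sketch of the construction, reusing the block128 type from the byte-serial sketch above (init_4bit_table is an illustrative name, not the function in the file):

    /* Htable[i] = i(x) * H, where the nibble's most significant bit is the
     * x^0 coefficient; Htable[8] is therefore 1*H, Htable[4] is H*x, etc. */
    static void init_4bit_table(block128 Htable[16], block128 H)
    {
        block128 V = H;
        int i, j;

        Htable[0].hi = Htable[0].lo = 0;
        Htable[8] = V;
        for (i = 4; i > 0; i >>= 1) {          /* Htable[4], [2], [1]     */
            uint64_t carry = V.lo & 1;
            V.lo = (V.lo >> 1) | (V.hi << 63);
            V.hi = (V.hi >> 1) ^ (carry ? 0xe100000000000000ULL : 0);
            Htable[i] = V;
        }
        for (i = 2; i < 16; i <<= 1)           /* fill the remaining sums */
            for (j = 1; j < i; j++) {
                Htable[i + j].hi = Htable[i].hi ^ Htable[j].hi;
                Htable[i + j].lo = Htable[i].lo ^ Htable[j].lo;
            }
    }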
562 static void gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
567 const long *xi = (const long *)Xi;
600 Xi[0] = BSWAP8(Z.hi);
601 Xi[1] = BSWAP8(Z.lo);
603 u8 *p = (u8 *)Xi;
615 Xi[0] = Z.hi;
616 Xi[1] = Z.lo;
620 # define GCM_MUL(ctx) gcm_gmult_1bit(ctx->Xi.u,ctx->H.u)
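The 1-bit fallback above is essentially the bit-serial multiply of NIST SP 800-38D, Algorithm 1. A standalone reference, again using the block128 type from the first sketch (gf128_mul is an illustrative name):

    /* Z = X * H in GF(2^128) with GCM's reduction polynomial.  Bit 0 of an
     * element is the most significant bit of X.hi, as in the standard. */
    static block128 gf128_mul(block128 X, block128 H)
    {
        block128 Z = { 0, 0 }, V = H;
        int i;

        for (i = 0; i < 128; i++) {
            uint64_t xbit = (i < 64) ? (X.hi >> (63 - i)) & 1
                                     : (X.lo >> (127 - i)) & 1;
            if (xbit) {                        /* Z += V when bit i of X is set */
                Z.hi ^= V.hi;
                Z.lo ^= V.lo;
            }
            /* V *= x: shift right, reduce by 0xe1... on overflow */
            {
                uint64_t carry = V.lo & 1;
                V.lo = (V.lo >> 1) | (V.hi << 63);
                V.hi = (V.hi >> 1) ^ (carry ? 0xe100000000000000ULL : 0);
            }
        }
        return Z;
    }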
632 void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
633 void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
634 void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
642 void gcm_init_avx(u128 Htable[16], const u64 Xi[2]);
643 void gcm_gmult_avx(u64 Xi[2], const u128 Htable[16]);
644 void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
650 void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]);
651 void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
654 void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]);
655 void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
667 void gcm_init_neon(u128 Htable[16], const u64 Xi[2]);
668 void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
669 void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
671 void gcm_init_v8(u128 Htable[16], const u64 Xi[2]);
672 void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
673 void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
680 void gcm_init_vis3(u128 Htable[16], const u64 Xi[2]);
681 void gcm_gmult_vis3(u64 Xi[2], const u128 Htable[16]);
682 void gcm_ghash_vis3(u64 Xi[2], const u128 Htable[16], const u8 *inp,
688 void gcm_init_p8(u128 Htable[16], const u64 Xi[2]);
689 void gcm_gmult_p8(u64 Xi[2], const u128 Htable[16]);
690 void gcm_ghash_p8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
697 # define GCM_MUL(ctx) (*gcm_gmult_p)(ctx->Xi.u,ctx->Htable)
700 # define GHASH(ctx,in,len) (*gcm_ghash_p)(ctx->Xi.u,ctx->Htable,in,len)
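The GCM_MUL/GHASH macros at lines 697 and 700 call through function pointers so the best routine for the CPU can be chosen once at context setup. A sketch of that dispatch, assuming the u8/u64/u128 typedefs, <stddef.h>, and the prototypes listed above are in scope; has_clmul() is a placeholder feature probe, not a real OpenSSL call:

    typedef void (*gmult_fn)(u64 Xi[2], const u128 Htable[16]);
    typedef void (*ghash_fn)(u64 Xi[2], const u128 Htable[16],
                             const u8 *inp, size_t len);

    struct gcm_funcs { gmult_fn gmult; ghash_fn ghash; };

    static struct gcm_funcs select_gcm_funcs(void)
    {
        struct gcm_funcs f;

        if (has_clmul()) {                 /* placeholder CPU-feature probe */
            f.gmult = gcm_gmult_clmul;
            f.ghash = gcm_ghash_clmul;
        } else {                           /* portable table-driven fallback */
            f.gmult = gcm_gmult_4bit;
            f.ghash = gcm_ghash_4bit;
        }
        return f;
    }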
821 void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
840 /* Borrow ctx->Xi to calculate initial Yi */
841 ctx->Xi.u[0] = 0;
842 ctx->Xi.u[1] = 0;
846 ctx->Xi.c[i] ^= iv[i];
853 ctx->Xi.c[i] ^= iv[i];
859 ctx->Xi.u[1] ^= BSWAP8(len0);
861 ctx->Xi.c[8] ^= (u8)(len0 >> 56);
862 ctx->Xi.c[9] ^= (u8)(len0 >> 48);
863 ctx->Xi.c[10] ^= (u8)(len0 >> 40);
864 ctx->Xi.c[11] ^= (u8)(len0 >> 32);
865 ctx->Xi.c[12] ^= (u8)(len0 >> 24);
866 ctx->Xi.c[13] ^= (u8)(len0 >> 16);
867 ctx->Xi.c[14] ^= (u8)(len0 >> 8);
868 ctx->Xi.c[15] ^= (u8)(len0);
871 ctx->Xi.u[1] ^= len0;
878 ctr = BSWAP4(ctx->Xi.d[3]);
880 ctr = GETU32(ctx->Xi.c + 12);
883 ctr = ctx->Xi.d[3];
885 /* Copy borrowed Xi to Yi */
886 ctx->Yi.u[0] = ctx->Xi.u[0];
887 ctx->Yi.u[1] = ctx->Xi.u[1];
890 ctx->Xi.u[0] = 0;
891 ctx->Xi.u[1] = 0;
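Lines 840-891 borrow Xi to turn an arbitrary-length IV into the initial counter block J0 (copied to Yi), then zero Xi again before any data is hashed. The same derivation in terms of the earlier block128/gf128_mul sketches (xor_bytes_into and derive_j0 are illustrative names):

    #include <string.h>

    /* XOR up to 16 bytes into a block, zero-padding a short tail. */
    static void xor_bytes_into(block128 *x, const uint8_t *p, size_t n)
    {
        uint8_t b[16] = { 0 };
        size_t i;

        memcpy(b, p, n);
        for (i = 0; i < 8; i++) {
            x->hi ^= (uint64_t)b[i] << (56 - 8 * i);
            x->lo ^= (uint64_t)b[8 + i] << (56 - 8 * i);
        }
    }

    /* J0 per SP 800-38D: a 96-bit IV is used directly with a counter of 1;
     * anything else is run through GHASH together with its bit length. */
    static block128 derive_j0(block128 H, const uint8_t *iv, size_t iv_len)
    {
        block128 J0 = { 0, 0 };

        if (iv_len == 12) {
            xor_bytes_into(&J0, iv, 12);
            J0.lo |= 1;                          /* IV || 0^31 || 1        */
        } else {
            size_t n;
            for (n = 0; n < iv_len; n += 16) {
                size_t chunk = iv_len - n < 16 ? iv_len - n : 16;
                xor_bytes_into(&J0, iv + n, chunk);
                J0 = gf128_mul(J0, H);
            }
            J0.lo ^= (uint64_t)iv_len * 8;       /* fold in the bit length */
            J0 = gf128_mul(J0, H);               /* matches the len0 lines */
        }
        return J0;
    }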
912 void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
914 void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
930 ctx->Xi.c[n] ^= *(aad++);
950 ctx->Xi.c[i] ^= aad[i];
959 ctx->Xi.c[i] ^= aad[i];
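The aad lines above XOR the additional data into Xi byte by byte when a previous call left a partial block open (line 930) and block by block otherwise (lines 950/959), with a GHASH multiply closing each full block. A compressed sketch of that absorb step; unlike the real code, which keeps a partial tail open so a later call can extend it, this sketch pads and closes the block immediately:

    /* Absorb a byte string into the GHASH accumulator: XOR 16-byte blocks
     * into Xi and multiply by H after each one, zero-padding the tail. */
    static block128 ghash_absorb(block128 Xi, block128 H,
                                 const uint8_t *data, size_t len)
    {
        size_t n;

        for (n = 0; n < len; n += 16) {
            size_t chunk = len - n < 16 ? len - n : 16;
            xor_bytes_into(&Xi, data + n, chunk);
            Xi = gf128_mul(Xi, H);
        }
        return Xi;
    }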
977 void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
979 void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
999 memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
1000 ctx->Xi.u[0] = 0;
1001 ctx->Xi.u[1] = 0;
1002 mres = sizeof(ctx->Xi);
1038 ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
1129 ctx->Xi.t[i] ^= out_t[i] = in_t[i] ^ ctx->EKi.t[i];
1154 ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
1187 ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
1209 void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1211 void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
1231 memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
1232 ctx->Xi.u[0] = 0;
1233 ctx->Xi.u[1] = 0;
1234 mres = sizeof(ctx->Xi);
1272 ctx->Xi.c[n] ^= c;
1363 ctx->Xi.t[i] ^= c;
1390 ctx->Xi.c[n] ^= c;
1427 ctx->Xi.c[n] ^= c;
1451 void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1453 void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
1473 memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
1474 ctx->Xi.u[0] = 0;
1475 ctx->Xi.u[1] = 0;
1476 mres = sizeof(ctx->Xi);
1509 ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
1567 ctx->Xi.c[i] ^= out[i];
1588 ctx->Xi.c[mres++] ^= out[n] = in[n] ^ ctx->EKi.c[n];
1612 void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1614 void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
1634 memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
1635 ctx->Xi.u[0] = 0;
1636 ctx->Xi.u[1] = 0;
1637 mres = sizeof(ctx->Xi);
1672 ctx->Xi.c[n] ^= c;
1718 ctx->Xi.c[k] ^= in[k];
1755 ctx->Xi.c[mres++] ^= c;
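The encrypt/decrypt variants above interleave CTR keystream application with GHASH over the ciphertext, which is why the encrypt paths XOR the produced output into Xi (e.g. line 1038) while the decrypt paths XOR the incoming byte before decrypting it (e.g. line 1272). One 16-byte step of each direction, sketched with the earlier helpers; EKi stands for the encrypted counter block, as in the source:

    /* Encrypt one block, then hash the ciphertext that was just produced. */
    static void ctr_step_encrypt(block128 *Xi, block128 H, const uint8_t EKi[16],
                                 const uint8_t in[16], uint8_t out[16])
    {
        int i;

        for (i = 0; i < 16; i++)
            out[i] = in[i] ^ EKi[i];        /* C = P ^ E_K(counter) */
        xor_bytes_into(Xi, out, 16);
        *Xi = gf128_mul(*Xi, H);
    }

    /* Hash the incoming ciphertext, then decrypt it. */
    static void ctr_step_decrypt(block128 *Xi, block128 H, const uint8_t EKi[16],
                                 const uint8_t in[16], uint8_t out[16])
    {
        int i;

        xor_bytes_into(Xi, in, 16);
        *Xi = gf128_mul(*Xi, H);
        for (i = 0; i < 16; i++)
            out[i] = in[i] ^ EKi[i];        /* P = C ^ E_K(counter) */
    }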
1774 void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
1776 void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
1824 ctx->Xi.u[0] ^= alen;
1825 ctx->Xi.u[1] ^= clen;
1829 ctx->Xi.u[0] ^= ctx->EK0.u[0];
1830 ctx->Xi.u[1] ^= ctx->EK0.u[1];
1832 if (tag && len <= sizeof(ctx->Xi))
1833 return CRYPTO_memcmp(ctx->Xi.c, tag, len);
1841 memcpy(tag, ctx->Xi.c,
1842 len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
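The finish path closes the hash by folding the AAD and ciphertext bit lengths into Xi (lines 1824-1825), doing one last multiply, and XORing in EK0 = E_K(J0) (lines 1829-1830); the result is either compared against the caller's tag with CRYPTO_memcmp or copied out. The same steps with the earlier sketches, byte-swapping elided because block128 is already big-endian here:

    /* T = (GHASH state ^ [len(A)]_64 || [len(C)]_64) * H ^ E_K(J0) */
    static block128 ghash_finish(block128 Xi, block128 H, block128 EK0,
                                 uint64_t aad_bytes, uint64_t ct_bytes)
    {
        Xi.hi ^= aad_bytes * 8;             /* bit length of the AAD        */
        Xi.lo ^= ct_bytes * 8;              /* bit length of the ciphertext */
        Xi = gf128_mul(Xi, H);
        Xi.hi ^= EK0.hi;                    /* mask with the encrypted J0   */
        Xi.lo ^= EK0.lo;
        return Xi;                          /* the authentication tag       */
    }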