Lines Matching refs:rh
103 #define ADD128(rh, rl, ih, il) \
108 (rh)++; \
109 (rh) += (ih); \
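
ADD128 is a 128-bit accumulate: the low words are added first, a carry is propagated into the high word when the low half wraps (the (rh)++ at line 108), and then the high words are added (line 109). A minimal stand-alone sketch of the same operation, with illustrative names rather than the kernel's macro:

#include <stdint.h>

/* Sketch of the ADD128 idea: add the 128-bit value (ih:il) into the
 * 128-bit accumulator (rh:rl), carrying from the low word into the high. */
static void add128(uint64_t *rh, uint64_t *rl, uint64_t ih, uint64_t il)
{
    *rl += il;
    if (*rl < il)       /* low half wrapped around: carry into the high half */
        (*rh)++;
    *rh += ih;
}
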
114 #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
118 rh = MUL32(_i1>>32, _i2>>32); \
120 ADD128(rh, rl, (m >> 32), (m << 32)); \
123 #define MUL64(rh, rl, i1, i2) \
128 rh = MUL32(_i1>>32, _i2>>32); \
130 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
131 ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
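
Both multiply macros build a 64x64->128-bit product from 32x32->64 partial products: high*high lands in rh, low*low in rl, and the two cross terms are folded in shifted up by 32 bits. PMUL64 merges the cross terms into a single sum on the stated assumption that the sum cannot overflow; MUL64 adds them separately. A hedged sketch of the full MUL64 form, reusing the add128 helper from the previous sketch:

#include <stdint.h>

#define MUL32(a, b)  ((uint64_t)(uint32_t)(a) * (uint32_t)(b))

/* Schoolbook 64x64 -> 128-bit multiply: (rh:rl) = i1 * i2. */
static void mul64(uint64_t *rh, uint64_t *rl, uint64_t i1, uint64_t i2)
{
    uint64_t m1 = MUL32(i1, i2 >> 32);     /* lo(i1) * hi(i2) */
    uint64_t m2 = MUL32(i1 >> 32, i2);     /* hi(i1) * lo(i2) */

    *rh = MUL32(i1 >> 32, i2 >> 32);       /* hi * hi */
    *rl = MUL32(i1, i2);                   /* lo * lo */
    add128(rh, rl, m1 >> 32, m1 << 32);    /* cross terms, shifted up 32 bits */
    add128(rh, rl, m2 >> 32, m2 << 32);
}
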
148 #define nh_16(mp, kp, nw, rh, rl) \
151 rh = rl = 0; \
155 ADD128(rh, rl, th, tl); \
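
nh_16 is the NH first-level hash: each pair of little-endian 64-bit message words is added word-wise (mod 2^64) to the key, the two sums are multiplied 64x64->128, and all products are summed into the 128-bit accumulator (rh:rl). A portable sketch of the same loop, assuming the mul64/add128 helpers above and a little-endian host in place of the kernel's le64_to_cpup():

#include <stdint.h>
#include <string.h>

/* Illustrative little-endian load (stand-in for le64_to_cpup()). */
static uint64_t load_le64(const unsigned char *p)
{
    uint64_t v;
    memcpy(&v, p, sizeof(v));    /* assumes a little-endian host for brevity */
    return v;
}

/* NH over nw 64-bit words (nw even):
 * (rh:rl) = sum over i of (m[i] + k[i]) * (m[i+1] + k[i+1]). */
static void nh_16_sketch(const unsigned char *m, const uint64_t *k, int nw,
                         uint64_t *rh, uint64_t *rl)
{
    uint64_t th, tl;
    int i;

    *rh = *rl = 0;
    for (i = 0; i < nw; i += 2) {
        mul64(&th, &tl,
              load_le64(m + 8 * i)       + k[i],
              load_le64(m + 8 * (i + 1)) + k[i + 1]);
        add128(rh, rl, th, tl);
    }
}
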
159 #define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
162 rh1 = rl1 = rh = rl = 0; \
166 ADD128(rh, rl, th, tl); \
174 #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
177 rh = rl = 0; \
181 ADD128(rh, rl, th, tl); \
184 ADD128(rh, rl, th, tl); \
187 ADD128(rh, rl, th, tl); \
190 ADD128(rh, rl, th, tl); \
194 #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
197 rh1 = rl1 = rh = rl = 0; \
201 ADD128(rh, rl, th, tl); \
207 ADD128(rh, rl, th, tl); \
213 ADD128(rh, rl, th, tl); \
219 ADD128(rh, rl, th, tl); \
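
nh_vmac_nhbytes is the same NH loop unrolled to consume a full VMAC_NHBYTES block (four word pairs) per iteration, and the _2 variants interleave a second accumulation whose key is offset by two 64-bit words, giving the two independent NH results needed for 128-bit tags. Conceptually, and exactly how the portable fallback further down (line 340) is defined, the second result is just another NH pass over the same message with a shifted key; a sketch using nh_16_sketch from above:

/* Illustrative: an _2 variant is equivalent to two single-result NH passes
 * over the same message with the key offset by two 64-bit words. */
static void nh_16_2_sketch(const unsigned char *m, const uint64_t *k, int nw,
                           uint64_t *rh, uint64_t *rl,
                           uint64_t *rh1, uint64_t *rl1)
{
    nh_16_sketch(m, k,     nw, rh,  rl);     /* first NH result           */
    nh_16_sketch(m, k + 2, nw, rh1, rl1);    /* second, key shifted by 2  */
}
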
253 #define nh_16(mp, kp, nw, rh, rl) \
257 rh = rl = t = 0; \
263 ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
265 rh += (u64)(u32)(m1 >> 32) \
269 ADD128(rh, rl, (t >> 32), (t << 32)); \
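
This second nh_16 definition is the fallback for machines without a fast 64x64->128 multiply. It splits each product as t1*t2 = hi(t1)*hi(t2)*2^64 + (m1 + m2)*2^32 + lo(t1)*lo(t2), with m1 = lo(t1)*hi(t2) and m2 = hi(t1)*lo(t2): the high halves of m1 and m2 go straight into rh (they are multiples of 2^64), while their low halves are deferred in the running counter t and folded in once per call by the final ADD128(rh, rl, t >> 32, t << 32). A sketch of one loop step, reusing add128 and the MUL32 macro from the sketches above (names are mine):

/* One step of the 32-bit-only NH loop: accumulate t1*t2 into (rh:rl),
 * deferring the low 32 bits of each cross term in *t.  The caller folds
 * the deferred total in once at the end:
 *     add128(rh, rl, *t >> 32, *t << 32); */
static void nh_step_32bit(uint64_t *rh, uint64_t *rl, uint64_t *t,
                          uint64_t t1, uint64_t t2)
{
    uint64_t m1 = MUL32(t1, t2 >> 32);         /* lo(t1) * hi(t2) */
    uint64_t m2 = MUL32(t1 >> 32, t2);         /* hi(t1) * lo(t2) */

    add128(rh, rl, MUL32(t1 >> 32, t2 >> 32), MUL32(t1, t2));
    *rh += (uint64_t)(uint32_t)(m1 >> 32) + (uint32_t)(m2 >> 32);
    *t  += (uint64_t)(uint32_t)m1 + (uint32_t)m2;
}
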
340 #define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
342 nh_16(mp, kp, nw, rh, rl); \
347 #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
348 nh_16(mp, kp, nw, rh, rl)
351 #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
353 nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
360 u64 rh, rl, t, z = 0;
385 MUL64(rh, rl, p1, p2);
386 t = rh >> 56;
387 ADD128(t, rl, z, rh);
388 rh <<= 8;
389 ADD128(t, rl, z, rh);
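
These lines are the tail of the L3 hash: MUL64 forms the 128-bit product of the two polynomial-hash halves, and the following steps reduce it modulo p64 = 2^64 - 257. Since 2^64 is congruent to 257 mod p64, the high word rh folds back in as 257*rh = (rh << 8) + rh, with t catching the bits shifted out by the left shift. A hedged sketch of that folding, assuming add128 from above; the trailing corrections mirror the code just after these matches, and the names are mine:

#include <stdint.h>

#define P64 ((uint64_t)0 - 257)    /* 2^64 - 257 */

/* Fold the 128-bit value (rh:rl) down modulo p64 = 2^64 - 257: the high
 * word contributes 257*rh = (rh << 8) + rh, then the small overflow t is
 * folded in the same way and the final wrap-arounds are corrected. */
static uint64_t reduce_mod_p64(uint64_t rh, uint64_t rl)
{
    uint64_t t = rh >> 56;              /* bits that (rh << 8) would lose */

    add128(&t, &rl, 0, rh);             /* + rh        (the "1" of 257)   */
    add128(&t, &rl, 0, rh << 8);        /* + (rh << 8) (the "256" of 257) */
    t += t << 8;                        /* t carries 2^64 weight: t *= 257 */
    rl += t;
    rl += (0 - (uint64_t)(rl < t)) & 257;        /* wrapped past 2^64      */
    rl += (0 - (uint64_t)(rl > P64 - 1)) & 257;  /* conditional final -p64 */
    return rl;
}
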
407 u64 rh, rl;
411 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
412 rh &= m62;
413 ADD128(ch, cl, rh, rl);
419 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
420 rh &= m62;
421 poly_step(ch, cl, pkh, pkl, rh, rl);
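
Here rh/rl carry one block's NH output into the second-level polynomial hash: rh is masked to 62 bits so the pair stays below 2^127 - 1, the very first block is simply added into the accumulator (ch:cl), and every later block goes through poly_step, i.e. accumulator = accumulator * polykey + NH_output mod 2^127 - 1. The same Horner pattern, scaled down to a toy 61-bit Mersenne prime and ignoring how the real accumulator is seeded, so it stays a few lines (names and the toy modulus are mine, and a compiler with unsigned __int128 is assumed):

#include <stdint.h>

#define TOY_PRIME (((uint64_t)1 << 61) - 1)   /* stand-in for VMAC's 2^127 - 1 */

/* Horner-style polynomial accumulation over a prime field: the first block
 * hash is taken as-is, each later one does acc = acc * key + hash (mod p). */
static uint64_t poly_accumulate(const uint64_t *block_hash, int nblocks,
                                uint64_t key)
{
    unsigned __int128 acc = block_hash[0] % TOY_PRIME;
    int i;

    for (i = 1; i < nblocks; i++)
        acc = ((unsigned __int128)(uint64_t)acc * (key % TOY_PRIME)
               + block_hash[i]) % TOY_PRIME;
    return (uint64_t)acc;
}
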
545 u64 rh, rl;
548 nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
549 rh &= m62;
552 rh, rl);
554 ADD128(ch, cl, rh, rl);
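
In the final step, a trailing partial block is zero-padded up to the next 16-byte boundary (hence the n / 8 word count at line 548), NH-hashed, masked to 62 bits, and then either folded in with poly_step when full blocks were already processed or simply added when it is the only block. A small sketch of the zero-padding this path relies on (buffer handling and names are illustrative):

#include <string.h>

/* Illustrative: pad a trailing partial block with zeroes up to the next
 * 16-byte (one NH word pair) boundary before NH-hashing it; the returned
 * byte count divided by 8 is the even word count handed to nh_16. */
static unsigned int pad_partial_block(unsigned char *buf, unsigned int partial)
{
    unsigned int n = (partial + 15) & ~15u;   /* round up to a multiple of 16 */

    memset(buf + partial, 0, n - partial);
    return n;
}
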