Lines Matching refs:product
399 uint128_t product;
401 product = mul_64_64(left[i], right[k - i]);
403 r01 = add_128_128(r01, product);
404 r2 += (r01.m_high < product.m_high);
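
The inner loop above forms one 64x64 -> 128-bit partial product per pass and folds it into a running column sum held in r01 plus a spill word r2. A minimal standalone sketch of the same column-wise schoolbook multiply, assuming compiler support for unsigned __int128 in place of the kernel's uint128_t, mul_64_64() and add_128_128() helpers (the fixed NDIGITS and the function name are illustrative only):

    #include <stdint.h>

    #define NDIGITS 4   /* example width; the kernel parameterizes this */

    static void vli_mult_sketch(uint64_t *result, const uint64_t *left,
                                const uint64_t *right)
    {
        unsigned __int128 r01 = 0;  /* low 128 bits of the running column sum */
        uint64_t r2 = 0;            /* third word catching the 128-bit spill */
        unsigned int i, k;

        /* Digit k of the result sums every left[i] * right[k - i]. */
        for (k = 0; k < NDIGITS * 2 - 1; k++) {
            unsigned int min = (k < NDIGITS) ? 0 : (k + 1) - NDIGITS;

            for (i = min; i <= k && i < NDIGITS; i++) {
                unsigned __int128 product =
                    (unsigned __int128)left[i] * right[k - i];

                r01 += product;
                r2 += (r01 < product);  /* carry out of the 128-bit add */
            }
            result[k] = (uint64_t)r01;  /* emit the finished digit */
            r01 = (r01 >> 64) | ((unsigned __int128)r2 << 64);
            r2 = 0;
        }
        result[NDIGITS * 2 - 1] = (uint64_t)r01;
    }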
416 /* Compute product = left * right, for a small right value. */
424 uint128_t product;
426 product = mul_64_64(left[k], right);
427 r01 = add_128_128(r01, product);
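
With a single-word right operand only one loop is needed: the low half of each 128-bit partial product becomes an output digit and the high half carries into the next column. A sketch under the same __int128 assumption:

    #include <stdint.h>

    static void vli_umult_sketch(uint64_t *result, const uint64_t *left,
                                 uint64_t right, unsigned int ndigits)
    {
        unsigned __int128 carry = 0;
        unsigned int k;

        for (k = 0; k < ndigits; k++) {
            carry += (unsigned __int128)left[k] * right;
            result[k] = (uint64_t)carry;  /* low half: digit k */
            carry >>= 64;                 /* high half: into the next column */
        }
        result[ndigits] = (uint64_t)carry;
        for (k = ndigits + 1; k < ndigits * 2; k++)
            result[k] = 0;                /* upper digits of the 2n-word result */
    }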
453 uint128_t product;
455 product = mul_64_64(left[i], left[k - i]);
458 r2 += product.m_high >> 63;
459 product.m_high = (product.m_high << 1) |
460 (product.m_low >> 63);
461 product.m_low <<= 1;
464 r01 = add_128_128(r01, product);
465 r2 += (r01.m_high < product.m_high);
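
Squaring halves the work: left[i]*left[k-i] and left[k-i]*left[i] coincide, so each off-diagonal product is computed once and doubled, with the top bit shifted out by the doubling caught in r2 first (the r2 += product.m_high >> 63 above). The same sketch adapted (NDIGITS and __int128 as before):

    static void vli_square_sketch(uint64_t *result, const uint64_t *left)
    {
        unsigned __int128 r01 = 0;
        uint64_t r2 = 0;
        unsigned int i, k;

        for (k = 0; k < NDIGITS * 2 - 1; k++) {
            unsigned int min = (k < NDIGITS) ? 0 : (k + 1) - NDIGITS;

            /* Walk only half of the column: i <= k - i. */
            for (i = min; i <= k && i <= k - i; i++) {
                unsigned __int128 product =
                    (unsigned __int128)left[i] * left[k - i];

                if (i < k - i) {
                    r2 += (uint64_t)(product >> 127); /* bit lost to doubling */
                    product <<= 1;
                }
                r01 += product;
                r2 += (r01 < product);
            }
            result[k] = (uint64_t)r01;
            r01 = (r01 >> 64) | ((unsigned __int128)r2 << 64);
            r2 = 0;
        }
        result[NDIGITS * 2 - 1] = (uint64_t)r01;
    }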
511 * Computes result = product % mod
519 static void vli_mmod_special(u64 *result, const u64 *product,
526 vli_set(r, product, ndigits * 2);
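
vli_mmod_special handles special-form moduli p = 2^k - c for small c: since 2^k = c (mod p), the words above bit k are multiplied by c and folded back into the low part (the vli_set() above seeds r with the full 2n-word product), repeating until the high part is zero, then subtracting p a final few times. Toy single-word illustration with a hypothetical modulus, not one of the kernel's curve primes:

    #include <stdint.h>

    #define K    61
    #define C    1ULL                        /* toy prime p = 2^61 - 1 */
    #define MASK (((uint64_t)1 << K) - 1)
    static const uint64_t P = ((uint64_t)1 << K) - C;

    static uint64_t mmod_special_sketch(unsigned __int128 x)
    {
        /* 2^K = C (mod p): fold hi*2^K + lo down to hi*C + lo. */
        while (x >> K)
            x = (x >> K) * C + (x & MASK);
        while (x >= P)                       /* final conditional subtractions */
            x -= P;
        return (uint64_t)x;
    }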
540 * Computes result = product % mod
553 static void vli_mmod_special2(u64 *result, const u64 *product,
566 vli_set(r, product, ndigits);
568 vli_set(q, product + ndigits, ndigits);
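
vli_mmod_special2 covers a second special form where the folded term enters with the opposite sign; the split is the same (r takes the low half of the product, q the high half, as the two vli_set() calls show). As a toy: for p = 2^T + c, x = q*2^T + r implies x = r - q*c (mod p), so the fold is a subtraction. Hypothetical single-word parameters; the kernel's actual special form differs in detail:

    #include <stdint.h>

    #define T  32
    #define C2 5ULL                          /* toy modulus p = 2^32 + 5 */
    static const uint64_t P2 = ((uint64_t)1 << T) + C2;

    static uint64_t mmod_special2_sketch(uint64_t x)
    {
        /* 2^T = -C2 (mod p), so q*2^T + r = r - q*C2 (mod p). */
        int64_t y = (int64_t)(x & (((uint64_t)1 << T) - 1))
                  - (int64_t)((x >> T) * C2);

        while (y < 0)                        /* pull back into [0, p) */
            y += (int64_t)P2;
        while ((uint64_t)y >= P2)
            y -= (int64_t)P2;
        return (uint64_t)y;
    }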
598 * Computes result = product % mod, where product is 2N words long.
602 static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
607 u64 *v[2] = { tmp, product };
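
vli_mmod_slow is the generic fallback: binary shift-and-subtract long division, with the v[2] buffer pair above letting it alternate between product and tmp as trial subtractions succeed or fail. The idea in single-word form (assumes mod != 0):

    #include <stdint.h>

    static uint64_t mmod_slow_sketch(uint64_t x, uint64_t mod)
    {
        /* Align mod under the top bit of x, then walk back down,
         * subtracting the shifted modulus whenever it fits. */
        int shift = __builtin_clzll(mod) - __builtin_clzll(x | 1);

        if (shift < 0)
            shift = 0;
        for (; shift >= 0; shift--) {
            uint64_t m = mod << shift;

            if (x >= m)
                x -= m;
        }
        return x;
    }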
643 /* Computes result = product % mod using Barrett's reduction with precomputed
652 static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
659 vli_mult(q, product + ndigits, mu, ndigits);
661 vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
663 vli_sub(r, product, r, ndigits * 2);
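
Barrett reduction trades the division for two multiplications against a precomputed reciprocal mu ~ 2^(2w)/mod (in the kernel's layout, appended after the modulus words, hence the mu pointer used above): an approximate quotient comes from the product's high half, its multiple of mod is subtracted off, and a short loop trims the leftover. Toy fixed-width version with a hypothetical 32-bit modulus:

    #include <stdint.h>

    static uint32_t barrett_sketch(uint64_t x, uint32_t m, uint64_t mu)
    {
        /* mu must be precomputed as floor(2^64 / m). */
        uint64_t q = (uint64_t)(((unsigned __int128)(x >> 32) * mu) >> 32);
        uint64_t r = x - q * m;     /* q never exceeds floor(x / m) */

        while (r >= m)              /* the estimate is short by at most a few m */
            r -= m;
        return (uint32_t)r;
    }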
678 static void vli_mmod_fast_192(u64 *result, const u64 *product,
684 vli_set(result, product, ndigits);
686 vli_set(tmp, &product[3], ndigits);
690 tmp[1] = product[3];
691 tmp[2] = product[4];
694 tmp[0] = tmp[1] = product[5];
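
Why these three additions reduce a P-192 product: p192 = 2^192 - 2^64 - 1, so

    2^192 = 2^64 + 1,   2^256 = 2^128 + 2^64,   2^320 = 2^128 + 2^64 + 1   (all mod p192)

and, writing the six-word product as (d5,...,d0), its high half contributes

    d3*2^192 + d4*2^256 + d5*2^320 = (d5,d4,d3) + (d4,d3,0) + (0,d5,d5)   (mod p192)

as three-word vectors, high word first; these are exactly the tmp vectors added above. A final carry/compare fix-up (not among the matched lines) then brings the sum below p192.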
702 /* Computes result = product % curve_prime
705 static void vli_mmod_fast_256(u64 *result, const u64 *product,
712 vli_set(result, product, ndigits);
716 tmp[1] = product[5] & 0xffffffff00000000ull;
717 tmp[2] = product[6];
718 tmp[3] = product[7];
723 tmp[1] = product[6] << 32;
724 tmp[2] = (product[6] >> 32) | (product[7] << 32);
725 tmp[3] = product[7] >> 32;
730 tmp[0] = product[4];
731 tmp[1] = product[5] & 0xffffffff;
733 tmp[3] = product[7];
737 tmp[0] = (product[4] >> 32) | (product[5] << 32);
738 tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
739 tmp[2] = product[7];
740 tmp[3] = (product[6] >> 32) | (product[4] << 32);
744 tmp[0] = (product[5] >> 32) | (product[6] << 32);
745 tmp[1] = (product[6] >> 32);
747 tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
751 tmp[0] = product[6];
752 tmp[1] = product[7];
754 tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
758 tmp[0] = (product[6] >> 32) | (product[7] << 32);
759 tmp[1] = (product[7] >> 32) | (product[4] << 32);
760 tmp[2] = (product[4] >> 32) | (product[5] << 32);
761 tmp[3] = (product[6] << 32);
765 tmp[0] = product[7];
766 tmp[1] = product[4] & 0xffffffff00000000ull;
767 tmp[2] = product[5];
768 tmp[3] = product[6] & 0xffffffff00000000ull;
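
For reference, the term schedule this implements is the standard NIST P-256 routine. With c0..c15 the 32-bit chunks of the 512-bit product (so product[4] = c9||c8, ..., product[7] = c15||c14) and tuples written high chunk first:

    t  = (c7,  c6,  c5,  c4,  c3,  c2,  c1,  c0 )
    s1 = (c15, c14, c13, c12, c11, 0,   0,   0  )   doubled
    s2 = (0,   c15, c14, c13, c12, 0,   0,   0  )   doubled
    s3 = (c15, c14, 0,   0,   0,   c10, c9,  c8 )
    s4 = (c8,  c13, c15, c14, c13, c11, c10, c9 )
    d1 = (c10, c8,  0,   0,   0,   c13, c12, c11)
    d2 = (c11, c9,  0,   0,   c15, c14, c13, c12)
    d3 = (c12, 0,   c10, c9,  c8,  c15, c14, c13)
    d4 = (c13, 0,   c11, c10, c9,  0,   c15, c14)

    result = t + 2*s1 + 2*s2 + s3 + s4 - d1 - d2 - d3 - d4   (mod p256)

The tmp[] assignments above pack each of these tuples two chunks per 64-bit word.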
785 /* Computes result = product % curve_prime
788 static void vli_mmod_fast_384(u64 *result, const u64 *product,
795 vli_set(result, product, ndigits);
800 tmp[2] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
801 tmp[3] = product[11]>>32; // 0 ||a23
808 tmp[0] = product[6]; //a13||a12
809 tmp[1] = product[7]; //a15||a14
810 tmp[2] = product[8]; //a17||a16
811 tmp[3] = product[9]; //a19||a18
812 tmp[4] = product[10]; //a21||a20
813 tmp[5] = product[11]; //a23||a22
817 tmp[0] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
818 tmp[1] = SL32OR32(product[6], (product[11]>>32)); //a12||a23
819 tmp[2] = SL32OR32(product[7], (product[6]>>32)); //a14||a13
820 tmp[3] = SL32OR32(product[8], (product[7]>>32)); //a16||a15
821 tmp[4] = SL32OR32(product[9], (product[8]>>32)); //a18||a17
822 tmp[5] = SL32OR32(product[10], (product[9]>>32)); //a20||a19
826 tmp[0] = AND64H(product[11]); //a23|| 0
827 tmp[1] = (product[10]<<32); //a20|| 0
828 tmp[2] = product[6]; //a13||a12
829 tmp[3] = product[7]; //a15||a14
830 tmp[4] = product[8]; //a17||a16
831 tmp[5] = product[9]; //a19||a18
837 tmp[2] = product[10]; //a21||a20
838 tmp[3] = product[11]; //a23||a22
844 tmp[0] = AND64L(product[10]); // 0 ||a20
845 tmp[1] = AND64H(product[10]); //a21|| 0
846 tmp[2] = product[11]; //a23||a22
853 tmp[0] = SL32OR32(product[6], (product[11]>>32)); //a12||a23
854 tmp[1] = SL32OR32(product[7], (product[6]>>32)); //a14||a13
855 tmp[2] = SL32OR32(product[8], (product[7]>>32)); //a16||a15
856 tmp[3] = SL32OR32(product[9], (product[8]>>32)); //a18||a17
857 tmp[4] = SL32OR32(product[10], (product[9]>>32)); //a20||a19
858 tmp[5] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
862 tmp[0] = (product[10]<<32); //a20|| 0
863 tmp[1] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
864 tmp[2] = (product[11]>>32); // 0 ||a23
872 tmp[1] = AND64H(product[11]); //a23|| 0
873 tmp[2] = product[11]>>32; // 0 ||a23
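
The composition here is likewise the standard NIST P-384 schedule: with a0..a23 the 32-bit chunks of the 768-bit product (as the //aXX||aYY comments annotate), the low half t = (a11..a0) is the initial vli_set() copy, the tmp vectors above are s1..s6 and d1..d3, and

    result = t + 2*s1 + s2 + s3 + s4 + s5 + s6 - d1 - d2 - d3   (mod p384)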
894 /* Computes result = product % curve_prime for different curve_primes.
899 static bool vli_mmod_fast(u64 *result, u64 *product,
910 vli_mmod_special(result, product, curve_prime,
915 vli_mmod_special2(result, product, curve_prime,
919 vli_mmod_barrett(result, product, curve_prime, ndigits);
925 vli_mmod_fast_192(result, product, curve_prime, tmp);
928 vli_mmod_fast_256(result, product, curve_prime, tmp);
931 vli_mmod_fast_384(result, product, curve_prime, tmp);
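
The dispatch shape, condensed (a sketch: is_nist_curve(), prime_is_2k_minus_c() and prime_is_special2_form() are illustrative predicate names summarizing tests the kernel makes on the curve name and the prime's top words, not real functions; curve->p is taken as the curve prime):

    static bool vli_mmod_fast_sketch(u64 *result, u64 *product,
                                     const struct ecc_curve *curve)
    {
        u64 tmp[2 * ECC_MAX_DIGITS];
        const u64 *curve_prime = curve->p;
        const unsigned int ndigits = curve->g.ndigits;

        if (!is_nist_curve(curve)) {
            /* Generic curves: try the cheap special forms, else Barrett. */
            if (prime_is_2k_minus_c(curve_prime, ndigits))
                vli_mmod_special(result, product, curve_prime, ndigits);
            else if (prime_is_special2_form(curve_prime, ndigits))
                vli_mmod_special2(result, product, curve_prime, ndigits);
            else
                vli_mmod_barrett(result, product, curve_prime, ndigits);
            return true;
        }

        /* NIST curves get their dedicated folding routine by width. */
        switch (ndigits) {
        case 3: vli_mmod_fast_192(result, product, curve_prime, tmp); break;
        case 4: vli_mmod_fast_256(result, product, curve_prime, tmp); break;
        case 6: vli_mmod_fast_384(result, product, curve_prime, tmp); break;
        default:
            return false;   /* unsupported digit count */
        }
        return true;
    }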
947 u64 product[ECC_MAX_DIGITS * 2];
949 vli_mult(product, left, right, ndigits);
950 vli_mmod_slow(result, product, mod, ndigits);
958 u64 product[2 * ECC_MAX_DIGITS];
960 vli_mult(product, left, right, curve->g.ndigits);
961 vli_mmod_fast(result, product, curve);
968 u64 product[2 * ECC_MAX_DIGITS];
970 vli_square(product, left, curve->g.ndigits);
971 vli_mmod_fast(result, product, curve);
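
All three wrappers share the widen-then-reduce pattern: vli_mult() or vli_square() fills a 2n-word scratch buffer, and the modular reduction folds it back to n words. They compose directly; a usage sketch (hypothetical caller) computing left^4 mod p:

    u64 t[ECC_MAX_DIGITS];

    vli_mod_square_fast(t, left, curve);   /* t = left^2 mod p */
    vli_mod_square_fast(t, t, curve);      /* t = left^4 mod p */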
1600 struct ecc_point *product, *pk;
1630 product = ecc_alloc_point(ndigits);
1631 if (!product) {
1636 ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
1638 if (ecc_point_is_zero(product)) {
1643 ecc_swap_digits(product->x, secret, ndigits);
1648 ecc_free_point(product);
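
In this final group product is a point, not a scalar: the peer's public key pk is multiplied by the local private scalar, a zero result (the point at infinity) is rejected, and the x coordinate is digit-swapped out as the shared secret. Condensed outline of the flow above (error handling elided; the err/out labels are hypothetical):

    product = ecc_alloc_point(ndigits);            /* scratch point */
    if (!product)
        goto err;                                  /* hypothetical label */
    ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
    if (ecc_point_is_zero(product))                /* reject point at infinity */
        goto out;
    ecc_swap_digits(product->x, secret, ndigits);  /* x coordinate out */
    out:
        ecc_free_point(product);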