// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * Supplemental SSE3 instructions.
 *
 * This file is based on sha1_generic.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>

static const struct x86_cpu_id module_cpu_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);

static int sha1_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len, sha1_block_fn *sha1_xform)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	if (!crypto_simd_usable() ||
	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
		return crypto_sha1_update(desc, data, len);

	/*
	 * Make sure struct sha1_state begins directly with the SHA1
	 * 160-bit internal state, as this is what the asm functions expect.
	 */
	BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);

	kernel_fpu_begin();
	sha1_base_do_update(desc, data, len, sha1_xform);
	kernel_fpu_end();

	return 0;
}

static int sha1_finup(struct shash_desc *desc, const u8 *data,
		      unsigned int len, u8 *out, sha1_block_fn *sha1_xform)
{
	if (!crypto_simd_usable())
		return crypto_sha1_finup(desc, data, len, out);

	kernel_fpu_begin();
	if (len)
		sha1_base_do_update(desc, data, len, sha1_xform);
	sha1_base_do_finalize(desc, sha1_xform);
	kernel_fpu_end();

	return sha1_base_finish(desc, out);
}

asmlinkage void sha1_transform_ssse3(struct sha1_state *state,
				     const u8 *data, int blocks);

static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	return sha1_update(desc, data, len, sha1_transform_ssse3);
}

static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
			    unsigned int len, u8 *out)
{
	return sha1_finup(desc, data, len, out, sha1_transform_ssse3);
}
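/*
 * Every variant below also provides a .final callback for the shash API.
 * Finalizing is just finup() with no trailing data, so each final handler
 * delegates to its finup() with data == NULL and len == 0.
 */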
/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
	return sha1_ssse3_finup(desc, NULL, 0, out);
}

static struct shash_alg sha1_ssse3_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = sha1_base_init,
	.update = sha1_ssse3_update,
	.final = sha1_ssse3_final,
	.finup = sha1_ssse3_finup,
	.descsize = sizeof(struct sha1_state),
	.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sha1-ssse3",
		.cra_priority = 150,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
	}
};

static int register_sha1_ssse3(void)
{
	if (boot_cpu_has(X86_FEATURE_SSSE3))
		return crypto_register_shash(&sha1_ssse3_alg);
	return 0;
}

static void unregister_sha1_ssse3(void)
{
	if (boot_cpu_has(X86_FEATURE_SSSE3))
		crypto_unregister_shash(&sha1_ssse3_alg);
}

asmlinkage void sha1_transform_avx(struct sha1_state *state,
				   const u8 *data, int blocks);

static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
			   unsigned int len)
{
	return sha1_update(desc, data, len, sha1_transform_avx);
}

static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
			  unsigned int len, u8 *out)
{
	return sha1_finup(desc, data, len, out, sha1_transform_avx);
}

static int sha1_avx_final(struct shash_desc *desc, u8 *out)
{
	return sha1_avx_finup(desc, NULL, 0, out);
}

static struct shash_alg sha1_avx_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = sha1_base_init,
	.update = sha1_avx_update,
	.final = sha1_avx_final,
	.finup = sha1_avx_finup,
	.descsize = sizeof(struct sha1_state),
	.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sha1-avx",
		.cra_priority = 160,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
	}
};

static bool avx_usable(void)
{
	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
		if (boot_cpu_has(X86_FEATURE_AVX))
			pr_info("AVX detected but unusable.\n");
		return false;
	}

	return true;
}

static int register_sha1_avx(void)
{
	if (avx_usable())
		return crypto_register_shash(&sha1_avx_alg);
	return 0;
}

static void unregister_sha1_avx(void)
{
	if (avx_usable())
		crypto_unregister_shash(&sha1_avx_alg);
}

#define SHA1_AVX2_BLOCK_OPTSIZE	4	/* optimal 4*64 bytes of SHA1 blocks */

asmlinkage void sha1_transform_avx2(struct sha1_state *state,
				    const u8 *data, int blocks);

static bool avx2_usable(void)
{
	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
		&& boot_cpu_has(X86_FEATURE_BMI1)
		&& boot_cpu_has(X86_FEATURE_BMI2))
		return true;

	return false;
}

static void sha1_apply_transform_avx2(struct sha1_state *state,
				      const u8 *data, int blocks)
{
	/* Select the optimal transform based on data block size */
	if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE)
		sha1_transform_avx2(state, data, blocks);
	else
		sha1_transform_avx(state, data, blocks);
}

static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	return sha1_update(desc, data, len, sha1_apply_transform_avx2);
}

static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
			   unsigned int len, u8 *out)
{
	return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2);
}

static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
{
	return sha1_avx2_finup(desc, NULL, 0, out);
}
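/*
 * All variants register under the same cra_name ("sha1"); the crypto core
 * picks the one with the highest cra_priority (ssse3 = 150, avx = 160,
 * avx2 = 170, sha-ni = 250), so the fastest driver the CPU supports wins.
 */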
static struct shash_alg sha1_avx2_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = sha1_base_init,
	.update = sha1_avx2_update,
	.final = sha1_avx2_final,
	.finup = sha1_avx2_finup,
	.descsize = sizeof(struct sha1_state),
	.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sha1-avx2",
		.cra_priority = 170,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
	}
};

static int register_sha1_avx2(void)
{
	if (avx2_usable())
		return crypto_register_shash(&sha1_avx2_alg);
	return 0;
}

static void unregister_sha1_avx2(void)
{
	if (avx2_usable())
		crypto_unregister_shash(&sha1_avx2_alg);
}

#ifdef CONFIG_AS_SHA1_NI
asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
				  int rounds);

static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	return sha1_update(desc, data, len, sha1_ni_transform);
}

static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	return sha1_finup(desc, data, len, out, sha1_ni_transform);
}

static int sha1_ni_final(struct shash_desc *desc, u8 *out)
{
	return sha1_ni_finup(desc, NULL, 0, out);
}

static struct shash_alg sha1_ni_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = sha1_base_init,
	.update = sha1_ni_update,
	.final = sha1_ni_final,
	.finup = sha1_ni_finup,
	.descsize = sizeof(struct sha1_state),
	.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sha1-ni",
		.cra_priority = 250,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
	}
};

static int register_sha1_ni(void)
{
	if (boot_cpu_has(X86_FEATURE_SHA_NI))
		return crypto_register_shash(&sha1_ni_alg);
	return 0;
}

static void unregister_sha1_ni(void)
{
	if (boot_cpu_has(X86_FEATURE_SHA_NI))
		crypto_unregister_shash(&sha1_ni_alg);
}

#else
static inline int register_sha1_ni(void) { return 0; }
static inline void unregister_sha1_ni(void) { }
#endif

static int __init sha1_ssse3_mod_init(void)
{
	if (!x86_match_cpu(module_cpu_ids))
		return -ENODEV;

	if (register_sha1_ssse3())
		goto fail;

	if (register_sha1_avx()) {
		unregister_sha1_ssse3();
		goto fail;
	}

	if (register_sha1_avx2()) {
		unregister_sha1_avx();
		unregister_sha1_ssse3();
		goto fail;
	}

	if (register_sha1_ni()) {
		unregister_sha1_avx2();
		unregister_sha1_avx();
		unregister_sha1_ssse3();
		goto fail;
	}

	return 0;
fail:
	return -ENODEV;
}

static void __exit sha1_ssse3_mod_fini(void)
{
	unregister_sha1_ni();
	unregister_sha1_avx2();
	unregister_sha1_avx();
	unregister_sha1_ssse3();
}

module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-ssse3");
MODULE_ALIAS_CRYPTO("sha1-avx");
MODULE_ALIAS_CRYPTO("sha1-avx2");
#ifdef CONFIG_AS_SHA1_NI
MODULE_ALIAS_CRYPTO("sha1-ni");
#endif
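/*
 * Example usage, illustrative only: callers reach these drivers through the
 * generic shash API rather than by driver name. A minimal sketch, where
 * "data" and "len" stand for a caller-supplied buffer (error handling
 * elided):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	if (!IS_ERR(tfm)) {
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *
 *		desc->tfm = tfm;
 *		crypto_shash_digest(desc, data, len, digest);
 *		crypto_free_shash(tfm);
 *	}
 */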