/kernel/linux/linux-5.10/crypto/
aegis128-core.c
    32   union aegis_block blocks[AEGIS128_STATE_BLOCKS];  (member)
    79   tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1];  (in crypto_aegis128_update)
    81-82  crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], &state->blocks[i]);  (in crypto_aegis128_update)
    83   crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);  (in crypto_aegis128_update)
    95   crypto_aegis_block_xor(&state->blocks[0], msg);  (in crypto_aegis128_update_a)
    106  crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);  (in crypto_aegis128_update_u)
    119  state->blocks[…  (in crypto_aegis128_init)
    [all...]
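Note: the matches above show the AEGIS-128 state rotation: five 16-byte state blocks, each absorbing one AES round of its predecessor, with the last block wrapping around to update block 0. A minimal userspace sketch of that update shape follows; aesenc_stub is a hypothetical stand-in (plain XOR) for the kernel's crypto_aegis_aesenc(), so the output is not real AEGIS, only the data flow matches.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define AEGIS_BLOCK_SIZE      16
    #define AEGIS128_STATE_BLOCKS 5

    union aegis_block {
        uint8_t bytes[AEGIS_BLOCK_SIZE];
    };

    struct aegis_state {
        union aegis_block blocks[AEGIS128_STATE_BLOCKS];
    };

    /* Stand-in for one AES round: the kernel's crypto_aegis_aesenc()
     * computes dst = AESRound(src) ^ key; XOR keeps this runnable. */
    static void aesenc_stub(union aegis_block *dst, const union aegis_block *src,
                            const union aegis_block *key)
    {
        for (int i = 0; i < AEGIS_BLOCK_SIZE; i++)
            dst->bytes[i] = src->bytes[i] ^ key->bytes[i];
    }

    /* Same shape as crypto_aegis128_update(): each block absorbs a
     * round of its predecessor, block 4 wrapping around to block 0. */
    static void aegis128_update(struct aegis_state *state)
    {
        union aegis_block tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1];

        for (int i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--)
            aesenc_stub(&state->blocks[i], &state->blocks[i - 1],
                        &state->blocks[i]);
        aesenc_stub(&state->blocks[0], &tmp, &state->blocks[0]);
    }

    int main(void)
    {
        struct aegis_state st;

        memset(&st, 0x55, sizeof(st));
        aegis128_update(&st);
        printf("blocks[0].bytes[0] = 0x%02x\n", st.blocks[0].bytes[0]);
        return 0;
    }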
/kernel/linux/linux-5.10/crypto/async_tx/
async_raid6_recov.c
    154  struct page **blocks, unsigned int *offs,  (in __2data_recov_4)
    168  p = blocks[disks-2];  (in __2data_recov_4)
    170  q = blocks[disks-1];  (in __2data_recov_4)
    173  a = blocks[faila];  (in __2data_recov_4)
    175  b = blocks[failb];  (in __2data_recov_4)
    204  struct page **blocks, unsigned int *offs,  (in __2data_recov_5)
    222  if (blocks[i] == NULL)  (in __2data_recov_5)
    231  p = blocks[disks-2];  (in __2data_recov_5)
    233  q = blocks[disks-1];  (in __2data_recov_5)
    235  g = blocks[goo…  (in __2data_recov_5)
    definitions:
    153  __2data_recov_4(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    203  __2data_recov_5(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    294  __2data_recov_n(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    393  async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    471  async_raid6_datap_recov(int disks, size_t bytes, int faila, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    [all...]
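Note: the __2data_recov_* paths rebuild two lost data disks from P (XOR parity at blocks[disks-2]) and Q (GF(2^8) syndrome at blocks[disks-1]). A self-contained scalar model of that math, using a bit-loop gf_mul instead of the kernel's raid6 lookup tables and plain byte arrays instead of struct page. It solves S_p = Da ^ Db and S_q = g^a*Da ^ g^b*Db for the two missing blocks:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define NDATA 4
    #define LEN   8

    /* GF(2^8) multiply, RAID-6 polynomial x^8+x^4+x^3+x^2+1 (0x11d). */
    static uint8_t gf_mul(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;
        while (b) {
            if (b & 1)
                r ^= a;
            a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
            b >>= 1;
        }
        return r;
    }

    static uint8_t gf_inv(uint8_t a)
    {
        uint8_t r = 1;
        for (int i = 0; i < 254; i++)   /* a^254 == a^-1 in GF(2^8) */
            r = gf_mul(r, a);
        return r;
    }

    static uint8_t gf_exp2(int n)       /* g^n with generator g = 2 */
    {
        uint8_t r = 1;
        while (n--)
            r = gf_mul(r, 2);
        return r;
    }

    int main(void)
    {
        uint8_t d[NDATA][LEN], p[LEN] = {0}, q[LEN] = {0};
        int faila = 1, failb = 3;       /* hypothetical failed data disks */
        uint8_t da[LEN], db[LEN];
        uint8_t ga = gf_exp2(faila), gb = gf_exp2(failb);

        for (int i = 0; i < NDATA; i++)
            for (int j = 0; j < LEN; j++)
                d[i][j] = (uint8_t)(i * 17 + j * 3 + 1);

        /* P = xor of data, Q = xor of g^i * data. */
        for (int i = 0; i < NDATA; i++)
            for (int j = 0; j < LEN; j++) {
                p[j] ^= d[i][j];
                q[j] ^= gf_mul(gf_exp2(i), d[i][j]);
            }

        /* Pretend disks faila/failb are gone; rebuild them from P, Q
         * and the surviving data. */
        for (int j = 0; j < LEN; j++) {
            uint8_t sp = p[j], sq = q[j];

            for (int i = 0; i < NDATA; i++) {
                if (i == faila || i == failb)
                    continue;
                sp ^= d[i][j];
                sq ^= gf_mul(gf_exp2(i), d[i][j]);
            }
            /* sp = Da ^ Db, sq = ga*Da ^ gb*Db  =>  solve for Da, Db. */
            da[j] = gf_mul(sq ^ gf_mul(gb, sp), gf_inv(ga ^ gb));
            db[j] = sp ^ da[j];
        }

        printf("faila ok: %d, failb ok: %d\n",
               !memcmp(da, d[faila], LEN), !memcmp(db, d[failb], LEN));
        return 0;
    }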
async_pq.c
    20   /* the struct page *blocks[] parameter passed to async_gen_syndrome()  (comment)
    22    * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]  (comment)
    107  do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,  (definition)
    117  srcs = (void **) blocks;  (in do_sync_gen_syndrome)
    120  if (blocks[i] == NULL) {  (in do_sync_gen_syndrome)
    124  srcs[i] = page_address(blocks[i]) + offsets[i];  (in do_sync_gen_syndrome)
    157   * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1  (comment)
    159   * @disks: number of blocks (includin…  (comment)
    definitions:
    177  async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit)
    272  pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
    298  async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, unsigned int s_off, struct async_submit_ctl *submit)
    [all...]
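Note: do_sync_gen_syndrome() treats a NULL slot in blocks[] as an all-zero source (the kernel substitutes raid6_empty_zero_page). A hedged sketch of that gather step, using byte pointers where the kernel uses page_address() + offset:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    /* Stands in for raid6_empty_zero_page: NULL slots read as zeros. */
    static unsigned char zero_page[PAGE_SIZE];

    /* Mirrors the NULL handling in do_sync_gen_syndrome(): build the
     * srcs[] array the raid6 core consumes. */
    static void gather_srcs(void *srcs[], unsigned char *blocks[],
                            unsigned int offsets[], int disks)
    {
        for (int i = 0; i < disks; i++)
            srcs[i] = blocks[i] ? blocks[i] + offsets[i] : zero_page;
    }

    int main(void)
    {
        unsigned char d0[PAGE_SIZE], p[PAGE_SIZE], q[PAGE_SIZE];
        /* slot layout per the comment above: data 0..disks-3,
         * P at disks-2, Q at disks-1; slot 1 is absent here. */
        unsigned char *blocks[4] = { d0, NULL, p, q };
        unsigned int offsets[4] = { 0, 0, 0, 0 };
        void *srcs[4];

        gather_srcs(srcs, blocks, offsets, 4);
        printf("slot 1 -> zero page? %d\n", srcs[1] == (void *)zero_page);
        return 0;
    }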
/kernel/linux/linux-6.6/crypto/async_tx/
async_raid6_recov.c
    154  struct page **blocks, unsigned int *offs,  (in __2data_recov_4)
    168  p = blocks[disks-2];  (in __2data_recov_4)
    170  q = blocks[disks-1];  (in __2data_recov_4)
    173  a = blocks[faila];  (in __2data_recov_4)
    175  b = blocks[failb];  (in __2data_recov_4)
    204  struct page **blocks, unsigned int *offs,  (in __2data_recov_5)
    222  if (blocks[i] == NULL)  (in __2data_recov_5)
    231  p = blocks[disks-2];  (in __2data_recov_5)
    233  q = blocks[disks-1];  (in __2data_recov_5)
    235  g = blocks[goo…  (in __2data_recov_5)
    definitions:
    153  __2data_recov_4(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    203  __2data_recov_5(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    294  __2data_recov_n(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    393  async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    471  async_raid6_datap_recov(int disks, size_t bytes, int faila, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit)
    [all...]
async_pq.c
    20   /* the struct page *blocks[] parameter passed to async_gen_syndrome()  (comment)
    22    * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]  (comment)
    107  do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,  (definition)
    117  srcs = (void **) blocks;  (in do_sync_gen_syndrome)
    120  if (blocks[i] == NULL) {  (in do_sync_gen_syndrome)
    124  srcs[i] = page_address(blocks[i]) + offsets[i];  (in do_sync_gen_syndrome)
    157   * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1  (comment)
    159   * @disks: number of blocks (includin…  (comment)
    definitions:
    177  async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit)
    272  pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
    298  async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, unsigned int s_off, struct async_submit_ctl *submit)
    [all...]
/kernel/linux/linux-6.6/crypto/
aegis128-core.c
    32   union aegis_block blocks[AEGIS128_STATE_BLOCKS];  (member)
    66   tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1];  (in crypto_aegis128_update)
    68-69  crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], &state->blocks[i]);  (in crypto_aegis128_update)
    70   crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);  (in crypto_aegis128_update)
    83   crypto_aegis_block_xor(&state->blocks[0], msg);  (in crypto_aegis128_update_a)
    95   crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);  (in crypto_aegis128_update_u)
    108  state->blocks[…  (in crypto_aegis128_init)
    [all...]
/kernel/linux/linux-5.10/arch/arm64/crypto/
aes-neonbs-glue.c
    29   int rounds, int blocks);  (prototype)
    31   int rounds, int blocks);  (prototype)
    34   int rounds, int blocks, u8 iv[]);  (prototype)
    37   int rounds, int blocks, u8 iv[], u8 final[]);  (prototype)
    40   int rounds, int blocks, u8 iv[]);  (prototype)
    42   int rounds, int blocks, u8 iv[]);  (prototype)
    46   int rounds, int blocks);  (prototype)
    48   int rounds, int blocks, u8 iv[]);  (prototype)
    99   int rounds, int blocks))  (in __ecb_crypt)
    109  unsigned int blocks  (local in __ecb_crypt)
    definitions and locals:
    97   __ecb_crypt(struct skcipher_request *req, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks))
    169  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in cbc_encrypt)
    192  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in cbc_decrypt)
    240  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in ctr_encrypt)
    318  __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[]))
    358  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in __xts_crypt)
    [all...]
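Note: every asm entry point above takes a whole-block count, which the glue derives per walk step as walk.nbytes / AES_BLOCK_SIZE. A userspace model of that __ecb_crypt() loop; fake_ecb is a hypothetical stand-in for the NEON routine, and the kernel_neon_begin()/kernel_neon_end() bracketing of the real driver is only noted in a comment:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define AES_BLOCK_SIZE 16

    typedef void (*ecb_fn)(uint8_t out[], const uint8_t in[],
                           const uint8_t rk[], int rounds, int blocks);

    /* Stand-in cipher so the sketch runs: real fn is a NEON asm routine. */
    static void fake_ecb(uint8_t out[], const uint8_t in[],
                         const uint8_t rk[], int rounds, int blocks)
    {
        (void)rk; (void)rounds;
        memcpy(out, in, (size_t)blocks * AES_BLOCK_SIZE);
    }

    /* Models __ecb_crypt(): turn the bytes offered by each walk step
     * into whole blocks for one call (kernel_neon_begin/end around it
     * in the real driver). */
    static void ecb_crypt(uint8_t *dst, const uint8_t *src, size_t len,
                          const uint8_t *rk, int rounds, ecb_fn fn)
    {
        while (len >= AES_BLOCK_SIZE) {
            unsigned int blocks = len / AES_BLOCK_SIZE;

            fn(dst, src, rk, rounds, blocks);
            dst += (size_t)blocks * AES_BLOCK_SIZE;
            src += (size_t)blocks * AES_BLOCK_SIZE;
            len -= (size_t)blocks * AES_BLOCK_SIZE;
        }
    }

    int main(void)
    {
        uint8_t in[64] = { 1, 2, 3 }, out[64] = { 0 }, rk[16] = { 0 };

        ecb_crypt(out, in, sizeof(in), rk, 10, fake_ecb);
        printf("out[1] = %d\n", out[1]);
        return 0;
    }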
sha512-ce-glue.c
    30   int blocks);  (prototype)
    32   asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
    35   int blocks)  (in __sha512_ce_transform)
    37   while (blocks) {  (in __sha512_ce_transform)
    41   rem = sha512_ce_transform(sst, src, blocks);  (in __sha512_ce_transform)
    43   src += (blocks - rem) * SHA512_BLOCK_SIZE;  (in __sha512_ce_transform)
    44   blocks = rem;  (in __sha512_ce_transform)
    49   int blocks)  (in __sha512_block_data_order)
    51   sha512_block_data_order(sst->state, src, blocks);  (in __sha512_block_data_order)
    definitions:
    34   __sha512_ce_transform(struct sha512_state *sst, u8 const *src, int blocks)
    48   __sha512_block_data_order(struct sha512_state *sst, u8 const *src, int blocks)
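Note: the sha512-ce and sha2-ce glue (next entry) share one retry loop: the crypto-extensions transform may stop early and return the count of unprocessed blocks, so the wrapper advances src by (blocks - rem) blocks and calls again. A runnable model, with a toy transform that handles at most two blocks per invocation standing in for sha512_ce_transform():

    #include <stdint.h>
    #include <stdio.h>

    #define SHA512_BLOCK_SIZE 128

    /* Same return contract as sha512_ce_transform(): blocks remaining. */
    typedef int (*sha512_fn)(uint64_t state[8], const uint8_t *src, int blocks);

    /* Pretend accelerator that only manages 2 blocks per call. */
    static int partial_transform(uint64_t state[8], const uint8_t *src,
                                 int blocks)
    {
        int done = blocks > 2 ? 2 : blocks;

        for (int b = 0; b < done; b++)
            state[0] ^= src[(size_t)b * SHA512_BLOCK_SIZE]; /* toy mixing */
        return blocks - done;                               /* rem */
    }

    /* Mirrors __sha512_ce_transform(): loop until all blocks consumed. */
    static void transform_all(uint64_t state[8], const uint8_t *src,
                              int blocks, sha512_fn fn)
    {
        while (blocks) {
            int rem = fn(state, src, blocks);

            src += (size_t)(blocks - rem) * SHA512_BLOCK_SIZE;
            blocks = rem;
        }
    }

    int main(void)
    {
        uint64_t state[8] = { 0 };
        uint8_t data[5 * SHA512_BLOCK_SIZE] = { 7 };

        transform_all(state, data, 5, partial_transform);
        printf("state[0] = %llu\n", (unsigned long long)state[0]);
        return 0;
    }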
sha2-ce-glue.c
    34   int blocks);  (prototype)
    37   int blocks)  (in __sha2_ce_transform)
    39   while (blocks) {  (in __sha2_ce_transform)
    44   sst), src, blocks);  (in __sha2_ce_transform)
    46   src += (blocks - rem) * SHA256_BLOCK_SIZE;  (in __sha2_ce_transform)
    47   blocks = rem;  (in __sha2_ce_transform)
    56   asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
    59   int blocks)  (in __sha256_block_data_order)
    61   sha256_block_data_order(sst->state, src, blocks);  (in __sha256_block_data_order)
    definitions:
    36   __sha2_ce_transform(struct sha256_state *sst, u8 const *src, int blocks)
    58   __sha256_block_data_order(struct sha256_state *sst, u8 const *src, int blocks)
aes-glue.c
    75   int rounds, int blocks);  (prototype)
    77   int rounds, int blocks);  (prototype)
    80   int rounds, int blocks, u8 iv[]);  (prototype)
    82   int rounds, int blocks, u8 iv[]);  (prototype)
    90   int rounds, int blocks, u8 ctr[]);  (prototype)
    100  int rounds, int blocks, u8 iv[],  (prototype)
    103  int rounds, int blocks, u8 iv[],  (prototype)
    107  int blocks, u8 dg[], int enc_before,  (prototype)
    179  unsigned int blocks;  (local in ecb_encrypt)
    183  while ((blocks…  (in ecb_encrypt)
    definitions and locals:
    199  unsigned int blocks;  (local in ecb_decrypt)
    219  unsigned int blocks;  (local in cbc_encrypt_walk)
    248  unsigned int blocks;  (local in cbc_decrypt_walk)
    407  unsigned int blocks;  (local in essiv_cbc_encrypt)
    429  unsigned int blocks;  (local in essiv_cbc_decrypt)
    451  int blocks;  (local in ctr_encrypt)
    849  mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks, u8 dg[], int enc_before, int enc_after)
    891  int blocks = len / AES_BLOCK_SIZE;  (local in mac_update)
    [all...]
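Note: mac_do_update() (line 849) is the CBC-MAC workhorse behind the CMAC/XCBC templates here: XOR each input block into the digest dg[] and cipher the digest between blocks, with enc_before/enc_after controlling the cipher calls around the first and last block. A sketch modeled on the scalar fallback path; cipher_stub is a placeholder permutation where the kernel runs the real AES encryption with the expanded key in ctx:

    #include <stdint.h>
    #include <stdio.h>

    #define AES_BLOCK_SIZE 16

    /* Placeholder for aes_encrypt(ctx, dg, dg) on the fallback path. */
    static void cipher_stub(uint8_t dg[AES_BLOCK_SIZE])
    {
        for (int i = 0; i < AES_BLOCK_SIZE; i++)
            dg[i] = (uint8_t)(dg[i] * 5 + 1);
    }

    /* Mirrors the scalar shape of mac_do_update(): CBC-MAC chaining.
     * dg is always ciphered between blocks; enc_before/enc_after decide
     * the extra calls before the first and after the last block. */
    static void mac_do_update(const uint8_t *in, int blocks, uint8_t dg[],
                              int enc_before, int enc_after)
    {
        if (enc_before)
            cipher_stub(dg);

        while (blocks--) {
            for (int i = 0; i < AES_BLOCK_SIZE; i++)
                dg[i] ^= in[i];
            in += AES_BLOCK_SIZE;
            if (blocks || enc_after)
                cipher_stub(dg);
        }
    }

    int main(void)
    {
        uint8_t dg[AES_BLOCK_SIZE] = { 0 }, msg[2 * AES_BLOCK_SIZE] = { 42 };

        mac_do_update(msg, 2, dg, 0, 1);
        printf("dg[0] = 0x%02x\n", dg[0]);
        return 0;
    }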
ghash-ce-glue.c
    49   asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,  (prototype)
    52   asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,  (prototype)
    71   static void ghash_do_update(int blocks, u64 dg[], const char *src,  (definition)
    81   blocks++;  (in ghash_do_update)
    89   } while (--blocks);  (in ghash_do_update)
    96   void ghash_do_simd_update(int blocks, u64 dg[], const char *src,  (definition)
    98   void (*simd_update)(int blocks, u64 dg[],  (in ghash_do_simd_update)
    105  simd_update(blocks, dg, src, key->h, head);  (in ghash_do_simd_update)
    108  ghash_do_update(blocks, dg, src, key, head);  (in ghash_do_simd_update)
    125  int blocks;  (local in ghash_update)
    locals:
    288  int blocks = count / GHASH_BLOCK_SIZE;  (local in gcm_update_mac)
    398  int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in gcm_encrypt)
    512  int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in gcm_decrypt)
    [all...]
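Note: ghash_do_simd_update() picks the PMULL routine only when NEON is usable in the calling context and otherwise falls back to the scalar ghash_do_update(). A compilable model of that dispatch; both update functions here are toy stand-ins for the real GF(2^128) multiply-accumulate, and the may_use_simd flag replaces the kernel's context check:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define GHASH_BLOCK_SIZE 16

    typedef void (*ghash_simd_fn)(int blocks, uint64_t dg[2],
                                  const uint8_t *src, const uint8_t *h,
                                  const uint8_t *head);

    /* Toy scalar fallback standing in for ghash_do_update(). */
    static void scalar_update(int blocks, uint64_t dg[2], const uint8_t *src)
    {
        while (blocks--) {
            dg[0] ^= src[0];
            src += GHASH_BLOCK_SIZE;
        }
    }

    /* Toy "accelerated" routine standing in for pmull_ghash_update_p64. */
    static void fake_pmull(int blocks, uint64_t dg[2], const uint8_t *src,
                           const uint8_t *h, const uint8_t *head)
    {
        (void)h; (void)head;
        scalar_update(blocks, dg, src);
    }

    /* Shape of ghash_do_simd_update(): SIMD path only when allowed; in
     * the kernel the SIMD call sits inside kernel_neon_begin()/end(). */
    static void do_simd_update(int blocks, uint64_t dg[2], const uint8_t *src,
                               const uint8_t *h, ghash_simd_fn simd_update,
                               int may_use_simd)
    {
        if (may_use_simd)
            simd_update(blocks, dg, src, h, NULL);
        else
            scalar_update(blocks, dg, src);
    }

    int main(void)
    {
        uint64_t dg[2] = { 0, 0 };
        uint8_t buf[2 * GHASH_BLOCK_SIZE] = { 9 };

        do_simd_update(2, dg, buf, NULL, fake_pmull, 1);
        printf("dg[0] = %llu\n", (unsigned long long)dg[0]);
        return 0;
    }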
/kernel/linux/linux-6.6/arch/x86/crypto/
ecb_cbc_helpers.h
    32   #define ECB_WALK_ADVANCE(blocks) do { \
    33       dst += (blocks) * __bsize; \
    34       src += (blocks) * __bsize; \
    35       nbytes -= (blocks) * __bsize; \
    38   #define ECB_BLOCK(blocks, func) do { \
    39       const int __blocks = (blocks); \
    46       ECB_WALK_ADVANCE(blocks); \
    61   #define CBC_DEC_BLOCK(blocks, func) do { \
    62       const int __blocks = (blocks); \
    68       const u8 *__iv = src + ((blocks)…
    [all...]
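Note: these helpers are plain macros that expect dst, src, nbytes and __bsize to be in scope and advance all three by whole blocks per cipher call. A cut-down, runnable re-creation under those assumptions (no skcipher walk, no SIMD begin/end; copy8 is a stand-in for a multi-block cipher routine, and the ECB_BLOCK body here is simplified relative to the header):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define ECB_WALK_ADVANCE(blocks) do {   \
        dst += (blocks) * __bsize;          \
        src += (blocks) * __bsize;          \
        nbytes -= (blocks) * __bsize;       \
    } while (0)

    #define ECB_BLOCK(blocks, func) do {                \
        const int __blocks = (blocks);                  \
        while (nbytes >= (size_t)__blocks * __bsize) {  \
            func(dst, src, __blocks * __bsize);         \
            ECB_WALK_ADVANCE(__blocks);                 \
        }                                               \
    } while (0)

    /* Stand-in for an 8-block SIMD cipher call. */
    static void copy8(uint8_t *dst, const uint8_t *src, size_t n)
    {
        memcpy(dst, src, n);
    }

    int main(void)
    {
        const size_t __bsize = 16;
        uint8_t in[160] = { 0 }, out[160];
        const uint8_t *src = in;
        uint8_t *dst = out;
        size_t nbytes = sizeof(in);

        ECB_BLOCK(8, copy8);   /* bulk path: 8 blocks per call */
        ECB_BLOCK(1, copy8);   /* tail path: one block at a time */
        printf("left: %zu bytes\n", nbytes);
        return 0;
    }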
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
i915_buddy.c
    112  struct list_head *blocks,  (in igt_check_blocks)
    125  list_for_each_entry(block, blocks, link) {  (in igt_check_blocks)
    328  LIST_HEAD(blocks);  (in igt_buddy_alloc_smoke)
    364  list_add_tail(&block->link, &blocks);  (in igt_buddy_alloc_smoke)
    381  err = igt_check_blocks(&mm, &blocks, total, false);  (in igt_buddy_alloc_smoke)
    383  i915_buddy_free_list(&mm, &blocks);  (in igt_buddy_alloc_smoke)
    413  LIST_HEAD(blocks);  (in igt_buddy_alloc_pessimistic)
    438  list_add_tail(&block->link, &blocks);  (in igt_buddy_alloc_pessimistic)
    448  list_add_tail(&block->link, &blocks);  (in igt_buddy_alloc_pessimistic)
    456  list_add_tail(&block->link, &blocks);  (in igt_buddy_alloc_pessimistic)
    definitions:
    111  igt_check_blocks(struct i915_buddy_mm *mm, struct list_head *blocks, u64 expected_size, bool is_contiguous)
    [all...]
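Note: igt_check_blocks() walks the list of blocks handed back by the buddy allocator and verifies the sizes add up to the requested total. A userspace model of that check, using a plain singly linked list where the selftest chains blocks on a list_head:

    #include <stdio.h>
    #include <stdint.h>

    struct block {
        uint64_t offset;
        uint64_t size;
        struct block *next;   /* stands in for the kernel list_head */
    };

    /* Same idea as igt_check_blocks(): total the allocated blocks and
     * compare against what the allocator was asked for. */
    static int check_blocks(const struct block *head, uint64_t expected_size)
    {
        uint64_t total = 0;

        for (const struct block *b = head; b; b = b->next)
            total += b->size;
        return total == expected_size;
    }

    int main(void)
    {
        struct block b2 = { 4096, 4096, NULL };
        struct block b1 = { 0, 4096, &b2 };

        printf("ok = %d\n", check_blocks(&b1, 8192));
        return 0;
    }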
/kernel/linux/linux-6.6/arch/arm64/crypto/
aes-neonbs-glue.c
    29   int rounds, int blocks);  (prototype)
    31   int rounds, int blocks);  (prototype)
    34   int rounds, int blocks, u8 iv[]);  (prototype)
    37   int rounds, int blocks, u8 iv[]);  (prototype)
    40   int rounds, int blocks, u8 iv[]);  (prototype)
    42   int rounds, int blocks, u8 iv[]);  (prototype)
    46   int rounds, int blocks);  (prototype)
    48   int rounds, int blocks, u8 iv[]);  (prototype)
    96   int rounds, int blocks))  (in __ecb_crypt)
    106  unsigned int blocks  (local in __ecb_crypt)
    definitions and locals:
    94   __ecb_crypt(struct skcipher_request *req, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks))
    166  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in cbc_encrypt)
    189  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in cbc_decrypt)
    217  int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;  (local in ctr_encrypt)
    276  __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[]))
    316  int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;  (local in __xts_crypt)
    [all...]
sha512-ce-glue.c
    30   int blocks);  (prototype)
    32   asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
    35   int blocks)  (in __sha512_ce_transform)
    37   while (blocks) {  (in __sha512_ce_transform)
    41   rem = sha512_ce_transform(sst, src, blocks);  (in __sha512_ce_transform)
    43   src += (blocks - rem) * SHA512_BLOCK_SIZE;  (in __sha512_ce_transform)
    44   blocks = rem;  (in __sha512_ce_transform)
    49   int blocks)  (in __sha512_block_data_order)
    51   sha512_block_data_order(sst->state, src, blocks);  (in __sha512_block_data_order)
    definitions:
    34   __sha512_ce_transform(struct sha512_state *sst, u8 const *src, int blocks)
    48   __sha512_block_data_order(struct sha512_state *sst, u8 const *src, int blocks)
sha2-ce-glue.c
    34   int blocks);  (prototype)
    37   int blocks)  (in __sha2_ce_transform)
    39   while (blocks) {  (in __sha2_ce_transform)
    44   sst), src, blocks);  (in __sha2_ce_transform)
    46   src += (blocks - rem) * SHA256_BLOCK_SIZE;  (in __sha2_ce_transform)
    47   blocks = rem;  (in __sha2_ce_transform)
    56   asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
    59   int blocks)  (in __sha256_block_data_order)
    61   sha256_block_data_order(sst->state, src, blocks);  (in __sha256_block_data_order)
    definitions:
    36   __sha2_ce_transform(struct sha256_state *sst, u8 const *src, int blocks)
    58   __sha256_block_data_order(struct sha256_state *sst, u8 const *src, int blocks)
/kernel/linux/linux-6.6/arch/m68k/emu/
nfblock.c
    40   static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks,  (definition)
    44   virt_to_phys(blocks), virt_to_phys(blocksize));  (in nfhd_get_capacity)
    55   u32 blocks, bsize;  (member)
    84   geo->cylinders = dev->blocks >> (6 - dev->bshift);  (in nfhd_getgeo)
    97   static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)  (definition)
    103-104  pr_info("nfhd%u: found device with %u blocks (%u bytes)\n", dev_id, blocks, bsize);  (in nfhd_init_one)
    116  dev->blocks = blocks;  (in nfhd_init_one)
    130  set_capacity(dev->disk, (sector_t)blocks * (bsiz…  (in nfhd_init_one)
    150  u32 blocks, bsize;  (local in nfhd_init)
    [all...]
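Note: the driver reports capacity in 512-byte sectors, so set_capacity() gets blocks * (bsize / 512), and the fake geometry divides the 512-byte sector count by 64 (the driver's fixed heads-times-sectors product), which with bshift = log2(bsize / 512) collapses to blocks >> (6 - bshift). A worked example with hypothetical device numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t blocks = 262144, bsize = 4096;   /* hypothetical device */
        uint32_t bshift = 0;

        for (uint32_t b = bsize / 512; b > 1; b >>= 1)
            bshift++;                             /* bshift = log2(bsize/512) */

        /* set_capacity() argument: capacity in 512-byte sectors. */
        uint64_t sectors = (uint64_t)blocks * (bsize / 512);

        /* nfhd_getgeo(): 64 sectors per cylinder, hence the >> 6. */
        uint32_t cylinders = blocks >> (6 - bshift);

        printf("%llu sectors, %u cylinders\n",
               (unsigned long long)sectors, (unsigned)cylinders);
        return 0;
    }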
/kernel/linux/linux-5.10/drivers/mtd/
rfd_ftl.c
    88   struct block *blocks;  (member)
    95   struct block *block = &part->blocks[block_no];  (in build_block_map)
    188  part->blocks = kcalloc(part->total_blocks, sizeof(struct block),  (in scan_header)
    190  if (!part->blocks)  (in scan_header)
    238  kfree(part->blocks);  (in scan_header)
    280  erase->addr = part->blocks[block].offset;  (in erase_block)
    283  part->blocks[block].state = BLOCK_ERASING;  (in erase_block)
    284  part->blocks[block].free_sectors = 0;  (in erase_block)
    291  part->blocks[block].state = BLOCK_FAILED;  (in erase_block)
    292  part->blocks[bloc…  (in erase_block)
    [all...]
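Note: rfd_ftl keeps one struct block per erase unit, kcalloc()ed at scan time, and drives a small per-block state machine during erase (BLOCK_ERASING while the MTD erase runs, BLOCK_FAILED on error). A hedged userspace model of that bookkeeping; the field names follow the listing, but error handling is simplified to a flag:

    #include <stdio.h>
    #include <stdlib.h>

    enum block_state { BLOCK_OK, BLOCK_ERASING, BLOCK_FAILED };

    /* Per-erase-block bookkeeping, modelled on rfd_ftl's struct block. */
    struct block {
        enum block_state state;
        unsigned long offset;
        int free_sectors;
    };

    struct partition {
        int total_blocks;
        struct block *blocks;   /* one entry per erase block */
    };

    /* scan_header() allocates the zeroed table with kcalloc(). */
    static int scan_header(struct partition *part)
    {
        part->blocks = calloc(part->total_blocks, sizeof(struct block));
        return part->blocks ? 0 : -1;
    }

    /* erase_block() state transitions as the matched lines show. */
    static void erase_block(struct partition *part, int block, int mtd_err)
    {
        part->blocks[block].state = BLOCK_ERASING;
        part->blocks[block].free_sectors = 0;
        if (mtd_err)
            part->blocks[block].state = BLOCK_FAILED;
    }

    int main(void)
    {
        struct partition part = { .total_blocks = 8 };

        if (scan_header(&part))
            return 1;
        erase_block(&part, 3, 0);
        printf("block 3 state = %d\n", part.blocks[3].state);
        free(part.blocks);
        return 0;
    }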
/kernel/linux/linux-6.6/drivers/mtd/
rfd_ftl.c
    88   struct block *blocks;  (member)
    95   struct block *block = &part->blocks[block_no];  (in build_block_map)
    188  part->blocks = kcalloc(part->total_blocks, sizeof(struct block),  (in scan_header)
    190  if (!part->blocks)  (in scan_header)
    235  kfree(part->blocks);  (in scan_header)
    277  erase->addr = part->blocks[block].offset;  (in erase_block)
    280  part->blocks[block].state = BLOCK_ERASING;  (in erase_block)
    281  part->blocks[block].free_sectors = 0;  (in erase_block)
    288  part->blocks[block].state = BLOCK_FAILED;  (in erase_block)
    289  part->blocks[bloc…  (in erase_block)
    [all...]
/kernel/linux/linux-5.10/arch/arm/crypto/
aes-neonbs-glue.c
    29   int rounds, int blocks);  (prototype)
    31   int rounds, int blocks);  (prototype)
    34   int rounds, int blocks, u8 iv[]);  (prototype)
    37   int rounds, int blocks, u8 ctr[], u8 final[]);  (prototype)
    40   int rounds, int blocks, u8 iv[], int);  (prototype)
    42   int rounds, int blocks, u8 iv[], int);  (prototype)
    87   int rounds, int blocks))  (in __ecb_crypt)
    97   unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in __ecb_crypt)
    100  blocks = round_down(blocks,…  (in __ecb_crypt)
    definitions and locals:
    85   __ecb_crypt(struct skcipher_request *req, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks))
    171  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in cbc_decrypt)
    243  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in ctr_encrypt)
    341  __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[], int))
    373  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in __xts_crypt)
    [all...]
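Note: the bit-sliced NEON code processes 8 AES blocks per invocation, so the glue rounds each non-final walk step down to a multiple of 8 blocks (the truncated round_down(blocks, ...) at line 100 here; the 6.6 arm64 entries above spell it as & ~7). The core of that adjustment:

    #include <stdio.h>

    /* Non-final walk steps must hand the bit-sliced code whole groups
     * of 8 blocks; the final step may pass the ragged remainder. */
    static unsigned int usable_blocks(unsigned int blocks, int final_step)
    {
        return final_step ? blocks : (blocks & ~7u);
    }

    int main(void)
    {
        printf("%u %u\n", usable_blocks(13, 0), usable_blocks(13, 1));
        return 0;   /* prints "8 13" */
    }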
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/
intel_memory_region.c
    35   struct list_head *blocks)  (in intel_memory_region_free_pages)
    40   list_for_each_entry_safe(block, on, blocks, link) {  (in intel_memory_region_free_pages)
    44   INIT_LIST_HEAD(blocks);  (in intel_memory_region_free_pages)
    51   struct list_head *blocks)  (in __intel_memory_region_put_pages_buddy)
    54   mem->avail += intel_memory_region_free_pages(mem, blocks);  (in __intel_memory_region_put_pages_buddy)
    61   struct list_head blocks;  (local in __intel_memory_region_put_block_buddy)
    63   INIT_LIST_HEAD(&blocks);  (in __intel_memory_region_put_block_buddy)
    64   list_add(&block->link, &blocks);  (in __intel_memory_region_put_block_buddy)
    65   __intel_memory_region_put_pages_buddy(block->private, &blocks);  (in __intel_memory_region_put_block_buddy)
    72   struct list_head *blocks)  (in __intel_memory_region_get_pages_buddy)
    definitions:
    34   intel_memory_region_free_pages(struct intel_memory_region *mem, struct list_head *blocks)
    50   __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem, struct list_head *blocks)
    69   __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, resource_size_t size, unsigned int flags, struct list_head *blocks)
    [all...]
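Note: __intel_memory_region_put_block_buddy() frees a single block by putting it on a temporary one-entry list and reusing the bulk put-pages path, instead of duplicating the free logic. The same pattern, modelled with a plain pointer list:

    #include <stdio.h>

    struct block {
        int id;
        struct block *next;   /* stands in for the kernel's list_head */
    };

    /* Bulk path, like __intel_memory_region_put_pages_buddy() feeding
     * intel_memory_region_free_pages(): free every block on the list. */
    static void put_pages(struct block *head)
    {
        for (struct block *b = head; b; b = b->next)
            printf("freeing block %d\n", b->id);
    }

    /* Single-block path: wrap the block in a one-entry list and reuse
     * the bulk path, as __intel_memory_region_put_block_buddy() does. */
    static void put_block(struct block *b)
    {
        b->next = NULL;
        put_pages(b);
    }

    int main(void)
    {
        struct block b = { 42, NULL };

        put_block(&b);
        return 0;
    }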
/kernel/linux/linux-5.10/arch/m68k/emu/
nfblock.c
    41   static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks,  (definition)
    45   virt_to_phys(blocks), virt_to_phys(blocksize));  (in nfhd_get_capacity)
    56   u32 blocks, bsize;  (member)
    87   geo->cylinders = dev->blocks >> (6 - dev->bshift);  (in nfhd_getgeo)
    100  static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)  (definition)
    105-106  pr_info("nfhd%u: found device with %u blocks (%u bytes)\n", dev_id, blocks, bsize);  (in nfhd_init_one)
    118  dev->blocks = blocks;  (in nfhd_init_one)
    137  set_capacity(dev->disk, (sector_t)blocks * (bsiz…  (in nfhd_init_one)
    156  u32 blocks, bsize;  (local in nfhd_init)
    [all...]
/kernel/linux/linux-5.10/drivers/mfd/
stmpe.c
    29    * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)  (comment)
    39   unsigned int blocks;  (member)
    47   static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)  (definition)
    49   return stmpe->variant->enable(stmpe, blocks, true);  (in __stmpe_enable)
    52   static int __stmpe_disable(struct stmpe *stmpe, unsigned int blocks)  (definition)
    54   return stmpe->variant->enable(stmpe, blocks, false);  (in __stmpe_disable)
    128   * stmpe_enable - enable blocks on an STMPE device  (comment)
    130   * @blocks: Mask of blocks (enu…  (comment)
    definitions:
    132  stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
    149  stmpe_disable(struct stmpe *stmpe, unsigned int blocks)
    412  stmpe801_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    529  stmpe811_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    644  stmpe1600_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    778  stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    879  stmpe1801_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    1001 stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    [all...]
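Note: stmpe_enable()/stmpe_disable() take a bitmask of functional blocks and forward it to the per-variant enable hook with a true/false flag, which is why every stmpeXXX_enable() above shares the same signature. A compilable model of that dispatch; the BLOCK_* values below are illustrative stand-ins, the real masks being the STMPE_BLOCK_* constants in include/linux/mfd/stmpe.h:

    #include <stdio.h>

    #define BLOCK_GPIO        (1u << 0)   /* illustrative values */
    #define BLOCK_KEYPAD      (1u << 1)
    #define BLOCK_TOUCHSCREEN (1u << 2)

    struct stmpe;

    struct stmpe_variant {
        int (*enable)(struct stmpe *stmpe, unsigned int blocks, int enable);
    };

    struct stmpe {
        const struct stmpe_variant *variant;
    };

    /* A variant hook: decode the block bitmask into per-block actions
     * (register writes on the real device). */
    static int demo_enable(struct stmpe *stmpe, unsigned int blocks,
                           int enable)
    {
        if (blocks & BLOCK_GPIO)
            printf("GPIO %s\n", enable ? "on" : "off");
        if (blocks & BLOCK_KEYPAD)
            printf("keypad %s\n", enable ? "on" : "off");
        return 0;
    }

    /* Same shape as __stmpe_enable()/__stmpe_disable() in the listing. */
    static int stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
    {
        return stmpe->variant->enable(stmpe, blocks, 1);
    }

    int main(void)
    {
        const struct stmpe_variant v = { demo_enable };
        struct stmpe dev = { &v };

        return stmpe_enable(&dev, BLOCK_GPIO | BLOCK_KEYPAD);
    }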
/kernel/linux/linux-6.6/drivers/mfd/
stmpe.c
    28    * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)  (comment)
    35   unsigned int blocks;  (member)
    41   static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)  (definition)
    43   return stmpe->variant->enable(stmpe, blocks, true);  (in __stmpe_enable)
    46   static int __stmpe_disable(struct stmpe *stmpe, unsigned int blocks)  (definition)
    48   return stmpe->variant->enable(stmpe, blocks, false);  (in __stmpe_disable)
    122   * stmpe_enable - enable blocks on an STMPE device  (comment)
    124   * @blocks: Mask of blocks (enu…  (comment)
    definitions:
    126  stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
    143  stmpe_disable(struct stmpe *stmpe, unsigned int blocks)
    408  stmpe801_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    527  stmpe811_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    642  stmpe1600_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    776  stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    877  stmpe1801_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    999  stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks, bool enable)
    [all...]
/kernel/linux/linux-6.6/arch/arm/crypto/
aes-neonbs-glue.c
    32   int rounds, int blocks);  (prototype)
    34   int rounds, int blocks);  (prototype)
    37   int rounds, int blocks, u8 iv[]);  (prototype)
    40   int rounds, int blocks, u8 ctr[]);  (prototype)
    43   int rounds, int blocks, u8 iv[], int);  (prototype)
    45   int rounds, int blocks, u8 iv[], int);  (prototype)
    90   int rounds, int blocks))  (in __ecb_crypt)
    100  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in __ecb_crypt)
    103  blocks = round_down(blocks,…  (in __ecb_crypt)
    definitions and locals:
    88   __ecb_crypt(struct skcipher_request *req, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks))
    174  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in cbc_decrypt)
    337  __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[], int))
    369  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  (local in __xts_crypt)
    [all...]