/kernel/linux/linux-5.10/drivers/md/ |
H A D | dm-region-hash.c |
    103 struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */ member
    117 static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) in dm_rh_sector_to_region() argument
    119 return sector >> rh->region_shift; in dm_rh_sector_to_region()
    122 sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) in dm_rh_region_to_sector() argument
    124 return region << rh->region_shift; in dm_rh_region_to_sector()
    128 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) in dm_rh_bio_to_region() argument
    130 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - in dm_rh_bio_to_region()
    131 rh->target_begin); in dm_rh_bio_to_region()
    137 return reg->rh->context; in dm_rh_region_context()
    147 sector_t dm_rh_get_region_size(struct dm_region_hash *rh) in dm_rh_get_region_size() argument
    170 struct dm_region_hash *rh; dm_region_hash_create() local
    237 dm_region_hash_destroy(struct dm_region_hash *rh) dm_region_hash_destroy() argument
    260 dm_rh_dirty_log(struct dm_region_hash *rh) dm_rh_dirty_log() argument
    266 rh_hash(struct dm_region_hash *rh, region_t region) rh_hash() argument
    271 __rh_lookup(struct dm_region_hash *rh, region_t region) __rh_lookup() argument
    283 __rh_insert(struct dm_region_hash *rh, struct dm_region *reg) __rh_insert() argument
    288 __rh_alloc(struct dm_region_hash *rh, region_t region) __rh_alloc() argument
    324 __rh_find(struct dm_region_hash *rh, region_t region) __rh_find() argument
    338 dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block) dm_rh_get_state() argument
    366 struct dm_region_hash *rh = reg->rh; complete_resync_work() local
    395 dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) dm_rh_mark_nosync() argument
    440 dm_rh_update_states(struct dm_region_hash *rh, int errors_handled) dm_rh_update_states() argument
    503 rh_inc(struct dm_region_hash *rh, region_t region) rh_inc() argument
    526 dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) dm_rh_inc_pending() argument
    538 dm_rh_dec(struct dm_region_hash *rh, region_t region) dm_rh_dec() argument
    587 __rh_recovery_prepare(struct dm_region_hash *rh) __rh_recovery_prepare() argument
    622 dm_rh_recovery_prepare(struct dm_region_hash *rh) dm_rh_recovery_prepare() argument
    645 dm_rh_recovery_start(struct dm_region_hash *rh) dm_rh_recovery_start() argument
    663 struct dm_region_hash *rh = reg->rh; dm_rh_recovery_end() local
    678 dm_rh_recovery_in_flight(struct dm_region_hash *rh) dm_rh_recovery_in_flight() argument
    684 dm_rh_flush(struct dm_region_hash *rh) dm_rh_flush() argument
    690 dm_rh_delay(struct dm_region_hash *rh, struct bio *bio) dm_rh_delay() argument
    701 dm_rh_stop_recovery(struct dm_region_hash *rh) dm_rh_stop_recovery() argument
    711 dm_rh_start_recovery(struct dm_region_hash *rh) dm_rh_start_recovery() argument
    [all...] |
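The sector/region conversions matched above are plain power-of-two shifts: region_shift is log2 of the region size in sectors, and dm_rh_bio_to_region() subtracts the target's start sector first. A minimal user-space model of that arithmetic (the struct, names and shift value below are illustrative, not the kernel's):

/* Standalone model, not the kernel code: how dm-region-hash maps
 * sectors to regions when the region size is a power of two. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
typedef uint64_t region_t;

struct region_hash_model {
	unsigned int region_shift;	/* log2(region size in sectors) */
	sector_t target_begin;		/* first sector of the target */
};

static region_t sector_to_region(const struct region_hash_model *rh, sector_t sector)
{
	return sector >> rh->region_shift;	/* divide by region size */
}

static sector_t region_to_sector(const struct region_hash_model *rh, region_t region)
{
	return region << rh->region_shift;	/* multiply by region size */
}

int main(void)
{
	/* 512 KiB regions = 1024 sectors of 512 bytes -> shift of 10 */
	struct region_hash_model rh = { .region_shift = 10, .target_begin = 2048 };
	sector_t bio_sector = 5000;

	/* dm_rh_bio_to_region() subtracts target_begin before shifting */
	region_t reg = sector_to_region(&rh, bio_sector - rh.target_begin);

	printf("sector %llu -> region %llu (region starts at sector %llu)\n",
	       (unsigned long long)bio_sector,
	       (unsigned long long)reg,
	       (unsigned long long)(region_to_sector(&rh, reg) + rh.target_begin));
	return 0;
}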
H A D | dm-raid1.c |
    65 struct dm_region_hash *rh; member
    336 sector_t region_size = dm_rh_get_region_size(ms->rh); in recover()
    341 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
    360 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
    387 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in do_recovery()
    392 dm_rh_recovery_prepare(ms->rh); in do_recovery()
    397 while ((reg = dm_rh_recovery_start(ms->rh))) in do_recovery()
    439 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_available()
    440 region_t region = dm_rh_bio_to_region(ms->rh, bio); in mirror_available()
    555 int state = dm_rh_get_state(ms->rh, regio in region_in_sync()
    [all...] |
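The do_recovery() matches above show the handshake a mirror-style target drives against the region hash: queue out-of-sync regions, claim them one at a time once quiesced, and report each outcome. A rough sketch of that loop, assuming kernel context and the dm_rh_* prototypes from dm-region-hash.h; recover_region() is a hypothetical placeholder, and the real driver completes regions asynchronously from its kcopyd callback rather than inline as shown here:

/* Sketch only (not the dm-raid1 code): assumes kernel context and the
 * dm_rh_* API from <linux/dm-region-hash.h>. */
static void drive_recovery(struct dm_region_hash *rh)
{
	struct dm_region *reg;

	/* Pull regions that the dirty log says need resync onto the
	 * recovery list... */
	dm_rh_recovery_prepare(rh);

	/* ...then claim them one at a time once writes have drained. */
	while ((reg = dm_rh_recovery_start(rh))) {
		int success = recover_region(reg);	/* hypothetical helper */

		/* Tell the region hash (and through it the dirty log)
		 * how this region ended up. */
		dm_rh_recovery_end(reg, success);
	}
}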
/kernel/linux/linux-6.6/drivers/md/ |
H A D | dm-region-hash.c |
    106 struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */ member
    120 static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) in dm_rh_sector_to_region() argument
    122 return sector >> rh->region_shift; in dm_rh_sector_to_region()
    125 sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) in dm_rh_region_to_sector() argument
    127 return region << rh->region_shift; in dm_rh_region_to_sector()
    131 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) in dm_rh_bio_to_region() argument
    133 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - in dm_rh_bio_to_region()
    134 rh->target_begin); in dm_rh_bio_to_region()
    140 return reg->rh->context; in dm_rh_region_context()
    150 sector_t dm_rh_get_region_size(struct dm_region_hash *rh) in dm_rh_get_region_size() argument
    173 struct dm_region_hash *rh; dm_region_hash_create() local
    240 dm_region_hash_destroy(struct dm_region_hash *rh) dm_region_hash_destroy() argument
    263 dm_rh_dirty_log(struct dm_region_hash *rh) dm_rh_dirty_log() argument
    269 rh_hash(struct dm_region_hash *rh, region_t region) rh_hash() argument
    274 __rh_lookup(struct dm_region_hash *rh, region_t region) __rh_lookup() argument
    286 __rh_insert(struct dm_region_hash *rh, struct dm_region *reg) __rh_insert() argument
    291 __rh_alloc(struct dm_region_hash *rh, region_t region) __rh_alloc() argument
    327 __rh_find(struct dm_region_hash *rh, region_t region) __rh_find() argument
    341 dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block) dm_rh_get_state() argument
    369 struct dm_region_hash *rh = reg->rh; complete_resync_work() local
    398 dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) dm_rh_mark_nosync() argument
    443 dm_rh_update_states(struct dm_region_hash *rh, int errors_handled) dm_rh_update_states() argument
    506 rh_inc(struct dm_region_hash *rh, region_t region) rh_inc() argument
    529 dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) dm_rh_inc_pending() argument
    541 dm_rh_dec(struct dm_region_hash *rh, region_t region) dm_rh_dec() argument
    590 __rh_recovery_prepare(struct dm_region_hash *rh) __rh_recovery_prepare() argument
    625 dm_rh_recovery_prepare(struct dm_region_hash *rh) dm_rh_recovery_prepare() argument
    648 dm_rh_recovery_start(struct dm_region_hash *rh) dm_rh_recovery_start() argument
    666 struct dm_region_hash *rh = reg->rh; dm_rh_recovery_end() local
    681 dm_rh_recovery_in_flight(struct dm_region_hash *rh) dm_rh_recovery_in_flight() argument
    687 dm_rh_flush(struct dm_region_hash *rh) dm_rh_flush() argument
    693 dm_rh_delay(struct dm_region_hash *rh, struct bio *bio) dm_rh_delay() argument
    704 dm_rh_stop_recovery(struct dm_region_hash *rh) dm_rh_stop_recovery() argument
    714 dm_rh_start_recovery(struct dm_region_hash *rh) dm_rh_start_recovery() argument
    [all...] |
H A D | dm-raid1.c |
    70 struct dm_region_hash *rh; member
    342 sector_t region_size = dm_rh_get_region_size(ms->rh); in recover()
    347 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
    366 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
    393 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in do_recovery()
    398 dm_rh_recovery_prepare(ms->rh); in do_recovery()
    403 while ((reg = dm_rh_recovery_start(ms->rh))) in do_recovery()
    447 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_available()
    448 region_t region = dm_rh_bio_to_region(ms->rh, bio); in mirror_available()
    563 int state = dm_rh_get_state(ms->rh, regio in region_in_sync()
    [all...] |
/kernel/linux/linux-6.6/drivers/i3c/master/mipi-i3c-hci/ |
H A D | dma.c |
    55 #define rh_reg_read(r) readl(rh->regs + (RH_##r))
    56 #define rh_reg_write(r, v) writel(v, rh->regs + (RH_##r))
    168 struct hci_rh_data *rh; in hci_dma_cleanup() local
    175 rh = &rings->headers[i]; in hci_dma_cleanup()
    182 if (rh->xfer) in hci_dma_cleanup()
    184 rh->xfer_struct_sz * rh->xfer_entries, in hci_dma_cleanup()
    185 rh->xfer, rh->xfer_dma); in hci_dma_cleanup()
    186 if (rh in hci_dma_cleanup()
    211 struct hci_rh_data *rh; hci_dma_init() local
    358 struct hci_rh_data *rh; hci_dma_queue_xfer() local
    443 struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number]; hci_dma_dequeue_xfer() local
    495 hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh) hci_dma_xfer_done() argument
    582 hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh) hci_dma_process_ibi() argument
    738 struct hci_rh_data *rh; hci_dma_irq_handler() local
    [all...] |
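The rh_reg_read()/rh_reg_write() macros matched above rely on a local variable named rh being in scope and token-paste the RH_ register name into an offset from the ring header's register base. A standalone model of that accessor idiom, with an ordinary buffer standing in for the MMIO region (the register names and offsets here are invented, not the real HCI layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented register offsets for illustration only. */
#define RH_RING_CONTROL  0x00
#define RH_RING_STATUS   0x04

struct ring_header_model {
	uint8_t *regs;		/* an __iomem pointer in the real driver */
};

/* Same idiom as the driver macros: they expect a local variable named
 * `rh` to be in scope and paste "RH_" onto the register name.
 * readl()/writel() are modelled with plain 32-bit accesses. */
#define rh_reg_read(r)     (*(volatile uint32_t *)(rh->regs + (RH_##r)))
#define rh_reg_write(r, v) (*(volatile uint32_t *)(rh->regs + (RH_##r)) = (v))

int main(void)
{
	uint32_t fake_mmio[16];			/* stands in for the mapped registers */
	struct ring_header_model ring = { .regs = (uint8_t *)fake_mmio };
	struct ring_header_model *rh = &ring;	/* the name `rh` matters to the macros */

	memset(fake_mmio, 0, sizeof(fake_mmio));
	rh_reg_write(RING_CONTROL, 0x1);	/* expands to rh->regs + RH_RING_CONTROL */
	printf("control=%u status=%u\n",
	       (unsigned)rh_reg_read(RING_CONTROL),
	       (unsigned)rh_reg_read(RING_STATUS));
	return 0;
}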
/kernel/linux/linux-5.10/include/linux/ |
H A D | dm-region-hash.h |
    43 void dm_region_hash_destroy(struct dm_region_hash *rh);
    45 struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);
    50 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
    51 sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
    57 sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
    64 int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
    65 void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
    69 void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);
    72 int dm_rh_flush(struct dm_region_hash *rh);
    75 void dm_rh_inc_pending(struct dm_region_hash *rh, struc
    [all...] |
H A D | math64.h |
    203 } rl, rm, rn, rh, a0, b0; in mul_u64_u64_shr() local
    212 rh.ll = mul_u32_u32(a0.l.high, b0.l.high); in mul_u64_u64_shr()
    220 rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; in mul_u64_u64_shr()
    221 rh.l.high = (c >> 32) + rh.l.high; in mul_u64_u64_shr()
    224 * The 128-bit result of the multiplication is in rl.ll and rh.ll, in mul_u64_u64_shr()
    230 return (rl.ll >> shift) | (rh.ll << (64 - shift)); in mul_u64_u64_shr()
    231 return rh.ll >> (shift & 63); in mul_u64_u64_shr()
    249 } u, rl, rh; in mul_u64_u32_div() local
    [all...] |
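mul_u64_u64_shr() assembles the 128-bit product of two u64 values from four 32x32 partial products (rl, rm, rn, rh) and then applies the shift. A standalone sketch of the same scheme, with helper names of my own, checked against the compiler's unsigned __int128 (a GCC/Clang extension on 64-bit hosts):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Split a*b into four 32x32->64 partial products, the same layout the
 * kernel helper uses when no native 128-bit multiply is available:
 *   a = ah*2^32 + al,  b = bh*2^32 + bl
 *   a*b = ah*bh*2^64 + (ah*bl + al*bh)*2^32 + al*bl */
static void mul_u64_u64_wide(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint64_t al = (uint32_t)a, ah = a >> 32;
	uint64_t bl = (uint32_t)b, bh = b >> 32;

	uint64_t rl = al * bl;		/* low partial product     */
	uint64_t rm = al * bh;		/* middle partial products */
	uint64_t rn = ah * bl;
	uint64_t rh = ah * bh;		/* high partial product    */

	/* Fold the middle terms into the low/high halves with carries. */
	uint64_t carry = (rl >> 32) + (uint32_t)rm + (uint32_t)rn;

	*lo = (carry << 32) | (uint32_t)rl;
	*hi = rh + (rm >> 32) + (rn >> 32) + (carry >> 32);
}

static uint64_t my_mul_u64_u64_shr(uint64_t a, uint64_t b, unsigned int shift)
{
	uint64_t hi, lo;

	mul_u64_u64_wide(a, b, &hi, &lo);
	if (!shift)
		return lo;
	if (shift < 64)
		return (lo >> shift) | (hi << (64 - shift));
	return hi >> (shift & 63);
}

int main(void)
{
	uint64_t a = 0xdeadbeefcafef00dULL, b = 0x123456789abcdef1ULL;

	for (unsigned int s = 0; s < 128; s += 7) {
		unsigned __int128 ref = (unsigned __int128)a * b >> s;
		assert(my_mul_u64_u64_shr(a, b, s) == (uint64_t)ref);
	}
	printf("all shifts match the __int128 reference\n");
	return 0;
}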
/kernel/linux/linux-6.6/include/linux/ |
H A D | dm-region-hash.h |
    46 void dm_region_hash_destroy(struct dm_region_hash *rh);
    48 struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);
    53 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
    54 sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
    60 sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
    67 int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
    68 void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
    72 void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);
    75 int dm_rh_flush(struct dm_region_hash *rh);
    78 void dm_rh_inc_pending(struct dm_region_hash *rh, struc
    [all...] |
H A D | math64.h |
    208 } rl, rm, rn, rh, a0, b0; in mul_u64_u64_shr() local
    217 rh.ll = mul_u32_u32(a0.l.high, b0.l.high); in mul_u64_u64_shr()
    225 rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; in mul_u64_u64_shr()
    226 rh.l.high = (c >> 32) + rh.l.high; in mul_u64_u64_shr()
    229 * The 128-bit result of the multiplication is in rl.ll and rh.ll, in mul_u64_u64_shr()
    235 return (rl.ll >> shift) | (rh.ll << (64 - shift)); in mul_u64_u64_shr()
    236 return rh.ll >> (shift & 63); in mul_u64_u64_shr()
    272 } u, rl, rh; in mul_u64_u32_div() local
    [all...] |
H A D | rethook.h |
    67 void rethook_stop(struct rethook *rh);
    68 void rethook_free(struct rethook *rh);
    69 void rethook_add_node(struct rethook *rh, struct rethook_node *node);
    70 struct rethook_node *rethook_try_get(struct rethook *rh);
|
/kernel/linux/linux-6.6/kernel/trace/ |
H A D | rethook.c |
    38 struct rethook *rh = container_of(head, struct rethook, rcu); in rethook_free_rcu() local
    43 node = rh->pool.head; in rethook_free_rcu()
    51 /* The rh->ref is the number of pooled node + 1 */ in rethook_free_rcu()
    52 if (refcount_sub_and_test(count, &rh->ref)) in rethook_free_rcu()
    53 kfree(rh); in rethook_free_rcu()
    58 * @rh: the struct rethook to stop.
    64 void rethook_stop(struct rethook *rh) in rethook_stop() argument
    66 rcu_assign_pointer(rh->handler, NULL); in rethook_stop()
    71 * @rh: the struct rethook to be freed.
    74 * @rh
    79 rethook_free(struct rethook *rh) rethook_free() argument
    86 rethook_get_handler(struct rethook *rh) rethook_get_handler() argument
    103 struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL); rethook_alloc() local
    126 rethook_add_node(struct rethook *rh, struct rethook_node *node) rethook_add_node() argument
    168 rethook_try_get(struct rethook *rh) rethook_try_get() argument
    218 struct rethook_node *rh = NULL; __rethook_find_ret_addr() local
    [all...] |
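The rethook_free_rcu() matches show the teardown trick: the rethook's refcount equals the number of pooled nodes plus one, so the RCU callback drains the pool, counts what it freed, and drops all of those references in a single refcount_sub_and_test() call. A standalone model of that counting scheme using C11 atomics (the struct below is invented for illustration, not the kernel type):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pool_model {
	atomic_uint ref;	/* number of pooled nodes + 1 for the owner */
	unsigned int nodes;	/* nodes currently sitting in the pool */
};

/* Drop `count` references at once; true when the last one went away. */
static bool ref_sub_and_test(struct pool_model *p, unsigned int count)
{
	return atomic_fetch_sub(&p->ref, count) == count;
}

static void teardown(struct pool_model *p)
{
	/* Model of rethook_free_rcu(): drain the pool, counting nodes... */
	unsigned int count = 0;

	while (p->nodes) {
		p->nodes--;
		count++;	/* ...each drained node returns one reference */
	}
	count++;		/* ...plus the owner's own reference */

	if (ref_sub_and_test(p, count)) {
		printf("last reference dropped, freeing\n");
		free(p);
	}
}

int main(void)
{
	struct pool_model *p = malloc(sizeof(*p));

	p->nodes = 4;
	atomic_init(&p->ref, p->nodes + 1);	/* pooled nodes + 1 */
	teardown(p);
	return 0;
}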
H A D | fprobe.c |
    28 struct rethook_node *rh = NULL; in __fprobe_handler() local
    36 rh = rethook_try_get(fp->rethook); in __fprobe_handler()
    37 if (!rh) { in __fprobe_handler()
    41 fpr = container_of(rh, struct fprobe_rethook_node, node); in __fprobe_handler()
    52 if (rh) { in __fprobe_handler()
    54 rethook_recycle(rh); in __fprobe_handler()
    56 rethook_hook(rh, ftrace_get_regs(fregs), true); in __fprobe_handler()
    122 static void fprobe_exit_handler(struct rethook_node *rh, void *data, in fprobe_exit_handler() argument
    132 fpr = container_of(rh, struct fprobe_rethook_node, node); in fprobe_exit_handler()
|
/kernel/linux/linux-5.10/crypto/ |
H A D | vmac.c |
    103 #define ADD128(rh, rl, ih, il) \
    108 (rh)++; \
    109 (rh) += (ih); \
    114 #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
    118 rh = MUL32(_i1>>32, _i2>>32); \
    120 ADD128(rh, rl, (m >> 32), (m << 32)); \
    123 #define MUL64(rh, rl, i1, i2) \
    128 rh = MUL32(_i1>>32, _i2>>32); \
    130 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
    131 ADD128(rh, r
    360 u64 rh, rl, t, z = 0; l3hash() local
    407 u64 rh, rl; vhash_blocks() local
    545 u64 rh, rl; vhash_final() local
    [all...] |
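ADD128() performs a 128-bit addition across two u64 halves, detecting carry out of the low word with the usual "sum < addend" wrap-around test before folding in the high-word addend; PMUL64/MUL64 use it to accumulate partial products. A standalone model of that step, verified against unsigned __int128 (the names are mine, not the macro's):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* A 128-bit value kept as two 64-bit halves, as vmac.c keeps (rh, rl). */
struct u128_model { uint64_t hi, lo; };

/* Model of ADD128(rh, rl, ih, il): add (ih:il) into (hi:lo).
 * Unsigned overflow wraps, so (lo < il) after the add means a carry
 * came out of the low half and must bump the high half. */
static void add128(struct u128_model *r, uint64_t ih, uint64_t il)
{
	r->lo += il;
	if (r->lo < il)		/* carry out of the low 64 bits */
		r->hi++;
	r->hi += ih;
}

int main(void)
{
	struct u128_model r = { .hi = 0x1, .lo = 0xffffffffffffffffULL };

	add128(&r, 0x2, 0x1);	/* low half wraps, carry propagates */

	unsigned __int128 ref =
		(((unsigned __int128)0x1 << 64) | 0xffffffffffffffffULL) +
		(((unsigned __int128)0x2 << 64) | 0x1);
	assert(r.hi == (uint64_t)(ref >> 64) && r.lo == (uint64_t)ref);
	printf("hi=%llx lo=%llx\n",
	       (unsigned long long)r.hi, (unsigned long long)r.lo);
	return 0;
}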
/kernel/linux/linux-6.6/crypto/ |
H A D | vmac.c |
    104 #define ADD128(rh, rl, ih, il) \
    109 (rh)++; \
    110 (rh) += (ih); \
    115 #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
    119 rh = MUL32(_i1>>32, _i2>>32); \
    121 ADD128(rh, rl, (m >> 32), (m << 32)); \
    124 #define MUL64(rh, rl, i1, i2) \
    129 rh = MUL32(_i1>>32, _i2>>32); \
    131 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
    132 ADD128(rh, r
    361 u64 rh, rl, t, z = 0; l3hash() local
    408 u64 rh, rl; vhash_blocks() local
    546 u64 rh, rl; vhash_final() local
    [all...] |
/kernel/linux/linux-6.6/arch/arm64/crypto/ |
H A D | sm3-neon-core.S |
    48 #define rh w10 define
    359 ldp rg, rh, [RSTATE, #24]
    401 R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
    402 R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0)
    403 R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
    404 R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0)
    407 R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
    408 R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0)
    409 R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12)
    410 R1(rb, rc, rd, ra, rf, rg, rh, r
    [all...] |
/kernel/linux/linux-5.10/arch/powerpc/sysdev/ |
H A D | fsl_85xx_cache_sram.c |
    47 offset = rh_alloc_align(cache_sram->rh, size, align, NULL); in mpc85xx_cache_sram_alloc()
    65 rh_free(cache_sram->rh, ptr - cache_sram->base_virt); in mpc85xx_cache_sram_free()
    106 cache_sram->rh = rh_create(sizeof(unsigned int)); in instantiate_cache_sram()
    107 if (IS_ERR(cache_sram->rh)) { in instantiate_cache_sram()
    110 ret = PTR_ERR(cache_sram->rh); in instantiate_cache_sram()
    114 rh_attach_region(cache_sram->rh, 0, cache_sram->size); in instantiate_cache_sram()
    137 rh_detach_region(cache_sram->rh, 0, cache_sram->size); in remove_cache_sram()
    138 rh_destroy(cache_sram->rh); in remove_cache_sram()
|
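Note that the cache-SRAM code above never hands raw pointers to the allocator: rh_attach_region() registers a range of offsets and rh_alloc_align()/rh_free() deal purely in offsets, which the caller adds to its own base address. A tiny standalone model of that usage pattern (a bump allocator stands in for the kernel's rheap, so there is no real free path here; all names are invented):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the rheap usage pattern: the allocator tracks a
 * range of offsets while the caller owns the backing memory. */
struct offset_heap {
	uint64_t start, size;	/* attached region, expressed as offsets */
	uint64_t next;		/* bump pointer (the real rheap keeps a free list) */
};

static void heap_attach_region(struct offset_heap *h, uint64_t start, uint64_t size)
{
	h->start = start;
	h->size = size;
	h->next = start;
}

/* Power-of-two alignment, the same rounding any such allocator must do. */
static int64_t heap_alloc_align(struct offset_heap *h, uint64_t size, uint64_t align)
{
	uint64_t off = (h->next + align - 1) & ~(align - 1);

	if (off + size > h->start + h->size)
		return -1;	/* out of space */
	h->next = off + size;
	return (int64_t)off;
}

int main(void)
{
	/* The caller owns the "SRAM"; the allocator only ever sees offsets. */
	uint8_t *sram = malloc(4096);
	struct offset_heap heap;

	heap_attach_region(&heap, 0, 4096);

	int64_t off = heap_alloc_align(&heap, 100, 64);
	if (off >= 0)
		printf("got offset %lld -> virtual address %p\n",
		       (long long)off, (void *)(sram + off));

	free(sram);
	return 0;
}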
/kernel/linux/linux-5.10/drivers/dma/bestcomm/ |
H A D | sram.c |
    97 bcom_sram->rh = rh_create(4); in bcom_sram_init()
    110 rh_attach_region(bcom_sram->rh, 0, bcom_sram->size); in bcom_sram_init()
    115 rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]); in bcom_sram_init()
    140 rh_destroy(bcom_sram->rh); in bcom_sram_cleanup()
    154 offset = rh_alloc_align(bcom_sram->rh, size, align, NULL); in bcom_sram_alloc()
    175 rh_free(bcom_sram->rh, offset); in bcom_sram_free()
|
/kernel/linux/linux-6.6/drivers/dma/bestcomm/ |
H A D | sram.c |
    90 bcom_sram->rh = rh_create(4); in bcom_sram_init()
    103 rh_attach_region(bcom_sram->rh, 0, bcom_sram->size); in bcom_sram_init()
    108 rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]); in bcom_sram_init()
    133 rh_destroy(bcom_sram->rh); in bcom_sram_cleanup()
    147 offset = rh_alloc_align(bcom_sram->rh, size, align, NULL); in bcom_sram_alloc()
    168 rh_free(bcom_sram->rh, offset); in bcom_sram_free()
|
/kernel/linux/linux-5.10/net/ipv6/netfilter/ |
H A D | ip6t_rt.c |
    34 const struct ipv6_rt_hdr *rh; in rt_mt6() local
    51 rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); in rt_mt6()
    52 if (rh == NULL) { in rt_mt6()
    57 hdrlen = ipv6_optlen(rh); in rt_mt6()
    64 rh->segments_left, in rt_mt6()
    70 ((rtinfo->rt_type == rh->type) ^ in rt_mt6()
|
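skb_header_pointer() in rt_mt6() copies the routing header into a local buffer when it is not contiguous in the skb and returns NULL when the packet is too short, which the matcher treats as "cannot match". A standalone model of that bounds-checked header access over a flat buffer (the struct layout is simplified, not the kernel's ipv6_rt_hdr, and the offsets are made up):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Simplified routing-header layout for the example (not the kernel's). */
struct rt_hdr_model {
	uint8_t nexthdr;
	uint8_t hdrlen;
	uint8_t type;
	uint8_t segments_left;
};

/* Model of skb_header_pointer(): return a pointer to `len` bytes at
 * `offset`, copying into `buffer` (always, since this packet is flat)
 * and returning NULL if the packet is too short. */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *buffer)
{
	if (offset + len > pkt_len)
		return NULL;
	memcpy(buffer, pkt + offset, len);
	return buffer;
}

int main(void)
{
	uint8_t packet[64] = { 0 };

	packet[40] = 59;	/* pretend the routing header starts at offset 40 */
	packet[42] = 2;		/* type 2 */
	packet[43] = 1;		/* one segment left */

	struct rt_hdr_model _route;
	const struct rt_hdr_model *rh =
		header_pointer(packet, sizeof(packet), 40, sizeof(_route), &_route);

	if (rh == NULL) {
		fprintf(stderr, "truncated packet\n");
		return 1;
	}
	printf("type=%u segments_left=%u\n", rh->type, rh->segments_left);
	return 0;
}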
/kernel/linux/linux-6.6/net/ipv6/netfilter/ |
H A D | ip6t_rt.c |
    34 const struct ipv6_rt_hdr *rh; in rt_mt6() local
    51 rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); in rt_mt6()
    52 if (rh == NULL) { in rt_mt6()
    57 hdrlen = ipv6_optlen(rh); in rt_mt6()
    64 rh->segments_left, in rt_mt6()
    70 ((rtinfo->rt_type == rh->type) ^ in rt_mt6()
|
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/x86_64/ |
H A D | hyperv_clock.c |
    27 } rm, rn, rh, a0, b0; in mul_u64_u64_shr64() local
    35 rh.ll = (u64)a0.l.high * b0.l.high; in mul_u64_u64_shr64()
    37 rh.l.low = c = rm.l.high + rn.l.high + rh.l.low; in mul_u64_u64_shr64()
    38 rh.l.high = (c >> 32) + rh.l.high; in mul_u64_u64_shr64()
    40 return rh.ll; in mul_u64_u64_shr64()
|
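mul_u64_u64_shr64() in this selftest returns only the high 64 bits of a 64x64 product, which is how the Hyper-V reference TSC page scales a raw TSC reading: time = ((tsc * scale) >> 64) + offset. A quick standalone check of that high-half operation against unsigned __int128 (the scale value below is arbitrary, not a real TSC page value):

#include <stdint.h>
#include <stdio.h>

/* High 64 bits of a 64x64 product, using the compiler's 128-bit type
 * instead of the selftest's hand-rolled 32-bit partial products. */
static uint64_t mul_u64_u64_hi(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	uint64_t tsc = 123456789012345ULL;
	uint64_t scale = 1ULL << 63;	/* one half in the implied /2^64 fixed point */

	/* (tsc * 2^63) >> 64 is just tsc / 2. */
	printf("scaled = %llu (expected %llu)\n",
	       (unsigned long long)mul_u64_u64_hi(tsc, scale),
	       (unsigned long long)(tsc / 2));
	return 0;
}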
/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/liquidio/ |
H A D | octeon_droq.c |
    351 recv_pkt->rh = info->rh; in octeon_create_recv_info()
    530 union octeon_rh *rh, in octeon_droq_dispatch_pkt()
    539 disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode, in octeon_droq_dispatch_pkt()
    540 (u16)rh->r.subcode); in octeon_droq_dispatch_pkt()
    548 rinfo->recv_pkt->rh = *rh; in octeon_droq_dispatch_pkt()
    556 (unsigned int)rh->r.opcode, in octeon_droq_dispatch_pkt()
    557 (unsigned int)rh->r.subcode); in octeon_droq_dispatch_pkt()
    599 union octeon_rh *rh; in octeon_droq_fast_process_packets() local
    528 octeon_droq_dispatch_pkt(struct octeon_device *oct, struct octeon_droq *droq, union octeon_rh *rh, struct octeon_droq_info *info) octeon_droq_dispatch_pkt() argument
    [all...] |
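octeon_droq_dispatch_pkt() looks up a receive handler keyed by the (opcode, subcode) pair carried in the packet's receive header and drops the packet when none is registered. A standalone model of that dispatch pattern (a flat table instead of the driver's hashed dispatch list; all names below are invented):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct recv_header_model {
	uint16_t opcode;
	uint16_t subcode;
};

typedef int (*dispatch_fn_t)(const struct recv_header_model *rh, void *arg);

struct dispatch_entry {
	uint16_t opcode, subcode;
	dispatch_fn_t fn;
};

static int handle_link_status(const struct recv_header_model *rh, void *arg)
{
	(void)arg;
	printf("link status packet (opcode %u/%u)\n", rh->opcode, rh->subcode);
	return 0;
}

/* Model of octeon_get_dispatch(): find a handler registered for this
 * (opcode, subcode); the real driver walks a hashed dispatch list. */
static dispatch_fn_t get_dispatch(const struct dispatch_entry *tbl, size_t n,
				  uint16_t opcode, uint16_t subcode)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].opcode == opcode && tbl[i].subcode == subcode)
			return tbl[i].fn;
	return NULL;	/* unknown packet: the driver drops and counts it */
}

int main(void)
{
	const struct dispatch_entry table[] = {
		{ .opcode = 0x12, .subcode = 0x01, .fn = handle_link_status },
	};
	struct recv_header_model rh = { .opcode = 0x12, .subcode = 0x01 };

	dispatch_fn_t fn = get_dispatch(table, 1, rh.opcode, rh.subcode);
	return fn ? fn(&rh, NULL) : 1;
}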
/kernel/linux/linux-6.6/drivers/net/ethernet/cavium/liquidio/ |
H A D | octeon_droq.c |
    353 recv_pkt->rh = info->rh; in octeon_create_recv_info()
    532 union octeon_rh *rh, in octeon_droq_dispatch_pkt()
    541 disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode, in octeon_droq_dispatch_pkt()
    542 (u16)rh->r.subcode); in octeon_droq_dispatch_pkt()
    550 rinfo->recv_pkt->rh = *rh; in octeon_droq_dispatch_pkt()
    558 (unsigned int)rh->r.opcode, in octeon_droq_dispatch_pkt()
    559 (unsigned int)rh->r.subcode); in octeon_droq_dispatch_pkt()
    601 union octeon_rh *rh; in octeon_droq_fast_process_packets() local
    530 octeon_droq_dispatch_pkt(struct octeon_device *oct, struct octeon_droq *droq, union octeon_rh *rh, struct octeon_droq_info *info) octeon_droq_dispatch_pkt() argument
    [all...] |
/kernel/linux/linux-6.6/arch/s390/kernel/ |
H A D | rethook.c |
    6 void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) in arch_rethook_prepare() argument
    8 rh->ret_addr = regs->gprs[14]; in arch_rethook_prepare()
    9 rh->frame = regs->gprs[15]; in arch_rethook_prepare()
|
/kernel/linux/linux-5.10/arch/arm/vfp/ |
H A D | vfp.h |
    73 u64 rh, rma, rmb, rl; in mul64to128() local
    86 rh = (u64)nh * mh; in mul64to128()
    87 rh += ((u64)(rma < rmb) << 32) + (rma >> 32); in mul64to128()
    91 rh += (rl < rma); in mul64to128()
    94 *resh = rh; in mul64to128()
    105 u64 rh, rl; in vfp_hi64multiply64() local
    106 mul64to128(&rh, &rl, n, m); in vfp_hi64multiply64()
    107 return rh | (rl != 0); in vfp_hi64multiply64()
|
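vfp_hi64multiply64() keeps only the high 64 bits of the product but ORs in (rl != 0), so any discarded low bits still set the least-significant "sticky" bit that the VFP rounding code needs to distinguish an exact result from one that was slightly larger. A standalone illustration using unsigned __int128 for the wide multiply (the helper name is mine):

#include <stdint.h>
#include <stdio.h>

/* High half of a 64x64 product with a sticky LSB: if any discarded low
 * bits were non-zero, bit 0 of the result is forced to 1 so rounding
 * can tell "exactly this value" from "a bit more than this value". */
static uint64_t hi64_multiply_sticky(uint64_t n, uint64_t m)
{
	unsigned __int128 p = (unsigned __int128)n * m;
	uint64_t rh = (uint64_t)(p >> 64);
	uint64_t rl = (uint64_t)p;

	return rh | (rl != 0);
}

int main(void)
{
	/* 2^33 * 2^32 = 2^65: exact, result 0x2, sticky bit clear. */
	printf("exact : %#llx\n",
	       (unsigned long long)hi64_multiply_sticky(1ULL << 33, 1ULL << 32));
	/* (2^33 + 1) * 2^32: discarded low bits non-zero, sticky set -> 0x3. */
	printf("sticky: %#llx\n",
	       (unsigned long long)hi64_multiply_sticky((1ULL << 33) + 1, 1ULL << 32));
	return 0;
}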