/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/
  rxe_mw.c
     18  struct rxe_mw *mw = to_rmw(ibmw); in rxe_alloc_mw() local
     25  ret = rxe_add_to_pool(&rxe->mw_pool, mw); in rxe_alloc_mw()
     31  mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); in rxe_alloc_mw()
     32  mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? in rxe_alloc_mw()
     34  spin_lock_init(&mw->lock); in rxe_alloc_mw()
     36  rxe_finalize(mw); in rxe_alloc_mw()
     43  struct rxe_mw *mw = to_rmw(ibmw); in rxe_dealloc_mw() local
     45  rxe_cleanup(mw); in rxe_dealloc_mw()
     50  rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr, int access) rxe_check_bind_mw() argument
    135  rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr, int access) rxe_do_bind_mw() argument
    167  struct rxe_mw *mw; rxe_bind_mw() local
    224  rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw) rxe_check_invalidate_mw() argument
    236  rxe_do_invalidate_mw(struct rxe_mw *mw) rxe_do_invalidate_mw() argument
    261  struct rxe_mw *mw; rxe_invalidate_mw() local
    294  struct rxe_mw *mw; rxe_lookup_mw() local
    314  struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); rxe_mw_cleanup() local
    [all...]
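
The hit at line 31 shows how an MW rkey is composed: the object's pool index goes in the upper bits and an 8-bit rolling key in the low byte. A stand-alone sketch with made-up index/key values:

/*
 * Stand-alone illustration of the rkey layout built at line 31 above:
 * pool index in the upper bits, an 8-bit rolling key in the low byte,
 * so the index can be recovered with a shift.  Values are made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned int index = 0x1234;               /* hypothetical mw->elem.index */
        unsigned int key   = 0xab;                 /* hypothetical 8-bit next key */
        unsigned int rkey  = (index << 8) | key;   /* same packing as rxe_alloc_mw() */

        printf("rkey  = 0x%08x\n", rkey);          /* 0x001234ab                  */
        printf("index = 0x%x\n", rkey >> 8);       /* 0x1234 recovered            */
        printf("key   = 0x%02x\n", rkey & 0xff);   /* 0xab                        */
        return 0;
}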

  rxe.h
     58  #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
     59      "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
     79  #define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
     80      "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
    100  #define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)
    [all...]
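
The three wrappers tag every message with the window's pool index and the calling function. A hypothetical call site (the message text and fields are chosen for illustration):

/* Hypothetical call site; message text and fields are illustrative. */
rxe_dbg_mw(mw, "bind failed, rkey %#x state %d\n", mw->rkey, mw->state);
/* prints roughly: "mw#7 rxe_bind_mw: bind failed, rkey 0x1234ab state 1" */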

  rxe_verbs.h
    453  static inline struct rxe_mw *to_rmw(struct ib_mw *mw) in to_rmw() argument
    455  return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL; in to_rmw()
    468  static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw) in rxe_mw_pd() argument
    470  return to_rpd(mw->ibmw.pd); in rxe_mw_pd()

/kernel/linux/linux-5.10/net/netfilter/ipvs/
  ip_vs_wrr.c
     28  * - mw: maximum weight
     31  * As result, all weights are in the [di..mw] range with a step=di.
     33  * First, we start with cw = mw and select dests with weight >= cw.
     35  * Last pass should be with cw = di. We have mw/di passes in total:
     47  * So, we modify how mw is calculated, now it is reduced with (di - 1),
     63  int mw; /* maximum weight */ member
    119  mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); in ip_vs_wrr_init_svc()
    120  mark->cw = mark->mw; in ip_vs_wrr_init_svc()
    146  mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); in ip_vs_wrr_dest_changed()
    147  if (mark->cw > mark->mw || !mar in ip_vs_wrr_dest_changed()
    [all...]
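
The comment block indexed above describes the scheduler's weighted round-robin scan. Below is a stand-alone sketch of the textbook variant it starts from (before the (di - 1) adjustment it goes on to justify); the weights are made up, and at least one must be non-zero or the scan never terminates:

/*
 * User-space sketch of the classic weighted round-robin scan described
 * in the comment block above (not the kernel code itself): cw starts at
 * mw, drops by di (the GCD of the weights) on every full pass, and a
 * destination is eligible while its weight >= cw.
 */
#include <stdio.h>

static int weight[] = { 4, 3, 2 };               /* hypothetical dest weights  */
static const int n = 3, di = 1, mw = 4;          /* di = gcd, mw = max weight  */

int main(void)
{
        int cl = n - 1, cw = 0;                  /* cursor and current weight  */

        for (int pick = 0; pick < 9; pick++) {   /* 9 = 4 + 3 + 2 slots/cycle  */
                for (;;) {
                        cl = (cl + 1) % n;
                        if (cl == 0) {           /* wrapped: lower the bar     */
                                cw -= di;
                                if (cw <= 0)
                                        cw = mw; /* restart at max weight      */
                        }
                        if (weight[cl] >= cw)
                                break;           /* this dest wins the slot    */
                }
                printf("%d ", cl);               /* prints 0 0 1 0 1 2 0 1 2   */
        }
        printf("\n");
        return 0;
}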

/kernel/linux/linux-6.6/net/netfilter/ipvs/
  ip_vs_wrr.c
     28  * - mw: maximum weight
     31  * As result, all weights are in the [di..mw] range with a step=di.
     33  * First, we start with cw = mw and select dests with weight >= cw.
     35  * Last pass should be with cw = di. We have mw/di passes in total:
     47  * So, we modify how mw is calculated, now it is reduced with (di - 1),
     63  int mw; /* maximum weight */ member
    119  mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); in ip_vs_wrr_init_svc()
    120  mark->cw = mark->mw; in ip_vs_wrr_init_svc()
    146  mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); in ip_vs_wrr_dest_changed()
    147  if (mark->cw > mark->mw || !mar in ip_vs_wrr_dest_changed()
    [all...]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/
  mr.c
    815  struct mlx4_mw *mw) in mlx4_mw_alloc()
    829  mw->key = hw_index_to_key(index); in mlx4_mw_alloc()
    830  mw->pd = pd; in mlx4_mw_alloc()
    831  mw->type = type; in mlx4_mw_alloc()
    832  mw->enabled = MLX4_MPT_DISABLED; in mlx4_mw_alloc()
    838  int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) in mlx4_mw_enable() argument
    844  err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); in mlx4_mw_enable()
    858  mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); in mlx4_mw_enable()
    859  mpt_entry->pd_flags = cpu_to_be32(mw->pd); in mlx4_mw_enable()
    860  if (mw in mlx4_mw_enable()
    814  mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, struct mlx4_mw *mw) mlx4_mw_alloc() argument
    888  mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) mlx4_mw_free() argument
    [all...]
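
These hits trace the low-level window API; the mlx4_ib verbs layer (see the drivers/infiniband/hw/mlx4/mr.c hits at the end of this listing) drives it as allocate, enable, then publish the key. A kernel-context sketch of that sequence; the wrapper name and error handling here are illustrative, not the driver's code:

/*
 * Condensed sketch of the alloc/enable/free sequence used around this
 * API by mlx4_ib_alloc_mw() (cf. the hits at the end of the listing).
 */
static int alloc_and_enable_mw(struct mlx4_dev *dev, u32 pdn,
                               enum mlx4_mw_type type, struct mlx4_mw *mmw,
                               u32 *rkey)
{
        int err;

        err = mlx4_mw_alloc(dev, pdn, type, mmw);   /* reserve MPT index, fill mmw */
        if (err)
                return err;

        err = mlx4_mw_enable(dev, mmw);             /* write the MPT entry to HW   */
        if (err) {
                mlx4_mw_free(dev, mmw);             /* undo the allocation         */
                return err;
        }

        *rkey = mmw->key;                           /* rkey handed back to the ULP */
        return 0;
}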

/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx4/
  mr.c
    815  struct mlx4_mw *mw) in mlx4_mw_alloc()
    829  mw->key = hw_index_to_key(index); in mlx4_mw_alloc()
    830  mw->pd = pd; in mlx4_mw_alloc()
    831  mw->type = type; in mlx4_mw_alloc()
    832  mw->enabled = MLX4_MPT_DISABLED; in mlx4_mw_alloc()
    838  int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) in mlx4_mw_enable() argument
    844  err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); in mlx4_mw_enable()
    858  mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); in mlx4_mw_enable()
    859  mpt_entry->pd_flags = cpu_to_be32(mw->pd); in mlx4_mw_enable()
    860  if (mw in mlx4_mw_enable()
    814  mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, struct mlx4_mw *mw) mlx4_mw_alloc() argument
    888  mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) mlx4_mw_free() argument
    [all...]

/kernel/linux/linux-5.10/drivers/ntb/
  ntb_transport.c
    619  struct ntb_transport_mw *mw; in ntb_transport_setup_qp_mw() local
    631  mw = &nt->mw_vec[mw_num]; in ntb_transport_setup_qp_mw()
    633  if (!mw->virt_addr) in ntb_transport_setup_qp_mw()
    641  rx_size = (unsigned int)mw->xlat_size / num_qps_mw; in ntb_transport_setup_qp_mw()
    642  qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); in ntb_transport_setup_qp_mw()
    795  struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; in ntb_free_mw() local
    798  if (!mw->virt_addr) in ntb_free_mw()
    802  dma_free_coherent(&pdev->dev, mw->alloc_size, in ntb_free_mw()
    803  mw->alloc_addr, mw in ntb_free_mw()
    811  ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, struct device *dma_dev, size_t align) ntb_alloc_mw_buffer() argument
    858  struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; ntb_set_mw() local
   1253  struct ntb_transport_mw *mw; ntb_transport_probe() local
    [all...]
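
Lines 641-642 split one translated memory window evenly among the queue pairs that share it. A stand-alone sketch of that arithmetic with made-up sizes; the qp_num % mw_count spread is an assumption for the example:

/*
 * Stand-alone sketch of the buffer carving at lines 641-642 above: a
 * window of xlat_size bytes is split into num_qps_mw slices, and a
 * QP's slice starts at rx_size * (qp_num / mw_count).  Sizes and
 * counts below are made up.
 */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t xlat_size    = 1 << 20;   /* hypothetical 1 MiB window        */
        unsigned mw_count   = 2;         /* hypothetical number of windows   */
        unsigned num_qps_mw = 4;         /* QPs sharing each window          */
        unsigned rx_size    = (unsigned)(xlat_size / num_qps_mw);   /* 256 KiB */

        for (unsigned qp_num = 0; qp_num < 8; qp_num++) {
                unsigned mw_num = qp_num % mw_count;   /* assumed QP->MW spread */
                unsigned long off = (unsigned long)rx_size * (qp_num / mw_count);
                printf("qp %u -> mw %u, rx offset 0x%lx\n", qp_num, mw_num, off);
        }
        return 0;
}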

/kernel/linux/linux-6.6/drivers/ntb/
  ntb_transport.c
    617  struct ntb_transport_mw *mw; in ntb_transport_setup_qp_mw() local
    629  mw = &nt->mw_vec[mw_num]; in ntb_transport_setup_qp_mw()
    631  if (!mw->virt_addr) in ntb_transport_setup_qp_mw()
    639  rx_size = (unsigned int)mw->xlat_size / num_qps_mw; in ntb_transport_setup_qp_mw()
    640  qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); in ntb_transport_setup_qp_mw()
    793  struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; in ntb_free_mw() local
    796  if (!mw->virt_addr) in ntb_free_mw()
    800  dma_free_coherent(&pdev->dev, mw->alloc_size, in ntb_free_mw()
    801  mw->alloc_addr, mw in ntb_free_mw()
    809  ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, struct device *dma_dev, size_t align) ntb_alloc_mw_buffer() argument
    856  struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; ntb_set_mw() local
   1251  struct ntb_transport_mw *mw; ntb_transport_probe() local
    [all...]

/kernel/linux/linux-5.10/fs/ocfs2/
  dlmglue.c
    435  struct ocfs2_mask_waiter *mw, int ret) in ocfs2_update_lock_stats()
    448  kt = ktime_sub(ktime_get(), mw->mw_lock_start); in ocfs2_update_lock_stats()
    475  struct ocfs2_mask_waiter *mw; in ocfs2_track_lock_wait() local
    482  mw = list_first_entry(&lockres->l_mask_waiters, in ocfs2_track_lock_wait()
    485  ktime_to_us(ktime_mono_to_real(mw->mw_lock_start)); in ocfs2_track_lock_wait()
    488  static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) in ocfs2_init_start_time() argument
    490  mw->mw_lock_start = ktime_get(); in ocfs2_init_start_time()
    497  int level, struct ocfs2_mask_waiter *mw, int ret) in ocfs2_update_lock_stats()
    506  static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) in ocfs2_init_start_time() argument
    892  struct ocfs2_mask_waiter *mw, *tm in lockres_set_flags() local
    434  ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) ocfs2_update_lock_stats() argument
    496  ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) ocfs2_update_lock_stats() argument
   1390  ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw) ocfs2_init_mask_waiter() argument
   1397  ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw) ocfs2_wait_for_mask() argument
   1405  lockres_add_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw, unsigned long mask, unsigned long goal) lockres_add_mask_waiter() argument
   1422  __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw) __lockres_remove_mask_waiter() argument
   1440  lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw) lockres_remove_mask_waiter() argument
   1454  ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw, struct ocfs2_lock_res *lockres) ocfs2_wait_for_mask_interruptible() argument
   1477  struct ocfs2_mask_waiter mw; __ocfs2_cluster_lock() local
   1912  struct ocfs2_mask_waiter mw; ocfs2_flock_handle_signal() local
   1982  struct ocfs2_mask_waiter mw; ocfs2_file_lock() local
   2078  struct ocfs2_mask_waiter mw; ocfs2_file_unlock() local
   3502  struct ocfs2_mask_waiter mw; ocfs2_mark_lockres_freeing() local
    [all...]
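
Taken together, the helpers indexed above form a small wait-for-flag facility: initialize a waiter, register it with a mask and goal, then sleep until a flag change satisfies it. A kernel-context sketch pieced from the signatures here; real callers hold lockres->l_lock around the add (elided), and the BUSY mask with a zero goal is illustrative:

/*
 * Sketch of the mask-waiter pattern; locking around the add is elided
 * and the mask/goal pair is illustrative, not taken from a real caller.
 */
static void wait_until_not_busy(struct ocfs2_lock_res *lockres)
{
        struct ocfs2_mask_waiter mw;

        ocfs2_init_mask_waiter(&mw);                  /* records the start time     */
        lockres_add_mask_waiter(lockres, &mw,
                                OCFS2_LOCK_BUSY, 0);  /* wake when BUSY clears      */
        ocfs2_wait_for_mask(&mw);                     /* sleep until the waker fires */
}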

/kernel/linux/linux-6.6/fs/ocfs2/
  dlmglue.c
    434  struct ocfs2_mask_waiter *mw, int ret) in ocfs2_update_lock_stats()
    447  kt = ktime_sub(ktime_get(), mw->mw_lock_start); in ocfs2_update_lock_stats()
    474  struct ocfs2_mask_waiter *mw; in ocfs2_track_lock_wait() local
    481  mw = list_first_entry(&lockres->l_mask_waiters, in ocfs2_track_lock_wait()
    484  ktime_to_us(ktime_mono_to_real(mw->mw_lock_start)); in ocfs2_track_lock_wait()
    487  static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) in ocfs2_init_start_time() argument
    489  mw->mw_lock_start = ktime_get(); in ocfs2_init_start_time()
    496  int level, struct ocfs2_mask_waiter *mw, int ret) in ocfs2_update_lock_stats()
    505  static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) in ocfs2_init_start_time() argument
    891  struct ocfs2_mask_waiter *mw, *tm in lockres_set_flags() local
    433  ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) ocfs2_update_lock_stats() argument
    495  ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) ocfs2_update_lock_stats() argument
   1389  ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw) ocfs2_init_mask_waiter() argument
   1396  ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw) ocfs2_wait_for_mask() argument
   1404  lockres_add_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw, unsigned long mask, unsigned long goal) lockres_add_mask_waiter() argument
   1421  __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw) __lockres_remove_mask_waiter() argument
   1439  lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw) lockres_remove_mask_waiter() argument
   1453  ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw, struct ocfs2_lock_res *lockres) ocfs2_wait_for_mask_interruptible() argument
   1476  struct ocfs2_mask_waiter mw; __ocfs2_cluster_lock() local
   1911  struct ocfs2_mask_waiter mw; ocfs2_flock_handle_signal() local
   1981  struct ocfs2_mask_waiter mw; ocfs2_file_lock() local
   2077  struct ocfs2_mask_waiter mw; ocfs2_file_unlock() local
   3512  struct ocfs2_mask_waiter mw; ocfs2_mark_lockres_freeing() local
    [all...]

/kernel/linux/linux-5.10/arch/mips/txx9/generic/
  mem_tx4927.c
     46  unsigned int mw = 0; in tx4927_process_sdccr() local
     61  mw = 8 >> sdccr_mw; in tx4927_process_sdccr()
     64  return rs * cs * mw * bs; in tx4927_process_sdccr()
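
Line 61 decodes the SDRAM bus width (8 >> sdccr_mw bytes) and line 64 multiplies rows, columns, width and banks into a byte count. A worked example with hypothetical decoded values:

/*
 * Worked example of the size computation at lines 61 and 64 above.
 * Only "mw = 8 >> sdccr_mw" and the final product come from the hits;
 * the row/column/bank figures are made up, not decoded from a real
 * SDCCR register.
 */
#include <stdio.h>

int main(void)
{
        unsigned int sdccr_mw = 1;                 /* hypothetical field value     */
        unsigned int mw = 8 >> sdccr_mw;           /* -> 4-byte (32-bit) bus       */
        unsigned int rs = 4096, cs = 512, bs = 4;  /* hypothetical rows/cols/banks */

        printf("%u bytes (%u MiB)\n", rs * cs * mw * bs,
               rs * cs * mw * bs >> 20);           /* 33554432 bytes = 32 MiB      */
        return 0;
}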

/kernel/linux/linux-6.6/arch/mips/txx9/generic/
  mem_tx4927.c
     46  unsigned int mw = 0; in tx4927_process_sdccr() local
     61  mw = 8 >> sdccr_mw; in tx4927_process_sdccr()
     64  return rs * cs * mw * bs; in tx4927_process_sdccr()

/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/octeontx2/af/
  rvu.c
     36  static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
   1668  struct mbox_wq_info *mw; in __rvu_mbox_handler() local
   1673  mw = &rvu->afpf_wq_info; in __rvu_mbox_handler()
   1676  mw = &rvu->afvf_wq_info; in __rvu_mbox_handler()
   1682  devid = mwork - mw->mbox_wrk; in __rvu_mbox_handler()
   1683  mbox = &mw->mbox; in __rvu_mbox_handler()
   1688  if (mw->mbox_wrk[devid].num_msgs == 0) in __rvu_mbox_handler()
   1693  for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { in __rvu_mbox_handler()
   1726  mw->mbox_wrk[devid].num_msgs = 0; in __rvu_mbox_handler()
   1752  struct mbox_wq_info *mw; in __rvu_mbox_up_handler() local
   1828  rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) rvu_mbox_init() argument
   1916  rvu_mbox_destroy(struct mbox_wq_info *mw) rvu_mbox_destroy() argument
   1931  rvu_queue_work(struct mbox_wq_info *mw, int first, int mdevs, u64 intr) rvu_queue_work() argument
    [all...]

/kernel/linux/linux-5.10/drivers/clk/rockchip/
  clk.h
    425  #define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
    436  .mux_width = mw, \
    446  #define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \
    457  .mux_width = mw, \
    505  #define COMPOSITE_NODIV(_id, cname, pnames, f, mo, ms, mw, mf, \
    516  .mux_width = mw, \
    523  #define COMPOSITE_NOGATE(_id, cname, pnames, f, mo, ms, mw, mf, \
    534  .mux_width = mw, \
    543  mw, mf, ds, dw, df, dt) \
    553  .mux_width = mw, \
    [all...]

/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
  hns_roce_mr.c
    523  struct hns_roce_mw *mw) in hns_roce_mw_free()
    528  if (mw->enabled) { in hns_roce_mw_free()
    530  key_to_hw_index(mw->rkey) & in hns_roce_mw_free()
    536  key_to_hw_index(mw->rkey)); in hns_roce_mw_free()
    540  key_to_hw_index(mw->rkey), BITMAP_NO_RR); in hns_roce_mw_free()
    544  struct hns_roce_mw *mw) in hns_roce_mw_enable()
    549  unsigned long mtpt_idx = key_to_hw_index(mw->rkey); in hns_roce_mw_enable()
    563  ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw); in hns_roce_mw_enable()
    576  mw->enabled = 1; in hns_roce_mw_enable()
    594  struct hns_roce_mw *mw in hns_roce_alloc_mw() local
    522  hns_roce_mw_free(struct hns_roce_dev *hr_dev, struct hns_roce_mw *mw) hns_roce_mw_free() argument
    543  hns_roce_mw_enable(struct hns_roce_dev *hr_dev, struct hns_roce_mw *mw) hns_roce_mw_enable() argument
    625  struct hns_roce_mw *mw = to_hr_mw(ibmw); hns_roce_dealloc_mw() local
    [all...]

/kernel/linux/linux-6.6/drivers/infiniband/hw/hns/
  hns_roce_mr.c
    460  struct hns_roce_mw *mw) in hns_roce_mw_free()
    465  if (mw->enabled) { in hns_roce_mw_free()
    467  key_to_hw_index(mw->rkey) & in hns_roce_mw_free()
    473  key_to_hw_index(mw->rkey)); in hns_roce_mw_free()
    477  (int)key_to_hw_index(mw->rkey)); in hns_roce_mw_free()
    481  struct hns_roce_mw *mw) in hns_roce_mw_enable()
    486  unsigned long mtpt_idx = key_to_hw_index(mw->rkey); in hns_roce_mw_enable()
    500  ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw); in hns_roce_mw_enable()
    513  mw->enabled = 1; in hns_roce_mw_enable()
    533  struct hns_roce_mw *mw in hns_roce_alloc_mw() local
    459  hns_roce_mw_free(struct hns_roce_dev *hr_dev, struct hns_roce_mw *mw) hns_roce_mw_free() argument
    480  hns_roce_mw_enable(struct hns_roce_dev *hr_dev, struct hns_roce_mw *mw) hns_roce_mw_enable() argument
    567  struct hns_roce_mw *mw = to_hr_mw(ibmw); hns_roce_dealloc_mw() local
    [all...]

/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/af/
  rvu.c
     33  static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
   2124  struct mbox_wq_info *mw; in __rvu_mbox_handler() local
   2129  mw = &rvu->afpf_wq_info; in __rvu_mbox_handler()
   2132  mw = &rvu->afvf_wq_info; in __rvu_mbox_handler()
   2138  devid = mwork - mw->mbox_wrk; in __rvu_mbox_handler()
   2139  mbox = &mw->mbox; in __rvu_mbox_handler()
   2144  if (mw->mbox_wrk[devid].num_msgs == 0) in __rvu_mbox_handler()
   2149  for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { in __rvu_mbox_handler()
   2182  mw->mbox_wrk[devid].num_msgs = 0; in __rvu_mbox_handler()
   2214  struct mbox_wq_info *mw; in __rvu_mbox_up_handler() local
   2350  rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) rvu_mbox_init() argument
   2469  rvu_mbox_destroy(struct mbox_wq_info *mw) rvu_mbox_destroy() argument
   2490  rvu_queue_work(struct mbox_wq_info *mw, int first, int mdevs, u64 intr) rvu_queue_work() argument
    [all...]

/kernel/linux/linux-6.6/drivers/clk/rockchip/
  clk.h
    550  #define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
    561  .mux_width = mw, \
    571  #define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \
    582  .mux_width = mw, \
    630  #define COMPOSITE_NODIV(_id, cname, pnames, f, mo, ms, mw, mf, \
    641  .mux_width = mw, \
    648  #define COMPOSITE_NOGATE(_id, cname, pnames, f, mo, ms, mw, mf, \
    659  .mux_width = mw, \
    668  mw, mf, ds, dw, df, dt) \
    678  .mux_width = mw, \
    [all...]

/kernel/linux/linux-5.10/scripts/dtc/include-prefixes/dt-bindings/usb/
  pd.h
     44  #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)

/kernel/linux/linux-5.10/include/dt-bindings/usb/
  pd.h
     44  #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)

/kernel/linux/linux-5.10/include/linux/usb/
  pd.h
    248  #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
    373  #define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT)
    374  #define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT)
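
All three power macros encode milliwatts in 250 mW units before masking and shifting into the 32-bit object. A stand-alone check of that encoding; the 10-bit mask and zero shift below are assumptions standing in for PDO_PWR_MASK and PDO_BATT_MAX_PWR_SHIFT, and only the divide-by-250 step comes from the macros themselves:

/*
 * Stand-alone check of the 250 mW encoding used by the macros above.
 * PWR_MASK and MAX_PWR_SHIFT are assumed placeholders for the header's
 * constants.
 */
#include <stdio.h>

#define PWR_MASK           0x3ff        /* assumed 10-bit power field */
#define MAX_PWR_SHIFT      0            /* assumed field position     */
#define BATT_MAX_POWER(mw) ((((mw) / 250) & PWR_MASK) << MAX_PWR_SHIFT)

int main(void)
{
        unsigned int mw = 45000;                     /* a 45 W battery PDO       */
        unsigned int field = BATT_MAX_POWER(mw);     /* 45000 / 250 = 180        */

        printf("field = %u (0x%x)\n", field, field); /* 180; 180 * 250 mW = 45 W */
        return 0;
}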

/kernel/linux/linux-6.6/include/linux/usb/
  pd.h
    249  #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
    374  #define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT)
    375  #define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT)

/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/
  mr.c
    616  struct mlx4_ib_mw *mw = to_mmw(ibmw); in mlx4_ib_alloc_mw() local
    620  to_mlx4_type(ibmw->type), &mw->mmw); in mlx4_ib_alloc_mw()
    624  err = mlx4_mw_enable(dev->dev, &mw->mmw); in mlx4_ib_alloc_mw()
    628  ibmw->rkey = mw->mmw.key; in mlx4_ib_alloc_mw()
    632  mlx4_mw_free(dev->dev, &mw->mmw); in mlx4_ib_alloc_mw()
    638  struct mlx4_ib_mw *mw = to_mmw(ibmw); in mlx4_ib_dealloc_mw() local
    640  mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); in mlx4_ib_dealloc_mw()

/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/
  mr.c
    616  struct mlx4_ib_mw *mw = to_mmw(ibmw); in mlx4_ib_alloc_mw() local
    620  to_mlx4_type(ibmw->type), &mw->mmw); in mlx4_ib_alloc_mw()
    624  err = mlx4_mw_enable(dev->dev, &mw->mmw); in mlx4_ib_alloc_mw()
    628  ibmw->rkey = mw->mmw.key; in mlx4_ib_alloc_mw()
    632  mlx4_mw_free(dev->dev, &mw->mmw); in mlx4_ib_alloc_mw()
    638  struct mlx4_ib_mw *mw = to_mmw(ibmw); in mlx4_ib_dealloc_mw() local
    640  mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); in mlx4_ib_dealloc_mw()