Searched refs:se (Results 1 - 25 of 242) sorted by relevance

/kernel/linux/linux-6.6/drivers/soc/qcom/
qcom-geni-se.c
17 #include <linux/soc/qcom/geni-se.h>
192 * @se: Pointer to the corresponding serial engine.
196 u32 geni_se_get_qup_hw_version(struct geni_se *se) in geni_se_get_qup_hw_version() argument
198 struct geni_wrapper *wrapper = se->wrapper; in geni_se_get_qup_hw_version()
237 static void geni_se_irq_clear(struct geni_se *se) in geni_se_irq_clear() argument
239 writel_relaxed(0, se->base + SE_GSI_EVENT_EN); in geni_se_irq_clear()
240 writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR); in geni_se_irq_clear()
241 writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR); in geni_se_irq_clear()
242 writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR); in geni_se_irq_clear()
243 writel_relaxed(0xffffffff, se in geni_se_irq_clear()
256 geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr) geni_se_init() argument
277 geni_se_select_fifo_mode(struct geni_se *se) geni_se_select_fifo_mode() argument
300 geni_se_select_dma_mode(struct geni_se *se) geni_se_select_dma_mode() argument
323 geni_se_select_gpi_mode(struct geni_se *se) geni_se_select_gpi_mode() argument
348 geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode) geni_se_select_mode() argument
431 geni_se_config_packing(struct geni_se *se, int bpw, int pack_words, bool msb_to_lsb, bool tx_cfg, bool rx_cfg) geni_se_config_packing() argument
486 geni_se_clks_off(struct geni_se *se) geni_se_clks_off() argument
501 geni_se_resources_off(struct geni_se *se) geni_se_resources_off() argument
517 geni_se_clks_on(struct geni_se *se) geni_se_clks_on() argument
539 geni_se_resources_on(struct geni_se *se) geni_se_resources_on() argument
571 geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl) geni_se_clk_tbl_get() argument
618 geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq, unsigned int *index, unsigned long *res_freq, bool exact) geni_se_clk_freq_match() argument
674 geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len) geni_se_tx_init_dma() argument
700 geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, dma_addr_t *iova) geni_se_tx_dma_prep() argument
725 geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len) geni_se_rx_init_dma() argument
752 geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, dma_addr_t *iova) geni_se_rx_dma_prep() argument
777 geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) geni_se_tx_dma_unprep() argument
794 geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) geni_se_rx_dma_unprep() argument
803 geni_icc_get(struct geni_se *se, const char *icc_ddr) geni_icc_get() argument
832 geni_icc_set_bw(struct geni_se *se) geni_icc_set_bw() argument
850 geni_icc_set_tag(struct geni_se *se, u32 tag) geni_icc_set_tag() argument
860 geni_icc_enable(struct geni_se *se) geni_icc_enable() argument
877 geni_icc_disable(struct geni_se *se) geni_icc_disable() argument
[all...]
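
The geni_se_* calls listed above make up the common bring-up and DMA path shared by Qualcomm GENI client drivers. Below is a minimal sketch of how a client might chain them for one DMA TX transfer; it is not copied from any driver in this tree, the watermark values and the 8-bit/4-word packing are placeholders, and the protocol-command step is elided.

#include <linux/soc/qcom/geni-se.h>

/* Hypothetical helper: push one buffer through a serial engine in DMA mode. */
static int example_se_dma_tx(struct geni_se *se, void *buf, size_t len)
{
	dma_addr_t iova;
	int ret;

	ret = geni_se_resources_on(se);			/* clocks and ICC paths */
	if (ret)
		return ret;

	geni_se_init(se, 0x1, 0x1);			/* RX watermark / RFR levels */
	geni_se_select_mode(se, GENI_SE_DMA);		/* FIFO, DMA or GPI transfer mode */
	geni_se_config_packing(se, 8, 4, true, true, true);

	ret = geni_se_tx_dma_prep(se, buf, len, &iova);	/* map buf and program TX DMA */
	if (ret)
		goto out;

	/* ...issue the protocol M command and wait for the TX done interrupt... */

	geni_se_tx_dma_unprep(se, iova, len);		/* unmap once the transfer is over */
out:
	geni_se_resources_off(se);
	return ret;
}
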
/kernel/linux/linux-5.10/drivers/soc/qcom/
qcom-geni-se.c
14 #include <linux/qcom-geni-se.h>
177 * @se: Pointer to the corresponding serial engine.
181 u32 geni_se_get_qup_hw_version(struct geni_se *se) in geni_se_get_qup_hw_version() argument
183 struct geni_wrapper *wrapper = se->wrapper; in geni_se_get_qup_hw_version()
222 static void geni_se_irq_clear(struct geni_se *se) in geni_se_irq_clear() argument
224 writel_relaxed(0, se->base + SE_GSI_EVENT_EN); in geni_se_irq_clear()
225 writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR); in geni_se_irq_clear()
226 writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR); in geni_se_irq_clear()
227 writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR); in geni_se_irq_clear()
228 writel_relaxed(0xffffffff, se in geni_se_irq_clear()
241 geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr) geni_se_init() argument
262 geni_se_select_fifo_mode(struct geni_se *se) geni_se_select_fifo_mode() argument
286 geni_se_select_dma_mode(struct geni_se *se) geni_se_select_dma_mode() argument
315 geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode) geni_se_select_mode() argument
395 geni_se_config_packing(struct geni_se *se, int bpw, int pack_words, bool msb_to_lsb, bool tx_cfg, bool rx_cfg) geni_se_config_packing() argument
450 geni_se_clks_off(struct geni_se *se) geni_se_clks_off() argument
466 geni_se_resources_off(struct geni_se *se) geni_se_resources_off() argument
482 geni_se_clks_on(struct geni_se *se) geni_se_clks_on() argument
506 geni_se_resources_on(struct geni_se *se) geni_se_resources_on() argument
538 geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl) geni_se_clk_tbl_get() argument
585 geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq, unsigned int *index, unsigned long *res_freq, bool exact) geni_se_clk_freq_match() argument
643 geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, dma_addr_t *iova) geni_se_tx_dma_prep() argument
679 geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, dma_addr_t *iova) geni_se_rx_dma_prep() argument
713 geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) geni_se_tx_dma_unprep() argument
730 geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) geni_se_rx_dma_unprep() argument
739 geni_icc_get(struct geni_se *se, const char *icc_ddr) geni_icc_get() argument
768 geni_icc_set_bw(struct geni_se *se) geni_icc_set_bw() argument
786 geni_icc_set_tag(struct geni_se *se, u32 tag) geni_icc_set_tag() argument
796 geni_icc_enable(struct geni_se *se) geni_icc_enable() argument
813 geni_icc_disable(struct geni_se *se) geni_icc_disable() argument
[all...]
/kernel/linux/linux-6.6/include/linux/soc/qcom/
geni-se.h
306 u32 geni_se_get_qup_hw_version(struct geni_se *se);
310 * @se: Pointer to the concerned serial engine.
314 static inline u32 geni_se_read_proto(struct geni_se *se) in geni_se_read_proto() argument
318 val = readl_relaxed(se->base + GENI_FW_REVISION_RO); in geni_se_read_proto()
325 * @se: Pointer to the concerned serial engine.
332 static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params) in geni_se_setup_m_cmd() argument
337 writel(m_cmd, se->base + SE_GENI_M_CMD0); in geni_se_setup_m_cmd()
342 * @se: Pointer to the concerned serial engine.
349 static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params) in geni_se_setup_s_cmd() argument
353 s_cmd = readl_relaxed(se in geni_se_setup_s_cmd()
368 geni_se_cancel_m_cmd(struct geni_se *se) geni_se_cancel_m_cmd() argument
381 geni_se_cancel_s_cmd(struct geni_se *se) geni_se_cancel_s_cmd() argument
393 geni_se_abort_m_cmd(struct geni_se *se) geni_se_abort_m_cmd() argument
406 geni_se_abort_s_cmd(struct geni_se *se) geni_se_abort_s_cmd() argument
421 geni_se_get_tx_fifo_depth(struct geni_se *se) geni_se_get_tx_fifo_depth() argument
448 geni_se_get_tx_fifo_width(struct geni_se *se) geni_se_get_tx_fifo_width() argument
467 geni_se_get_rx_fifo_depth(struct geni_se *se) geni_se_get_rx_fifo_depth() argument
[all...]
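
The inline helpers in this header program the serial engine's main ("M") and secondary ("S") command sequencers. A short, hypothetical fragment showing how a protocol driver might read back the firmware protocol and kick off an M command; the opcode below is a placeholder for illustration, not a real protocol opcode.

#include <linux/printk.h>
#include <linux/soc/qcom/geni-se.h>

#define EXAMPLE_OPCODE	0x1			/* placeholder opcode for illustration */

static void example_start_m_cmd(struct geni_se *se, bool bail_out)
{
	u32 proto = geni_se_read_proto(se);	/* protocol id reported by SE firmware */

	pr_debug("serial engine runs protocol %u\n", proto);

	geni_se_setup_m_cmd(se, EXAMPLE_OPCODE, 0);	/* start the main sequencer */

	if (bail_out)
		geni_se_cancel_m_cmd(se);	/* request a graceful cancel */
}
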
/kernel/linux/linux-5.10/include/linux/
qcom-geni-se.h
271 u32 geni_se_get_qup_hw_version(struct geni_se *se);
275 * @se: Pointer to the concerned serial engine.
279 static inline u32 geni_se_read_proto(struct geni_se *se) in geni_se_read_proto() argument
283 val = readl_relaxed(se->base + GENI_FW_REVISION_RO); in geni_se_read_proto()
290 * @se: Pointer to the concerned serial engine.
297 static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params) in geni_se_setup_m_cmd() argument
302 writel(m_cmd, se->base + SE_GENI_M_CMD0); in geni_se_setup_m_cmd()
307 * @se: Pointer to the concerned serial engine.
314 static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params) in geni_se_setup_s_cmd() argument
318 s_cmd = readl_relaxed(se in geni_se_setup_s_cmd()
333 geni_se_cancel_m_cmd(struct geni_se *se) geni_se_cancel_m_cmd() argument
346 geni_se_cancel_s_cmd(struct geni_se *se) geni_se_cancel_s_cmd() argument
358 geni_se_abort_m_cmd(struct geni_se *se) geni_se_abort_m_cmd() argument
371 geni_se_abort_s_cmd(struct geni_se *se) geni_se_abort_s_cmd() argument
385 geni_se_get_tx_fifo_depth(struct geni_se *se) geni_se_get_tx_fifo_depth() argument
403 geni_se_get_tx_fifo_width(struct geni_se *se) geni_se_get_tx_fifo_width() argument
421 geni_se_get_rx_fifo_depth(struct geni_se *se) geni_se_get_rx_fifo_depth() argument
[all...]
/kernel/linux/linux-5.10/drivers/i2c/busses/
i2c-qcom-geni.c
15 #include <linux/qcom-geni-se.h>
76 struct geni_se se; member
157 writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL); in qcom_geni_i2c_conf()
160 writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG); in qcom_geni_i2c_conf()
165 writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS); in qcom_geni_i2c_conf()
170 u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0); in geni_i2c_err_misc()
171 u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS); in geni_i2c_err_misc()
172 u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS); in geni_i2c_err_misc()
173 u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS); in geni_i2c_err_misc()
174 u32 dma = readl_relaxed(gi2c->se in geni_i2c_err_misc()
384 struct geni_se *se = &gi2c->se; geni_i2c_rx_one_msg() local
425 struct geni_se *se = &gi2c->se; geni_i2c_tx_one_msg() local
[all...]
/kernel/linux/linux-5.10/drivers/spi/
spi-geni-qcom.c
12 #include <linux/qcom-geni-se.h>
67 struct geni_se se; member
98 ret = geni_se_clk_freq_match(&mas->se, in get_spi_clk_cfg()
126 struct geni_se *se = &mas->se; in handle_fifo_timeout() local
130 writel(0, se->base + SE_GENI_TX_WATERMARK_REG); in handle_fifo_timeout()
132 geni_se_cancel_m_cmd(se); in handle_fifo_timeout()
141 geni_se_abort_m_cmd(se); in handle_fifo_timeout()
158 struct geni_se *se = &mas->se; in spi_geni_is_abort_still_pending() local
194 struct geni_se *se = &mas->se; spi_geni_set_cs() local
233 struct geni_se *se = &mas->se; spi_setup_word_len() local
254 struct geni_se *se = &mas->se; geni_spi_set_clock_and_bw() local
293 struct geni_se *se = &mas->se; setup_fifo_params() local
343 struct geni_se *se = &mas->se; spi_geni_init() local
403 struct geni_se *se = &mas->se; geni_spi_handle_tx() local
441 struct geni_se *se = &mas->se; geni_spi_handle_rx() local
489 struct geni_se *se = &mas->se; setup_fifo_xfer() local
579 struct geni_se *se = &mas->se; geni_spi_isr() local
[all...]
/kernel/linux/linux-5.10/kernel/sched/
fair.c
290 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
292 SCHED_WARN_ON(!entity_is_task(se)); in task_of()
293 return container_of(se, struct task_struct, se); in task_of()
297 #define for_each_sched_entity(se) \
298 for (; se; se = se->parent)
302 return p->se.cfs_rq; in task_cfs_rq()
306 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
430 is_same_group(struct sched_entity *se, struct sched_entity *pse) is_same_group() argument
438 parent_entity(struct sched_entity *se) parent_entity() argument
444 find_matching_se(struct sched_entity **se, struct sched_entity **pse) find_matching_se() argument
477 task_of(struct sched_entity *se) task_of() argument
490 cfs_rq_of(struct sched_entity *se) cfs_rq_of() argument
526 parent_entity(struct sched_entity *se) parent_entity() argument
532 find_matching_se(struct sched_entity **se, struct sched_entity **pse) find_matching_se() argument
584 struct sched_entity *se; update_min_vruntime() local
604 __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __enqueue_entity() argument
634 __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __dequeue_entity() argument
649 __pick_next_entity(struct sched_entity *se) __pick_next_entity() argument
700 calc_delta_fair(u64 delta, struct sched_entity *se) calc_delta_fair() argument
730 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_slice() argument
767 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_vslice() argument
779 init_entity_runnable_average(struct sched_entity *se) init_entity_runnable_average() argument
827 struct sched_entity *se = &p->se; post_init_entity_util_avg() local
866 init_entity_runnable_average(struct sched_entity *se) init_entity_runnable_average() argument
921 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_start() argument
939 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end() argument
971 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_enqueue_sleeper() argument
1042 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_enqueue() argument
1059 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_dequeue() argument
1088 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_curr_start() argument
3026 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_enqueue() argument
3041 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_dequeue() argument
3103 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3110 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3117 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3119 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3122 reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) reweight_entity() argument
3151 struct sched_entity *se = &p->se; reweight_task() local
3276 update_cfs_group(struct sched_entity *se) update_cfs_group() argument
3300 update_cfs_group(struct sched_entity *se) update_cfs_group() argument
3365 set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) set_task_rq_fair() argument
3477 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_util() argument
3502 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_runnable() argument
3527 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_load() argument
3594 propagate_entity_load_avg(struct sched_entity *se) propagate_entity_load_avg() argument
3625 skip_blocked_update(struct sched_entity *se) skip_blocked_update() argument
3655 propagate_entity_load_avg(struct sched_entity *se) propagate_entity_load_avg() argument
3750 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
3805 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
3834 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_load_avg() argument
3894 sync_entity_load_avg(struct sched_entity *se) sync_entity_load_avg() argument
3907 remove_entity_load_avg(struct sched_entity *se) remove_entity_load_avg() argument
4318 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) update_load_avg() argument
4323 remove_entity_load_avg(struct sched_entity *se) remove_entity_load_avg() argument
4326 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
4328 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
4348 check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) check_spread() argument
4361 entity_is_long_sleeper(struct sched_entity *se) entity_is_long_sleeper() argument
4385 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) place_entity() argument
4492 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) enqueue_entity() argument
4553 __clear_buddies_last(struct sched_entity *se) __clear_buddies_last() argument
4564 __clear_buddies_next(struct sched_entity *se) __clear_buddies_next() argument
4575 __clear_buddies_skip(struct sched_entity *se) __clear_buddies_skip() argument
4586 clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) clear_buddies() argument
4601 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity() argument
4659 struct sched_entity *se; check_preempt_tick() local
4693 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) set_next_entity() argument
4739 struct sched_entity *se; pick_next_entity() local
5046 struct sched_entity *se; throttle_cfs_rq() local
5118 struct sched_entity *se; unthrottle_cfs_rq() local
5699 struct sched_entity *se = &p->se; hrtick_start_fair() local
5781 check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se) check_preempt_from_idle() argument
5818 struct sched_entity *se = &p->se; enqueue_task_fair() local
5932 struct sched_entity *se = &p->se; dequeue_task_fair() local
7210 struct sched_entity *se = &p->se; migrate_task_rq_fair() local
7271 wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se) wakeup_latency_gran() argument
7303 wakeup_gran(struct sched_entity *se) wakeup_gran() argument
7338 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) wakeup_preempt_entity() argument
7357 set_last_buddy(struct sched_entity *se) set_last_buddy() argument
7369 set_next_buddy(struct sched_entity *se) set_next_buddy() argument
7381 set_skip_buddy(struct sched_entity *se) set_skip_buddy() argument
7393 struct sched_entity *se = &curr->se, *pse = &p->se; check_preempt_wakeup() local
7477 struct sched_entity *se; pick_next_task_fair() local
7551 set_next_entity(cfs_rq_of(se), se); pick_next_task_fair() local
7627 struct sched_entity *se = &prev->se; put_prev_task_fair() local
7645 struct sched_entity *se = &curr->se; yield_task_fair() local
7674 struct sched_entity *se = &p->se; yield_to_task_fair() local
8428 struct sched_entity *se; __update_blocked_fair() local
8440 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); __update_blocked_fair() local
8465 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; update_cfs_rq_h_load() local
11511 struct sched_entity *se = &curr->se; task_tick_fair() local
11533 struct sched_entity *se = &p->se, *curr; task_fork_fair() local
11588 struct sched_entity *se = &p->se; vruntime_normalized() local
11619 propagate_entity_cfs_rq(struct sched_entity *se) propagate_entity_cfs_rq() argument
11642 propagate_entity_cfs_rq(struct sched_entity *se) propagate_entity_cfs_rq() argument
11645 detach_entity_cfs_rq(struct sched_entity *se) detach_entity_cfs_rq() argument
11656 attach_entity_cfs_rq(struct sched_entity *se) attach_entity_cfs_rq() argument
11677 struct sched_entity *se = &p->se; detach_task_cfs_rq() local
11694 struct sched_entity *se = &p->se; attach_task_cfs_rq() local
11732 struct sched_entity *se = &p->se; set_next_task_fair() local
11768 struct sched_entity *se = &p->se; task_set_group_fair() local
11818 struct sched_entity *se; alloc_fair_sched_group() local
11859 struct sched_entity *se; online_fair_sched_group() local
11900 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) init_tg_cfs_entry() argument
11952 struct sched_entity *se = tg->se[i]; sched_group_set_shares() local
11959 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); sched_group_set_shares() local
11987 struct sched_entity *se = &task->se; get_rr_interval_fair() local
12162 struct sched_entity *se = &p->se; walt_fixup_sched_stats_fair() local
[all...]
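
Most of the fair.c hits above follow one shape: take the sched_entity embedded in a task (or owned by a task group) and walk it towards the root with for_each_sched_entity(), doing per-cfs_rq work at each level. A schematic of that pattern, assuming CONFIG_FAIR_GROUP_SCHED so that se->parent is populated; it is an illustration, not kernel code.

/* Schematic: how enqueue/dequeue-style paths climb the entity hierarchy. */
static void example_walk_entities(struct task_struct *p)
{
	struct sched_entity *se = &p->se;	/* the task's own entity */

	for_each_sched_entity(se) {		/* se, then se->parent, up to the root */
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		/* per-level work, e.g. update_load_avg(cfs_rq, se, UPDATE_TG) */
		(void)cfs_rq;
	}
}
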
debug.c
429 struct sched_entity *se = tg->se[cpu]; in print_cfs_group_stats() local
436 if (!se) in print_cfs_group_stats()
439 PN(se->exec_start); in print_cfs_group_stats()
440 PN(se->vruntime); in print_cfs_group_stats()
441 PN(se->sum_exec_runtime); in print_cfs_group_stats()
444 PN_SCHEDSTAT(se->statistics.wait_start); in print_cfs_group_stats()
445 PN_SCHEDSTAT(se->statistics.sleep_start); in print_cfs_group_stats()
446 PN_SCHEDSTAT(se->statistics.block_start); in print_cfs_group_stats()
447 PN_SCHEDSTAT(se in print_cfs_group_stats()
[all...]
pelt.c
212 * se has been already dequeued but cfs_rq->curr still points to it. in ___update_load_sum()
277 * se_weight() = se->load.weight
288 * load_avg = se_weight(se) * load_sum
292 * runnable_sum = \Sum se->avg.runnable_sum
293 * runnable_avg = \Sum se->avg.runnable_avg
295 * load_sum = \Sum se_weight(se) * se->avg.load_sum
296 * load_avg = \Sum se->avg.load_avg
299 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se) in __update_load_avg_blocked_se() argument
301 if (___update_load_sum(now, &se in __update_load_avg_blocked_se()
310 __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) __update_load_avg_se() argument
[all...]
/kernel/linux/linux-6.6/kernel/sched/
fair.c
378 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
380 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
381 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
395 #define for_each_sched_entity(se) \
396 for (; se; se = se->parent)
498 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
500 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
501 return se in is_same_group()
506 parent_entity(const struct sched_entity *se) parent_entity() argument
512 find_matching_se(struct sched_entity **se, struct sched_entity **pse) find_matching_se() argument
553 se_is_idle(struct sched_entity *se) se_is_idle() argument
581 parent_entity(struct sched_entity *se) parent_entity() argument
587 find_matching_se(struct sched_entity **se, struct sched_entity **pse) find_matching_se() argument
601 se_is_idle(struct sched_entity *se) se_is_idle() argument
639 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) entity_key() argument
706 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) avg_vruntime_add() argument
716 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) avg_vruntime_sub() argument
777 update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) update_entity_lag() argument
805 entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) entity_eligible() argument
837 struct sched_entity *se = __pick_first_entity(cfs_rq); update_min_vruntime() local
868 __update_min_deadline(struct sched_entity *se, struct rb_node *node) __update_min_deadline() argument
880 min_deadline_update(struct sched_entity *se, bool exit) min_deadline_update() argument
898 __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __enqueue_entity() argument
906 __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __dequeue_entity() argument
961 struct sched_entity *se = __node_2_se(node); __pick_eevdf() local
1017 struct sched_entity *se = __node_2_se(node); __pick_eevdf() local
1038 struct sched_entity *se = __pick_eevdf(cfs_rq); pick_eevdf() local
1088 update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) update_deadline() argument
1122 init_entity_runnable_average(struct sched_entity *se) init_entity_runnable_average() argument
1168 struct sched_entity *se = &p->se; post_init_entity_util_avg() local
1205 init_entity_runnable_average(struct sched_entity *se) init_entity_runnable_average() argument
1266 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_start_fair() argument
1283 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end_fair() argument
1309 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_enqueue_sleeper_fair() argument
1329 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_enqueue_fair() argument
1346 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_dequeue_fair() argument
1378 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_curr_start() argument
3595 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_enqueue() argument
3612 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_dequeue() argument
3676 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3683 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3693 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3695 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3698 reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) reweight_eevdf() argument
3805 reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) reweight_entity() argument
3859 struct sched_entity *se = &p->se; reweight_task() local
3984 update_cfs_group(struct sched_entity *se) update_cfs_group() argument
4001 reweight_entity(cfs_rq_of(se), se, shares); update_cfs_group() local
4005 update_cfs_group(struct sched_entity *se) update_cfs_group() argument
4138 set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) set_task_rq_fair() argument
4232 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_util() argument
4264 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_runnable() argument
4294 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_load() argument
4368 propagate_entity_load_avg(struct sched_entity *se) propagate_entity_load_avg() argument
4399 skip_blocked_update(struct sched_entity *se) skip_blocked_update() argument
4429 propagate_entity_load_avg(struct sched_entity *se) propagate_entity_load_avg() argument
4439 migrate_se_pelt_lag(struct sched_entity *se) migrate_se_pelt_lag() argument
4518 migrate_se_pelt_lag(struct sched_entity *se) migrate_se_pelt_lag() argument
4608 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
4663 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
4694 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_load_avg() argument
4740 sync_entity_load_avg(struct sched_entity *se) sync_entity_load_avg() argument
4753 remove_entity_load_avg(struct sched_entity *se) remove_entity_load_avg() argument
5165 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) update_load_avg() argument
5170 remove_entity_load_avg(struct sched_entity *se) remove_entity_load_avg() argument
5173 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
5175 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
5196 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) place_entity() argument
5302 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) enqueue_entity() argument
5369 __clear_buddies_next(struct sched_entity *se) __clear_buddies_next() argument
5380 clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) clear_buddies() argument
5389 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity() argument
5442 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) set_next_entity() argument
5789 struct sched_entity *se; throttle_cfs_rq() local
5880 struct sched_entity *se; unthrottle_cfs_rq() local
6658 struct sched_entity *se = &p->se; hrtick_start_fair() local
6739 check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se) check_preempt_from_idle() argument
6776 struct sched_entity *se = &p->se; enqueue_task_fair() local
6874 struct sched_entity *se = &p->se; dequeue_task_fair() local
8343 struct sched_entity *se = &p->se; migrate_task_rq_fair() local
8383 wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se) wakeup_latency_gran() argument
8415 wakeup_gran(struct sched_entity *se) wakeup_gran() argument
8450 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) wakeup_preempt_entity() argument
8469 set_next_buddy(struct sched_entity *se) set_next_buddy() argument
8486 struct sched_entity *se = &curr->se, *pse = &p->se; check_preempt_wakeup() local
8566 struct sched_entity *se; pick_task_fair() local
8600 struct sched_entity *se; pick_next_task_fair() local
8674 set_next_entity(cfs_rq_of(se), se); pick_next_task_fair() local
8751 struct sched_entity *se = &prev->se; put_prev_task_fair() local
8767 struct sched_entity *se = &curr->se; yield_task_fair() local
8794 struct sched_entity *se = &p->se; yield_to_task_fair() local
9565 struct sched_entity *se; __update_blocked_fair() local
9580 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); __update_blocked_fair() local
9605 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; update_cfs_rq_h_load() local
12940 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) __entity_slice_used() argument
12976 se_fi_update(const struct sched_entity *se, unsigned int fi_seq, bool forceidle) se_fi_update() argument
12994 struct sched_entity *se = &p->se; task_vruntime_update() local
13076 struct sched_entity *se = &curr->se; task_tick_fair() local
13099 struct sched_entity *se = &p->se, *curr; task_fork_fair() local
13145 propagate_entity_cfs_rq(struct sched_entity *se) propagate_entity_cfs_rq() argument
13171 propagate_entity_cfs_rq(struct sched_entity *se) propagate_entity_cfs_rq() argument
13174 detach_entity_cfs_rq(struct sched_entity *se) detach_entity_cfs_rq() argument
13196 attach_entity_cfs_rq(struct sched_entity *se) attach_entity_cfs_rq() argument
13209 struct sched_entity *se = &p->se; detach_task_cfs_rq() local
13216 struct sched_entity *se = &p->se; attach_task_cfs_rq() local
13250 struct sched_entity *se = &p->se; set_next_task_fair() local
13317 struct sched_entity *se; alloc_fair_sched_group() local
13358 struct sched_entity *se; online_fair_sched_group() local
13401 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) init_tg_cfs_entry() argument
13454 struct sched_entity *se = tg->se[i]; __sched_group_set_shares() local
13461 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); __sched_group_set_shares() local
13505 struct sched_entity *se = tg->se[i]; sched_group_set_idle() local
13575 struct sched_entity *se = &task->se; get_rr_interval_fair() local
13769 struct sched_entity *se = &p->se; walt_fixup_sched_stats_fair() local
[all...]
pelt.c
208 * se has been already dequeued but cfs_rq->curr still points to it. in ___update_load_sum()
273 * se_weight() = se->load.weight
284 * load_avg = se_weight(se) * load_sum
288 * runnable_sum = \Sum se->avg.runnable_sum
289 * runnable_avg = \Sum se->avg.runnable_avg
291 * load_sum = \Sum se_weight(se) * se->avg.load_sum
292 * load_avg = \Sum se->avg.load_avg
295 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se) in __update_load_avg_blocked_se() argument
297 if (___update_load_sum(now, &se in __update_load_avg_blocked_se()
306 __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) __update_load_avg_se() argument
[all...]
debug.c
494 struct sched_entity *se = tg->se[cpu]; in print_cfs_group_stats() local
503 if (!se) in print_cfs_group_stats()
506 PN(se->exec_start); in print_cfs_group_stats()
507 PN(se->vruntime); in print_cfs_group_stats()
508 PN(se->sum_exec_runtime); in print_cfs_group_stats()
512 stats = __schedstats_from_se(se); in print_cfs_group_stats()
526 P(se->load.weight); in print_cfs_group_stats()
528 P(se->avg.load_avg); in print_cfs_group_stats()
529 P(se in print_cfs_group_stats()
[all...]
/kernel/linux/linux-6.6/drivers/i2c/busses/
i2c-qcom-geni.c
17 #include <linux/soc/qcom/geni-se.h>
81 struct geni_se se; member
174 writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL); in qcom_geni_i2c_conf()
177 writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG); in qcom_geni_i2c_conf()
182 writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS); in qcom_geni_i2c_conf()
187 u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0); in geni_i2c_err_misc()
188 u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS); in geni_i2c_err_misc()
189 u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS); in geni_i2c_err_misc()
190 u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS); in geni_i2c_err_misc()
191 u32 dma = readl_relaxed(gi2c->se in geni_i2c_err_misc()
410 struct geni_se *se = &gi2c->se; geni_i2c_rx_one_msg() local
449 struct geni_se *se = &gi2c->se; geni_i2c_tx_one_msg() local
[all...]
/kernel/linux/linux-6.6/drivers/spi/
spi-geni-qcom.c
16 #include <linux/soc/qcom/geni-se.h>
79 struct geni_se se; member
108 struct geni_se *se = &mas->se; in spi_slv_setup() local
110 writel(SPI_SLAVE_EN, se->base + SE_SPI_SLAVE_EN); in spi_slv_setup()
111 writel(GENI_IO_MUX_0_EN, se->base + GENI_OUTPUT_CTRL); in spi_slv_setup()
112 writel(START_TRIGGER, se->base + SE_GENI_CFG_SEQ_START); in spi_slv_setup()
125 ret = geni_se_clk_freq_match(&mas->se, in get_spi_clk_cfg()
153 struct geni_se *se = &mas->se; in handle_se_timeout() local
255 struct geni_se *se = &mas->se; spi_geni_is_abort_still_pending() local
291 struct geni_se *se = &mas->se; spi_geni_set_cs() local
341 struct geni_se *se = &mas->se; spi_setup_word_len() local
362 struct geni_se *se = &mas->se; geni_spi_set_clock_and_bw() local
401 struct geni_se *se = &mas->se; setup_fifo_params() local
651 struct geni_se *se = &mas->se; spi_geni_init() local
745 struct geni_se *se = &mas->se; geni_spi_handle_tx() local
783 struct geni_se *se = &mas->se; geni_spi_handle_rx() local
831 struct geni_se *se = &mas->se; setup_se_xfer() local
944 struct geni_se *se = &mas->se; geni_spi_isr() local
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/v3d/
v3d_gem.c
402 u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) in v3d_job_init()
406 bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); in v3d_job_init()
425 if (se->in_sync_count && se->wait_stage == queue) { in v3d_job_init()
426 struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs); in v3d_job_init()
428 for (i = 0; i < se->in_sync_count; i++) { in v3d_job_init()
482 struct v3d_submit_ext *se, in v3d_attach_fences_and_unlock_reservation()
486 bool has_multisync = se && (se in v3d_attach_fences_and_unlock_reservation()
400 v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, void **container, size_t size, void (*free)(struct kref *ref), u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) v3d_job_init() argument
478 v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx, u32 out_sync, struct v3d_submit_ext *se, struct dma_fence *done_fence) v3d_attach_fences_and_unlock_reservation() argument
520 v3d_put_multisync_post_deps(struct v3d_submit_ext *se) v3d_put_multisync_post_deps() argument
533 v3d_get_multisync_post_deps(struct drm_file *file_priv, struct v3d_submit_ext *se, u32 count, u64 handles) v3d_get_multisync_post_deps() argument
589 struct v3d_submit_ext *se = data; v3d_get_multisync_submit_deps() local
667 struct v3d_submit_ext se = {0}; v3d_submit_cl_ioctl() local
820 struct v3d_submit_ext se = {0}; v3d_submit_tfu_ioctl() local
914 struct v3d_submit_ext se = {0}; v3d_submit_csd_ioctl() local
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdkfd/
kfd_mqd_manager.c
102 int i, se, sh, cu; in mqd_symmetrically_map_cu_mask() local
127 * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1. in mqd_symmetrically_map_cu_mask()
131 for (se = 0; se < cu_info.num_shader_engines; se++) in mqd_symmetrically_map_cu_mask()
133 cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]); in mqd_symmetrically_map_cu_mask()
164 for (se = 0; se < cu_inf in mqd_symmetrically_map_cu_mask()
[all...]
/kernel/liteos_a/testsuites/unittest/net/netdb/full/
net_netdb_test_012.cpp
49 struct servent se, *result = NULL; in GetServByPortRTest() local
54 int ret = getservbyport_r(htons(test_port_no), "tcp", &se, buf, sizeof buf, &result); in GetServByPortRTest()
57 ICUNIT_ASSERT_STRING_EQUAL(se.s_name, "ssh", -1); in GetServByPortRTest()
58 ICUNIT_ASSERT_STRING_EQUAL(se.s_proto, "tcp", -1); in GetServByPortRTest()
59 ICUNIT_ASSERT_STRING_EQUAL(se.s_aliases[0], "ssh", -1); in GetServByPortRTest()
64 ret = getservbyport_r(htons(test_port_no), "udp", &se, buf, sizeof buf, &result); in GetServByPortRTest()
67 ret = getservbyport_r(htons(test_port_no), "udp", &se, buf1, sizeof buf1, &result); in GetServByPortRTest()
70 ret = getservbyport_r(htons(test_port_no), "ud", &se, buf, sizeof buf, &result); in GetServByPortRTest()
net_netdb_test_017.cpp
46 struct servent se; in GetServByNameRTest() local
53 ret = getservbyname_r("ssh", "tcp", &se, buf1, sizeof buf1, &result); in GetServByNameRTest()
56 ICUNIT_ASSERT_STRING_EQUAL(se.s_name, "ssh", -1); in GetServByNameRTest()
57 ICUNIT_ASSERT_STRING_EQUAL(se.s_proto, "tcp", -1); in GetServByNameRTest()
58 ICUNIT_ASSERT_STRING_EQUAL(se.s_aliases[0], "ssh", -1); in GetServByNameRTest()
63 ret = getservbyname_r("ssh", "tcp", &se, buf2, sizeof buf2, &result); in GetServByNameRTest()
66 ret = getservbyname_r("ssh", "tp", &se, buf1, sizeof buf1, &result); in GetServByNameRTest()
69 ret = getservbyname_r("sh", "tcp", &se, buf1, sizeof buf1, &result); in GetServByNameRTest()
net_netdb_test_011.cpp
51 struct servent *se = getservbyport(htons(test_port_no), "tcp"); in GetServByPortTest() local
52 ICUNIT_ASSERT_NOT_EQUAL(se, NULL, -1); in GetServByPortTest()
53 ICUNIT_ASSERT_STRING_EQUAL(se->s_name, "ssh", -1); in GetServByPortTest()
54 ICUNIT_ASSERT_STRING_EQUAL(se->s_proto, "tcp", -1); in GetServByPortTest()
55 ICUNIT_ASSERT_STRING_EQUAL(se->s_aliases[0], "ssh", -1); in GetServByPortTest()
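
The three netdb tests above exercise getservbyport(), getservbyport_r() and getservbyname_r() against /etc/services entries. A standalone userspace sketch of the reentrant by-port lookup, mirroring the calls in the tests; port 22/"ssh" is assumed to be present in the services database.

#define _GNU_SOURCE
#include <netdb.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct servent se, *result = NULL;
	char buf[1024];
	int ret;

	/* Reentrant lookup of TCP port 22; result stays NULL if nothing matches. */
	ret = getservbyport_r(htons(22), "tcp", &se, buf, sizeof buf, &result);
	if (ret != 0 || result == NULL) {
		fprintf(stderr, "getservbyport_r failed: %d\n", ret);
		return 1;
	}
	printf("%s/%s\n", se.s_name, se.s_proto);
	return 0;
}
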
/kernel/linux/linux-5.10/net/nfc/
core.c
534 struct nfc_se *se; in nfc_find_se() local
536 list_for_each_entry(se, &dev->secure_elements, list) in nfc_find_se()
537 if (se->idx == se_idx) in nfc_find_se()
538 return se; in nfc_find_se()
546 struct nfc_se *se; in nfc_enable_se() local
549 pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); in nfc_enable_se()
573 se = nfc_find_se(dev, se_idx); in nfc_enable_se()
574 if (!se) { in nfc_enable_se()
579 if (se->state == NFC_SE_ENABLED) { in nfc_enable_se()
586 se in nfc_enable_se()
595 struct nfc_se *se; nfc_disable_se() local
867 struct nfc_se *se; nfc_add_se() local
901 struct nfc_se *se, *n; nfc_remove_se() local
959 struct nfc_se *se, *n; nfc_release() local
[all...]
/kernel/linux/linux-6.6/net/nfc/
core.c
536 struct nfc_se *se; in nfc_find_se() local
538 list_for_each_entry(se, &dev->secure_elements, list) in nfc_find_se()
539 if (se->idx == se_idx) in nfc_find_se()
540 return se; in nfc_find_se()
548 struct nfc_se *se; in nfc_enable_se() local
551 pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); in nfc_enable_se()
575 se = nfc_find_se(dev, se_idx); in nfc_enable_se()
576 if (!se) { in nfc_enable_se()
581 if (se->state == NFC_SE_ENABLED) { in nfc_enable_se()
588 se in nfc_enable_se()
597 struct nfc_se *se; nfc_disable_se() local
873 struct nfc_se *se; nfc_add_se() local
907 struct nfc_se *se, *n; nfc_remove_se() local
965 struct nfc_se *se, *n; nfc_release() local
[all...]
/kernel/linux/linux-6.6/drivers/tty/serial/
qcom_geni_serial.c
18 #include <linux/soc/qcom/geni-se.h>
119 struct geni_se se; member
196 port->se.base = uport->membase; in qcom_geni_serial_request_port()
493 geni_se_cancel_m_cmd(&port->se); in qcom_geni_serial_console_write()
496 geni_se_abort_m_cmd(&port->se); in qcom_geni_serial_console_write()
599 geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr, in qcom_geni_serial_stop_tx_dma()
605 geni_se_cancel_m_cmd(&port->se); in qcom_geni_serial_stop_tx_dma()
610 geni_se_abort_m_cmd(&port->se); in qcom_geni_serial_stop_tx_dma()
638 ret = geni_se_tx_dma_prep(&port->se, &xmit->buf[xmit->tail], in qcom_geni_serial_start_tx_dma()
677 geni_se_cancel_m_cmd(&port->se); in qcom_geni_serial_stop_tx_fifo()
1389 qcom_geni_serial_enable_early_read(struct geni_se *se, struct console *con) qcom_geni_serial_enable_early_read() argument
1396 qcom_geni_serial_enable_early_read(struct geni_se *se, struct console *con) qcom_geni_serial_enable_early_read() argument
1412 struct geni_se se; qcom_geni_serial_earlycon_setup() local
[all...]
/kernel/linux/linux-5.10/tools/testing/selftests/timers/
alarmtimer-suspend.c
123 struct sigevent se; in main() local
134 memset(&se, 0, sizeof(se)); in main()
135 se.sigev_notify = SIGEV_SIGNAL; in main()
136 se.sigev_signo = signum; in main()
137 se.sigev_value.sival_int = 0; in main()
144 if (timer_create(alarm_clock_id, &se, &tm1) == -1) { in main()
/kernel/linux/linux-6.6/tools/testing/selftests/timers/
alarmtimer-suspend.c
123 struct sigevent se; in main() local
134 memset(&se, 0, sizeof(se)); in main()
135 se.sigev_notify = SIGEV_SIGNAL; in main()
136 se.sigev_signo = signum; in main()
137 se.sigev_value.sival_int = 0; in main()
144 if (timer_create(alarm_clock_id, &se, &tm1) == -1) { in main()
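
Both copies of the alarmtimer selftest arm a POSIX timer from a zeroed struct sigevent exactly as shown above. A trimmed, standalone version of that setup; the clock, signal number and 1-second timeout are choices made for this illustration (CLOCK_BOOTTIME_ALARM needs CAP_WAKE_ALARM, so timer_create may fail for an unprivileged user).

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static void handler(int sig) { (void)sig; }	/* expiry only needs to interrupt pause() */

int main(void)
{
	struct sigevent se;
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
	timer_t tm1;

	signal(SIGRTMIN, handler);

	memset(&se, 0, sizeof(se));
	se.sigev_notify = SIGEV_SIGNAL;		/* deliver a signal on expiry */
	se.sigev_signo = SIGRTMIN;
	se.sigev_value.sival_int = 0;

	if (timer_create(CLOCK_BOOTTIME_ALARM, &se, &tm1) == -1) {
		perror("timer_create");
		return 1;
	}
	if (timer_settime(tm1, 0, &its, NULL) == -1) {
		perror("timer_settime");
		return 1;
	}
	pause();
	return 0;
}
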
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/
kfd_mqd_manager.c
106 int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1; in mqd_symmetrically_map_cu_mask() local
140 * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1. in mqd_symmetrically_map_cu_mask()
145 for (se = 0; se < cu_info.num_shader_engines; se++) in mqd_symmetrically_map_cu_mask()
147 cu_per_sh[se][sh] = hweight32( in mqd_symmetrically_map_cu_mask()
148 cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) * in mqd_symmetrically_map_cu_mask()
193 for (se = 0; se < cu_inf in mqd_symmetrically_map_cu_mask()
[all...]
