/device/soc/rockchip/common/vendor/drivers/rockchip/
rockchip_ramdisk.c
     29  * Look up and return a rd's page for a given sector.
     31  static struct page *rd_lookup_page(struct rd_device *rd, sector_t sector)   [rd_lookup_page: argument]
     37  page = phys_to_page(rd->mem_addr + (idx << PAGE_SHIFT));   [rd_lookup_page]
     44  * Copy n bytes from src to the rd starting at sector. Does not sleep.
     46  static void copy_to_rd(struct rd_device *rd, const void *src, sector_t sector, size_t n)   [copy_to_rd: argument]
     54  page = rd_lookup_page(rd, sector);   [copy_to_rd]
     65  page = rd_lookup_page(rd, sector);   [copy_to_rd]
     75  * Copy n bytes to dst from the rd starting at sector. Does not sleep.
     77  static void copy_from_rd(void *dst, struct rd_device *rd, sector_t sector, size_t n)   [copy_from_rd: argument]
     85  page = rd_lookup_page(rd, sector);   [copy_from_rd]
    112  rd_do_bvec(struct rd_device *rd, struct page *page, unsigned int len, unsigned int off, unsigned int op, sector_t sector)   [rd_do_bvec: argument]
    132  struct rd_device *rd = bio->bi_disk->private_data;   [rd_make_request: local]
    163  struct rd_device *rd = bdev->bd_disk->private_data;   [rd_rw_page: local]
    179  rd_init(struct rd_device *rd, int major, int minor)   [rd_init: argument]
    228  struct rd_device *rd;   [rd_probe: local]
    ...  (further matches omitted)
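This ramdisk backs the block device with one physically contiguous reserved-memory region, so rd_lookup_page() is pure arithmetic: scale the sector number down to a page index and offset it from rd->mem_addr (line 37). A minimal userspace sketch of that mapping; the 4 KiB/512-byte shift values and the pointer-math stand-in for phys_to_page() are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12                      /* 4 KiB pages (assumed) */
    #define SECTOR_SHIFT 9                       /* 512-byte sectors */
    #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)

    /* Contiguous backing store: the page index is the sector number
     * scaled down, and the page's physical address is base + (idx << PAGE_SHIFT),
     * i.e. the value the driver hands to phys_to_page(). */
    static uint64_t rd_page_phys(uint64_t mem_addr, uint64_t sector)
    {
        uint64_t idx = sector >> SECTORS_PER_PAGE_SHIFT; /* sector -> page index */
        return mem_addr + (idx << PAGE_SHIFT);
    }

    int main(void)
    {
        /* sector 9 lives in page 1 of the region (sectors 8..15) */
        printf("0x%llx\n", (unsigned long long)rd_page_phys(0x10000000ULL, 9));
        return 0;
    }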
/device/soc/rockchip/common/sdk_linux/kernel/sched/ |
topology.c
    339  * 5. schedutil is driving the frequency of all CPUs of the rd;
    364  struct root_domain *rd = cpu_rq(cpu)->rd;   [build_perf_domains: local]
    375  pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n", cpumask_pr_args(cpu_map));   [build_perf_domains]
    382  pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n", cpumask_pr_args(cpu_map));   [build_perf_domains]
    401  if (rd->pd) {   [build_perf_domains]
    402  pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n", cpumask_pr_args(cpu_map));   [build_perf_domains]
    425  WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n", cpumask_pr_args(cpu_map));   [build_perf_domains]
    432  tmp = rd->pd;   [build_perf_domains]
    433  rcu_assign_pointer(rd->pd, pd);   [build_perf_domains]
    458  struct root_domain *rd = container_of(rcu, struct root_domain, rcu);   [free_rootdomain: local]
    470  rq_attach_root(struct rq *rq, struct root_domain *rd)   [rq_attach_root: argument]
    511  sched_get_rd(struct root_domain *rd)   [sched_get_rd: argument]
    516  sched_put_rd(struct root_domain *rd)   [sched_put_rd: argument]
    525  init_rootdomain(struct root_domain *rd)   [init_rootdomain: argument]
    589  struct root_domain *rd;   [alloc_rootdomain: local]
    711  cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)   [cpu_attach_domain: argument]
    769  struct root_domain *rd;   [member]
   2402  struct root_domain *rd;   [partition_sched_domains_locked: local]
    ...  (further matches omitted)
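sched_get_rd()/sched_put_rd() pin a root_domain by reference count; the last put hands the object to free_rootdomain(), which the kernel defers past an RCU grace period (hence the container_of on the rcu head at line 458). A hedged userspace model of that get/put lifetime using C11 atomics; the struct layout is simplified and the RCU deferral is collapsed to an immediate free:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct root_domain {
        atomic_int refcount;
    };

    static void free_rootdomain(struct root_domain *rd)
    {
        /* In the kernel this runs only after an RCU grace period
         * (call_rcu); here we free immediately for illustration. */
        free(rd);
    }

    static void sched_get_rd(struct root_domain *rd)
    {
        atomic_fetch_add(&rd->refcount, 1);
    }

    static void sched_put_rd(struct root_domain *rd)
    {
        /* fetch_sub returns the old value: 1 means we dropped the
         * last reference and must destroy the domain. */
        if (atomic_fetch_sub(&rd->refcount, 1) == 1)
            free_rootdomain(rd);
    }

    int main(void)
    {
        struct root_domain *rd = calloc(1, sizeof(*rd));
        atomic_store(&rd->refcount, 1);  /* initial reference */
        sched_get_rd(rd);                /* second user */
        sched_put_rd(rd);
        sched_put_rd(rd);                /* last put frees here */
        puts("done");
        return 0;
    }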
rt.c
    294  return atomic_read(&rq->rd->rto_count);   [rt_overloaded]
    303  cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);   [rt_set_overload]
    314  atomic_inc(&rq->rd->rto_count);   [rt_set_overload]
    324  atomic_dec(&rq->rd->rto_count);   [rt_clear_overload]
    325  cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);   [rt_clear_overload]
    616  return this_rq()->rd->span;   [sched_rt_period_mask]
    711  struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;   [do_balance_runtime: local]
    715  weight = cpumask_weight(rd->span);   [do_balance_runtime]
    719  for_each_cpu(i, rd->span)   [do_balance_runtime]
    766  struct root_domain *rd = rq->rd;   [__disable_runtime: local]
   1812  struct root_domain *rd = cpu_rq(smp_processor_id())->rd;   [find_cas_cpu: local]
   2290  rto_next_cpu(struct root_domain *rd)   [rto_next_cpu: argument]
   2385  struct root_domain *rd = container_of(work, struct root_domain, rto_push_work);   [rto_push_irq_work_func: local]
    ...  (further matches omitted)
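rt_set_overload()/rt_clear_overload() publish RT overload in two pieces: the per-CPU bit in rd->rto_mask and the summary counter rd->rto_count that rt_overloaded() polls, ordered so that a nonzero count implies the mask bit is already visible. A rough userspace model with C11 atomics; the 64-bit mask width and the chosen memory orderings are assumptions standing in for the kernel's cpumask and barriers:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_ulong rto_mask;   /* which CPUs are RT-overloaded */
    static atomic_int   rto_count;  /* fast summary: any CPU overloaded? */

    static void rt_set_overload(int cpu)
    {
        atomic_fetch_or(&rto_mask, 1UL << cpu);
        /* Release ordering mirrors the kernel's barrier: a reader that
         * observes rto_count != 0 must also see the mask bit. */
        atomic_fetch_add_explicit(&rto_count, 1, memory_order_release);
    }

    static void rt_clear_overload(int cpu)
    {
        /* Same order as the source: drop the count, then the bit. */
        atomic_fetch_sub_explicit(&rto_count, 1, memory_order_release);
        atomic_fetch_and(&rto_mask, ~(1UL << cpu));
    }

    static bool rt_overloaded(void)
    {
        return atomic_load_explicit(&rto_count, memory_order_acquire) != 0;
    }

    int main(void)
    {
        rt_set_overload(3);
        printf("overloaded=%d mask=0x%lx\n", rt_overloaded(),
               atomic_load(&rto_mask));
        rt_clear_overload(3);
        printf("overloaded=%d\n", rt_overloaded());
        return 0;
    }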
sched.h
    909  * CPUs of the rd. Protected by RCU.
    919  extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
    920  extern void sched_get_rd(struct root_domain *rd);
    921  extern void sched_put_rd(struct root_domain *rd);
   1053  struct root_domain *rd;   [member]
   2103  if (!READ_ONCE(rq->rd->overload)) {   [add_nr_running]
   2104  WRITE_ONCE(rq->rd->overload, 1);   [add_nr_running]
   2420  struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);   [__dl_update: local]
   2424  for_each_cpu_and(i, rd->span, cpu_active_mask)   [__dl_update]
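In add_nr_running() the rd->overload flag is read with READ_ONCE() before being written, so the cache line shared by every CPU of the root domain is only dirtied on the 0 -> 1 transition. A sketch of that test-before-write idiom, with relaxed C11 atomics modeling READ_ONCE/WRITE_ONCE:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int overload;  /* shared across all CPUs of the rd */

    /* Avoid dirtying the shared line when the flag is already set:
     * read first, store only on a transition. */
    static void set_overloaded(void)
    {
        if (!atomic_load_explicit(&overload, memory_order_relaxed))
            atomic_store_explicit(&overload, 1, memory_order_relaxed);
    }

    int main(void)
    {
        set_overloaded();
        set_overloaded();  /* second call reads 1 and skips the store */
        printf("overload=%d\n", atomic_load(&overload));
        return 0;
    }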
fair.c
   4224  unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity;   [task_fits_max]
   5746  if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {   [update_overutilized_status]
   5747  WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);   [update_overutilized_status]
   5748  trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);   [update_overutilized_status]
   6916  * The capacity state of CPUs of the current rd can be driven by CPUs   [compute_energy]
   6917  * of another rd if they belong to the same pd. So, account for the   [compute_energy]
   6919  * instead of the rd span.   [compute_energy]
   6921  * If an entire pd is outside of the current rd, it will not appear in   [compute_energy]
   6993  struct root_domain *rd = cpu_rq(smp_processor_id())->rd;   [find_energy_efficient_cpu: local]
   9573  struct root_domain *rd = env->dst_rq->rd;   [update_sd_lb_stats: local]
   9582  struct root_domain *rd = env->dst_rq->rd;   [update_sd_lb_stats: local]
   9789  struct root_domain *rd = env->dst_rq->rd;   [find_busiest_group: local]
  12324  sched_trace_rd_span(struct root_domain *rd)   [sched_trace_rd_span: argument]
    ...  (further matches omitted)
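task_fits_max() asks whether a task's utilization fits under rd->max_cpu_capacity, the biggest CPU capacity in the root domain. A simplified model of such a fit test; the 1280/1024 margin, which leaves roughly 20% headroom, is an assumption mirroring the kernel's fits_capacity() helper:

    #include <stdbool.h>
    #include <stdio.h>

    /* util must stay below capacity/1.25 for the CPU to be a fit. */
    static bool fits_capacity(unsigned long util, unsigned long capacity)
    {
        return util * 1280 < capacity * 1024;
    }

    int main(void)
    {
        unsigned long max_cpu_capacity = 1024;  /* biggest CPU in the rd */
        printf("util 600 fits: %d\n", fits_capacity(600, max_cpu_capacity));
        printf("util 900 fits: %d\n", fits_capacity(900, max_cpu_capacity));
        return 0;
    }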
core.c
   5865  cpumask_t *span = rq->rd->span;   [__sched_setscheduler]
   5873  rq->rd->dl_bw.bw == 0) {   [__sched_setscheduler]
   6502  if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {   [sched_setaffinity]
   7215  !cpumask_intersects(task_rq(p)->rd->span, cs_effective_cpus)) {   [task_can_attach]
   7553  if (rq->rd) {   [do_isolation_work_cpu_stop]
   7554  BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));   [do_isolation_work_cpu_stop]
   7560  if (rq->rd) {   [do_isolation_work_cpu_stop]
   7775  cpumask_set_cpu(rq->cpu, rq->rd->online);   [set_rq_online]
   7799  cpumask_clear_cpu(rq->cpu, rq->rd->online);   [set_rq_offline]
   7885  if (rq->rd) {   [sched_cpu_activate]
    ...  (further matches omitted)
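The affinity and cpuset checks here are plain cpumask predicates against rd->span: sched_setaffinity() rejects a deadline task whose new mask does not cover the whole span, and task_can_attach() requires at least one overlapping CPU. A bitmask model of the two predicates, with a single 64-bit word standing in for cpumask_t:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t cpumask_t;  /* one bit per CPU, 64 CPUs assumed */

    static bool cpumask_subset(cpumask_t src, cpumask_t dst)
    {
        return (src & ~dst) == 0;        /* every CPU of src is in dst */
    }

    static bool cpumask_intersects(cpumask_t a, cpumask_t b)
    {
        return (a & b) != 0;             /* at least one common CPU */
    }

    int main(void)
    {
        cpumask_t rd_span  = 0x0F;       /* CPUs 0-3 in the root domain */
        cpumask_t new_mask = 0x03;       /* affinity request: CPUs 0-1 */

        /* sched_setaffinity()-style check: a DL task may not be confined
         * to fewer CPUs than its root domain span. */
        printf("span subset of new_mask: %d\n",
               cpumask_subset(rd_span, new_mask));
        printf("span intersects new_mask: %d\n",
               cpumask_intersects(rd_span, new_mask));
        return 0;
    }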
/device/soc/rockchip/common/sdk_linux/drivers/gpio/ |
gpiolib-of.c
    747  struct of_reconfig_data *rd = arg;   [of_gpio_notify: local]
    759  if (!of_property_read_bool(rd->dn, "gpio-hog")) {   [of_gpio_notify]
    763  if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {   [of_gpio_notify]
    767  chip = of_find_gpiochip_by_node(rd->dn->parent);   [of_gpio_notify]
    772  ret = of_gpiochip_add_hog(chip, rd->dn);   [of_gpio_notify]
    774  pr_err("%s: failed to add hogs for %pOF\n", __func__, rd->dn);   [of_gpio_notify]
    775  of_node_clear_flag(rd->dn, OF_POPULATED);   [of_gpio_notify]
    781  if (!of_node_check_flag(rd->dn, OF_POPULATED)) {   [of_gpio_notify]
    785  chip = of_find_gpiochip_by_node(rd->dn->parent);   [of_gpio_notify]
    790  of_gpiochip_remove_hog(chip, rd->dn);   [of_gpio_notify]
    ...  (further matches omitted)
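of_gpio_notify() is an OF reconfig notifier: on node attach it adds GPIO hogs once, on detach it removes them, with the OF_POPULATED flag guarding against double handling. A hedged skeleton of that dispatch; the enum, node struct, and print stubs below are illustrative stand-ins for the devicetree APIs, not their real signatures:

    #include <stdbool.h>
    #include <stdio.h>

    enum of_reconfig { ATTACH_NODE, DETACH_NODE };

    struct node { bool populated; const char *name; };

    /* Test-and-set guard: returns the previous value, so a second
     * attach notification for the same node is a no-op. */
    static bool test_and_set_populated(struct node *n)
    {
        bool old = n->populated;
        n->populated = true;
        return old;
    }

    static int of_gpio_notify(enum of_reconfig action, struct node *dn)
    {
        switch (action) {
        case ATTACH_NODE:
            if (test_and_set_populated(dn))
                return 0;                   /* already handled */
            printf("add hogs for %s\n", dn->name);
            return 0;
        case DETACH_NODE:
            if (!dn->populated)
                return 0;                   /* never populated */
            printf("remove hogs for %s\n", dn->name);
            dn->populated = false;
            return 0;
        }
        return 0;
    }

    int main(void)
    {
        struct node gpio = { .populated = false, .name = "gpio-hog@0" };
        of_gpio_notify(ATTACH_NODE, &gpio);
        of_gpio_notify(ATTACH_NODE, &gpio); /* guarded, no duplicate */
        of_gpio_notify(DETACH_NODE, &gpio);
        return 0;
    }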
/device/soc/rockchip/common/vendor/drivers/media/platform/rockchip/isp/ |
videobuf2-rdma-sg.c
    320  struct scatterlist *rd, *wr;   [vb2_dma_sg_dmabuf_ops_attach: local]
    340  rd = buf->dma_sgt->sgl;   [vb2_dma_sg_dmabuf_ops_attach]
    343  sg_set_page(wr, sg_page(rd), rd->length, rd->offset);   [vb2_dma_sg_dmabuf_ops_attach]
    344  rd = sg_next(rd);   [vb2_dma_sg_dmabuf_ops_attach]
/device/soc/rockchip/rk3588/kernel/drivers/media/platform/rockchip/isp/ |
videobuf2-rdma-sg.c
    311  struct scatterlist *rd, *wr;   [vb2_dma_sg_dmabuf_ops_attach: local]
    330  rd = buf->dma_sgt->sgl;   [vb2_dma_sg_dmabuf_ops_attach]
    333  sg_set_page(wr, sg_page(rd), rd->length, rd->offset);   [vb2_dma_sg_dmabuf_ops_attach]
    334  rd = sg_next(rd);   [vb2_dma_sg_dmabuf_ops_attach]
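Both copies of vb2_dma_sg_dmabuf_ops_attach() above clone the buffer's scatter-gather table for a new dmabuf attachment by walking the read cursor (rd) and write cursor (wr) in lockstep, copying each entry's page, length, and offset. A userspace model of that loop; the struct scatterlist here is a simplified stand-in for the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for struct scatterlist: one chunk of a
     * buffer, linked to the next entry. */
    struct scatterlist {
        void *page;
        unsigned int length;
        unsigned int offset;
        struct scatterlist *next;
    };

    static struct scatterlist *sg_next(struct scatterlist *sg)
    {
        return sg->next;
    }

    /* Copy the geometry of each source entry (rd) into the target
     * table (wr), entry by entry, as the attach callback does. */
    static void sg_table_copy(struct scatterlist *rd, struct scatterlist *wr)
    {
        while (rd && wr) {
            wr->page   = rd->page;
            wr->length = rd->length;
            wr->offset = rd->offset;
            rd = sg_next(rd);
            wr = sg_next(wr);
        }
    }

    int main(void)
    {
        char pg0[4096], pg1[4096];
        struct scatterlist src[2] = {
            { pg0, 4096, 0, &src[1] },
            { pg1, 2048, 0, NULL },
        };
        struct scatterlist dst[2] = { { 0 }, { 0 } };
        dst[0].next = &dst[1];

        sg_table_copy(src, dst);
        printf("dst[1].length = %u\n", dst[1].length);  /* 2048 */
        return 0;
    }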
/device/soc/rockchip/common/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd_wifi6/ |
dhd_msgbuf.c
    402  uint16 rd; /* read index */   [member]
    796  uint16 rd, wr;   [dhd_prot_is_cmpl_ring_empty: local]
    805  rd = flow_ring->rd;   [dhd_prot_is_cmpl_ring_empty]
    807  dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);   [dhd_prot_is_cmpl_ring_empty]
    809  ret = (wr == rd) ? TRUE : FALSE;   [dhd_prot_is_cmpl_ring_empty]
    817  DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,   [dhd_prot_dump_ring_ptrs]
    818  ring->curr_rd, ring->rd, ring->wr));   [dhd_prot_dump_ring_ptrs]
    874  " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",   [dhd_prot_d2h_sync_livelock]
    877  ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));   [dhd_prot_d2h_sync_livelock]
   5258  uint16 rd = 0;   [dhd_prot_process_msgbuf_edl: local]
   7883  uint16 rd = 0;   [dhd_edl_ring_hdr_write: local]
   9109  uint16 rd;   [dhd_prot_get_read_addr: local]
   9529  uint16 rd, wr;   [dhd_prot_print_flow_ring: local]
   9884  uint16 rd, wr;   [dhd_prot_debug_info_print: local]
    ...  (further matches omitted)
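Throughout the msgbuf protocol, rd and wr are the 16-bit read and write indices of a DMA ring shared with the dongle; dhd_prot_is_cmpl_ring_empty() treats wr == rd as empty. A small model of those index semantics; the ring depth and the leave-one-slot-free fullness convention are assumptions for the sketch, not taken from the driver:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_DEPTH 8   /* slots; assumption for the sketch */

    struct ring {
        uint16_t rd;  /* read index: next slot the consumer takes */
        uint16_t wr;  /* write index: next slot the producer fills */
    };

    static bool ring_is_empty(const struct ring *r)
    {
        return r->wr == r->rd;                     /* as in the driver */
    }

    static bool ring_is_full(const struct ring *r)
    {
        /* One slot stays unused so full and empty are distinguishable. */
        return (uint16_t)((r->wr + 1) % RING_DEPTH) == r->rd;
    }

    int main(void)
    {
        struct ring r = { .rd = 0, .wr = 0 };
        printf("empty=%d\n", ring_is_empty(&r));   /* 1 */
        r.wr = (r.wr + 1) % RING_DEPTH;            /* produce one item */
        printf("empty=%d full=%d\n", ring_is_empty(&r), ring_is_full(&r));
        return 0;
    }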
/device/soc/hisilicon/hi3861v100/sdk_liteos/platform/os/Huawei_LiteOS/components/lib/libc/musl/include/arpa/ |
nameser.h
    331  unsigned rd: 1;   [member]
    338  unsigned rd :1;   [member]
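In nameser.h, rd is the DNS header's 1-bit "recursion desired" flag, declared twice because the HEADER bitfield is laid out once per byte order. A sketch of setting the same bit when composing a query header by hand; the flags-byte position follows RFC 1035, and build_query_header() is a hypothetical helper, not a libc API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* RFC 1035 header: ID, two flag bytes, then four 16-bit counts.
     * RD is the least significant bit of the first flags byte. */
    static void build_query_header(uint8_t hdr[12], uint16_t id, int recursion)
    {
        memset(hdr, 0, 12);
        hdr[0] = id >> 8;
        hdr[1] = id & 0xff;
        if (recursion)
            hdr[2] |= 0x01;   /* RD: ask the server to recurse */
        hdr[5] = 1;           /* QDCOUNT = 1 question */
    }

    int main(void)
    {
        uint8_t hdr[12];
        build_query_header(hdr, 0x1234, 1);
        printf("flags byte: 0x%02x\n", hdr[2]);   /* 0x01 */
        return 0;
    }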
/device/soc/rockchip/common/sdk_linux/include/trace/events/ |
sched.h
    536  DECLARE_TRACE(sched_overutilized_tp, TP_PROTO(struct root_domain *rd, bool overutilized), TP_ARGS(rd, overutilized));
/device/soc/rockchip/common/sdk_linux/include/linux/ |
sched.h
   2248  const struct cpumask *sched_trace_rd_span(struct root_domain *rd);