/kernel/linux/linux-5.10/include/drm/ |
drm_vma_manager.h | Declarations for drm_vma_node_allow(), drm_vma_node_revoke() and drm_vma_node_is_allowed(), the drm_vma_offset_exact_lookup_locked() helper (same as drm_vma_offset_lookup_locked() but without allowing any offset into the node), and the inline helpers drm_vma_node_reset(), drm_vma_node_start(), drm_vma_node_size(), drm_vma_node_offset_addr(), drm_vma_node_unmap() and drm_vma_node_verify_access(). |
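The helpers above are the public face of the DRM VMA offset manager. A minimal sketch of how a GEM-style object might use them, assuming the manager was already set up with drm_vma_offset_manager_init(); struct demo_obj and the function names are illustrative, not taken from any driver:

```c
#include <drm/drm_vma_manager.h>
#include <drm/drm_file.h>
#include <linux/printk.h>

struct demo_obj {
	struct drm_vma_offset_node vma_node;
	unsigned long npages;
};

/* Assumed to have been initialized elsewhere with drm_vma_offset_manager_init(). */
static struct drm_vma_offset_manager mgr;

static int demo_obj_map_setup(struct demo_obj *obj, struct drm_file *file)
{
	int ret;

	/* Reserve a range of fake mmap offsets for this object. */
	ret = drm_vma_offset_add(&mgr, &obj->vma_node, obj->npages);
	if (ret)
		return ret;

	/* Allow this drm_file to map the node; checked by drm_vma_node_is_allowed(). */
	ret = drm_vma_node_allow(&obj->vma_node, file);
	if (ret) {
		drm_vma_offset_remove(&mgr, &obj->vma_node);
		return ret;
	}

	/* Userspace mmap()s the object at this byte offset. */
	pr_debug("mmap offset: %llx\n",
		 (unsigned long long)drm_vma_node_offset_addr(&obj->vma_node));
	return 0;
}

static void demo_obj_map_teardown(struct demo_obj *obj, struct drm_file *file)
{
	drm_vma_node_revoke(&obj->vma_node, file);
	drm_vma_offset_remove(&mgr, &obj->vma_node);
}
```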
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/ |
kfd_interrupt.c | kfd_interrupt_init() sizes the interrupt-handler FIFO with kfifo_alloc() (KFD_IH_NUM_ENTRIES * ih_ring_entry_size), creates the high-priority "KFD IH" workqueue with alloc_workqueue(), and initializes node->interrupt_lock and the IH work item, unwinding the kfifo on failure; also matched: kfd_interrupt_exit(), enqueue_ih_ring_entry() and dequeue_ih_ring_entry(). |
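The init path here is the common "kfifo fed from the interrupt path, drained by a dedicated workqueue" pattern. A minimal sketch of that pattern under assumed names and sizes (ih_ctx, "ih-demo", NUM_ENTRIES and ENTRY_SIZE are illustrative, not the amdkfd driver's own values):

```c
#include <linux/kfifo.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

#define NUM_ENTRIES	1024	/* illustrative */
#define ENTRY_SIZE	16	/* illustrative */

struct ih_ctx {
	struct kfifo fifo;
	struct workqueue_struct *wq;
	struct work_struct work;
	spinlock_t lock;
};

static void ih_work_fn(struct work_struct *work)
{
	struct ih_ctx *ctx = container_of(work, struct ih_ctx, work);
	u8 entry[ENTRY_SIZE];

	/* Drain entries that the interrupt path queued into the FIFO. */
	while (kfifo_out(&ctx->fifo, entry, sizeof(entry)) == sizeof(entry))
		; /* process one entry */
}

static int ih_ctx_init(struct ih_ctx *ctx)
{
	int r;

	r = kfifo_alloc(&ctx->fifo, NUM_ENTRIES * ENTRY_SIZE, GFP_KERNEL);
	if (r)
		return r;

	ctx->wq = alloc_workqueue("ih-demo", WQ_HIGHPRI, 1);
	if (!ctx->wq) {
		kfifo_free(&ctx->fifo);
		return -ENOMEM;
	}

	spin_lock_init(&ctx->lock);
	INIT_WORK(&ctx->work, ih_work_fn);
	return 0;
}

static void ih_ctx_exit(struct ih_ctx *ctx)
{
	destroy_workqueue(ctx->wq);
	kfifo_free(&ctx->fifo);
}
```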
/kernel/linux/linux-5.10/drivers/firmware/efi/ |
dev-path-parser.c | parse_acpi_path() rejects nodes whose header.length is not 12, then decodes the compressed ACPI HID into its three vendor letters plus the 16-bit product ID and formats the UID; parse_pci_path() expects a 6-byte node and builds the devfn with PCI_DEVFN(); also matched: parse_end_path() and efi_get_device_by_path(). |
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/ |
i915_active.c | struct active_node embeds an rb_node, and fetch_node() wraps rb_entry(READ_ONCE(...)); barrier_to_ll() and __barrier_to_engine()/barrier_to_engine() reuse the fence callback's list node to carry barrier state; further matches in active_instance(), ____active_del_barrier(), __active_del_barrier(), is_idle_barrier(), reuse_idle_barrier(), i915_active_acquire_preallocate_barrier(), i915_active_acquire_barrier(), ll_to_fence_slot() and i915_request_add_active_barriers(). |
/kernel/linux/linux-6.6/lib/ |
xarray.c | In the file's naming convention, @node is the xa_node being operated on and @parent is the xa_node one step closer to the head; the per-node mark helpers node_marks(), node_get_mark(), node_set_mark(), node_clear_mark(), node_any_mark() and node_mark_all() wrap the bitmaps in xa_node->marks[]; further matches in get_offset(), xas_descend(), xas_load(), xa_node_free(), xas_destroy(), xas_update(), xas_alloc(), xas_shrink(), xas_delete_node(), xas_free_nodes(), xas_expand(), xas_create(), xas_create_range(), update_node(), xas_store(), xas_set_mark(), xas_clear_mark(), node_get_marks(), node_set_marks(), xas_split_alloc(), xas_split(), xas_pause(), xas_find_conflict(), xas_sibling(), xa_delete_node() and the xa_dump_*() debug helpers. |
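The matched xas_*/node_* functions are XArray internals; callers normally go through the public xa_* API, which is what ends up exercising these node and mark helpers. A small sketch with illustrative names (struct item, demo_store, demo_walk):

```c
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct item {
	unsigned long id;
};

static DEFINE_XARRAY(items);	/* internally spinlock-protected */

static int demo_store(unsigned long index)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);
	void *old;

	if (!it)
		return -ENOMEM;
	it->id = index;

	/* xa_store() returns the previous entry, or an xa_err() pointer. */
	old = xa_store(&items, index, it, GFP_KERNEL);
	if (xa_is_err(old)) {
		kfree(it);
		return xa_err(old);
	}
	kfree(old);	/* replace semantics: free whatever was there before */

	/* Marks are the per-node bitmaps handled by the helpers above. */
	xa_set_mark(&items, index, XA_MARK_0);
	return 0;
}

static void demo_walk(void)
{
	struct item *it;
	unsigned long index;

	/* Iterate only entries carrying XA_MARK_0. */
	xa_for_each_marked(&items, index, it, XA_MARK_0)
		pr_info("item %lu at index %lu\n", it->id, index);
}
```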
timerqueue.c | timerqueue_add() warns (WARN_ON_ONCE) if the node's rb_node is not empty, then inserts it with rb_add_cached(), keeping the queue sorted by the node's expires time; also matched: timerqueue_del() and timerqueue_iterate_next(). |
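A minimal sketch of driving this queue from caller code; struct my_event and the 100 ms expiry are illustrative assumptions:

```c
#include <linux/timerqueue.h>
#include <linux/ktime.h>

struct my_event {
	struct timerqueue_node node;
	/* payload ... */
};

static struct timerqueue_head queue;

static void demo(void)
{
	static struct my_event ev;
	struct timerqueue_node *next;

	timerqueue_init_head(&queue);
	timerqueue_init(&ev.node);		/* node must be "empty" before add */
	ev.node.expires = ktime_add_ms(ktime_get(), 100);

	timerqueue_add(&queue, &ev.node);	/* returns true if new earliest node */

	next = timerqueue_getnext(&queue);	/* cached leftmost, i.e. earliest expiry */
	if (next == &ev.node)
		timerqueue_del(&queue, &ev.node);
}
```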
/kernel/linux/linux-5.10/lib/ |
xarray.c | The 5.10 copy of lib/xarray.c, with the same matches as the 6.6 entry above: the node mark helpers plus get_offset(), xas_descend(), xas_load(), xa_node_free(), xas_destroy(), xas_update(), xas_alloc(), xas_shrink(), xas_delete_node(), xas_free_nodes(), xas_expand(), xas_create(), xas_create_range(), update_node(), xas_store(), xas_set_mark(), xas_clear_mark(), xas_split_alloc(), xas_split(), xas_pause(), xas_find_conflict(), xas_sibling(), xa_delete_node() and the xa_dump_*() helpers. |
/kernel/linux/linux-6.6/drivers/scsi/elx/libefc/ |
efc_els.c | The ELS logging macros print node->display_name; efc_els_io_alloc()/efc_els_io_alloc_size() check node->els_io_enabled before allocating and bind the new ELS I/O request to the node (els->node = node); further matches in the request/response paths (_efc_els_io_free(), efc_els_req_cb(), efc_els_send_req(), efc_els_acc_cb(), efc_els_send_rsp()) and in the ELS/CT senders efc_send_plogi(), efc_send_flogi(), efc_send_fdisc(), efc_send_prli(), efc_send_logo(), efc_send_adisc(), efc_send_scr(), efc_send_ls_rjt(), efc_send_plogi_acc(), efc_send_flogi_p2p_acc(), efc_send_prli_acc(), efc_send_prlo_acc(), efc_send_ls_acc(), efc_send_logo_acc(), efc_send_adisc_acc(), efc_ns_send_rftid(), efc_ns_send_rffid(), efc_ns_send_gidpt(), efc_send_ct_rsp() and efc_send_bls_acc(). |
/kernel/linux/linux-6.6/scripts/gdb/linux/ |
radixtree.py | gdb helpers for walking a radix tree / XArray: is_internal_node() tests the LX_RADIX_TREE_ENTRY_MASK bits, entry_to_node() strips LX_RADIX_TREE_INTERNAL_NODE from the tagged pointer, node_maxindex() computes (LX_RADIX_TREE_MAP_SIZE << node['shift']) - 1, and the lookup starts from root['xa_head']. |
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
i915_active.c | The 5.10 copy of the same file: struct active_node with its embedded rb_node, fetch_node(), barrier_to_ll(), __barrier_to_engine() and barrier_to_engine(), plus matches in active_instance(), ____active_del_barrier(), __active_del_barrier(), is_idle_barrier(), reuse_idle_barrier(), i915_active_acquire_preallocate_barrier(), i915_active_acquire_barrier(), ll_to_fence_slot() and i915_request_add_active_barriers(). |
/kernel/linux/linux-5.10/fs/hfsplus/ |
btree.c | hfsplus_calc_btree_clump_size() rounds the clump size to a multiple of the node and block size (falling back to a single block or node when that would exceed the clump size); hfs_btree_close() drains tree->node_hash and complains via pr_crit() about any node whose refcount is still non-zero; also matched: hfs_btree_write(), hfs_bmap_new_bmap(), hfs_bmap_alloc() and hfs_bmap_free(). |
/kernel/linux/linux-6.6/fs/hfsplus/ |
btree.c | The 6.6 copy of the same file, with the same matches: hfsplus_calc_btree_clump_size(), hfs_btree_close(), hfs_btree_write(), hfs_bmap_new_bmap(), hfs_bmap_alloc() and hfs_bmap_free(). |
/kernel/linux/linux-5.10/drivers/interconnect/imx/ |
imx.c | imx_icc_node_set() pulls the imx_icc_node from node->data, derives the minimum frequency as (node->avg_bw + node->peak_bw) * bw_mul, and logs the request against the QoS device; also matched: imx_icc_node_destroy(), imx_icc_node_init_qos(), imx_icc_node_add(), imx_icc_unregister_nodes() and imx_icc_register_nodes(). |
/kernel/linux/linux-5.10/include/linux/ |
rbtree_latch.h | struct latch_tree_node carries two rb_nodes (node[2]), one per latch copy; __lt_from_rb() maps an rb_node back with container_of(), __lt_insert() links with rb_link_node_rcu() + rb_insert_color(), __lt_erase() uses rb_erase(), and __lt_find() walks rcu_dereference_raw(ltr->tree[idx].rb_node) through the caller-supplied comp() callback; the public entry points are latch_tree_insert(), latch_tree_erase() and latch_tree_find(). |
/kernel/linux/linux-6.6/include/linux/ |
rbtree_latch.h | The 6.6 copy of the same header, with the same matches: node[2], __lt_from_rb(), __lt_insert(), __lt_erase(), __lt_find(), latch_tree_insert(), latch_tree_erase() and latch_tree_find(). |
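Users of this header embed a latch_tree_node and supply the less()/comp() pair through latch_tree_ops. A sketch under assumed names (struct region, region_ops and the address-range semantics are illustrative); writers serialize externally and readers call latch_tree_find() under rcu_read_lock():

```c
#include <linux/rbtree_latch.h>
#include <linux/kernel.h>	/* container_of() */

struct region {
	unsigned long start, end;
	struct latch_tree_node ltn;	/* two rb_nodes, one per latch copy */
};

static struct region *ltn_to_region(struct latch_tree_node *n)
{
	return container_of(n, struct region, ltn);
}

/* less(): ordering used on insert. */
static bool region_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return ltn_to_region(a)->start < ltn_to_region(b)->start;
}

/* comp(): three-way compare of a lookup key against a node, used on find. */
static int region_comp(void *key, struct latch_tree_node *n)
{
	unsigned long addr = (unsigned long)key;
	struct region *r = ltn_to_region(n);

	if (addr < r->start)
		return -1;
	if (addr >= r->end)
		return 1;
	return 0;
}

static const struct latch_tree_ops region_ops = {
	.less = region_less,
	.comp = region_comp,
};

static struct latch_tree_root regions;

static void region_add(struct region *r)	/* caller holds the writer lock */
{
	latch_tree_insert(&r->ltn, &regions, &region_ops);
}

static struct region *region_find(unsigned long addr)	/* caller holds rcu_read_lock() */
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &regions, &region_ops);
	return n ? ltn_to_region(n) : NULL;
}
```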
/kernel/liteos_m/components/exchook/ |
los_exchook.c | DoExcHookInRegOrder() recurses to the end of the per-type list before calling node->excHookFn(excType), so hooks run in registration order; GetFreeNode() pops an entry from the free list kept at g_excHeads[EXC_TYPE_END], returning NULL when no free node is left; further matches in LOS_RegExcHook() and LOS_UnRegExcHook(). |
/kernel/linux/linux-5.10/fs/hfs/ |
btree.c | The HFS (non-plus) counterpart: hfs_btree_close() walks tree->node_hash and pr_err()s about any node that still has users before freeing it with hfs_bnode_free(); also matched: hfs_btree_write(), hfs_bmap_new_bmap(), hfs_bmap_alloc() and hfs_bmap_free(). |
/kernel/linux/linux-6.6/fs/hfs/ |
btree.c | The 6.6 copy of the HFS b-tree code, with the same matches: hfs_btree_close(), hfs_btree_write(), hfs_bmap_new_bmap(), hfs_bmap_alloc() and hfs_bmap_free(). |
/kernel/linux/linux-5.10/arch/powerpc/platforms/cell/spufs/ |
sched.c | spu_update_sched_info() reads the context's current node (ctx->spu->node) and updates the scheduling info under cbe_spu_info[node].list_mutex; __node_allowed()/node_allowed() test the context's CPU mask against cpumask_of_node(); further matches in do_notify_spus_active(), aff_ref_location(), ctx_location(), spu_get_idle(), find_victim(), __spu_schedule(), spu_unschedule(), grab_runnable_context(), count_active_contexts(), spusched_thread(), spuctx_switch_state() and spu_sched_exit(). |
/kernel/linux/linux-6.6/arch/powerpc/platforms/cell/spufs/ |
sched.c | The 6.6 copy of the SPU scheduler, with the same matches: spu_update_sched_info(), __node_allowed(), node_allowed(), do_notify_spus_active(), aff_ref_location(), ctx_location(), spu_get_idle(), find_victim(), __spu_schedule(), spu_unschedule(), grab_runnable_context(), count_active_contexts(), spusched_thread(), spuctx_switch_state() and spu_sched_exit(). |
/kernel/linux/linux-6.6/scripts/dtc/ |
dtc.h | struct node is the device tree compiler's tree node, linking to its children, parent and next_sibling; also matched: build_node(), build_node_delete() and name_node(). |
/kernel/liteos_m/kernel/src/mm/ |
los_membox.c | Free membox blocks carry a magic word: OsMemBoxSetMagic() stores OS_MEMBOX_MAGIC ORed with the current task ID in node->pstNext, OsMemBoxCheckMagic() validates it, and OsCheckBoxMem() additionally checks the node's offset from the data area ((UINTPTR)node - (UINTPTR)(boxInfo + 1)) before trusting the magic; further matches in LOS_MemboxInit(), LOS_MemboxAlloc(), LOS_MemboxFree() (which recovers the node with OS_MEMBOX_NODE_ADDR()), LOS_ShowBox() and OsMemboxExcInfoGetSub(). |
/kernel/linux/linux-5.10/drivers/clk/ti/ |
gate.c | _of_ti_gate_clk_setup() maps the control register with ti_clk_get_reg_addr(), picks up an optional "ti,bit-shift" property, and requires exactly one parent (of_clk_get_parent_count()/of_clk_get_parent_name()); _register_gate() then registers the omap_hw clock against the device node; further matches in the of_ti_composite_*_clk_setup() variants and in of_ti_clkdm_gate_clk_setup(), of_ti_hsdiv_gate_clk_setup(), of_ti_gate_clk_setup(), of_ti_wait_gate_clk_setup(), of_ti_am35xx_gate_clk_setup() and of_ti_dss_gate_clk_setup(). |
/kernel/linux/linux-6.6/drivers/clk/ti/ |
gate.c | The 6.6 copy of the TI gate-clock driver, with the same matches: _register_gate(), _of_ti_gate_clk_setup(), the of_ti_composite_*_clk_setup() variants, of_ti_clkdm_gate_clk_setup(), of_ti_hsdiv_gate_clk_setup(), of_ti_gate_clk_setup(), of_ti_wait_gate_clk_setup(), of_ti_am35xx_gate_clk_setup() and of_ti_dss_gate_clk_setup(). |
/kernel/linux/linux-5.10/fs/xfs/libxfs/ |
xfs_iext_tree.c | xfs_iext_find_first_leaf() walks node->ptrs[0] down to the leaf level and xfs_iext_find_last_leaf() follows the last populated pointer at each level; xfs_iext_grow() allocates a fresh zeroed node with kmem_zalloc(NODE_SIZE, KM_NOFS); further matches in xfs_iext_key_cmp(), xfs_iext_find_level(), xfs_iext_node_pos(), xfs_iext_node_insert_pos(), xfs_iext_node_nr_entries(), xfs_iext_update_node(), xfs_iext_split_node(), xfs_iext_insert_node(), xfs_iext_rebalance_node(), xfs_iext_remove_node() and xfs_iext_destroy_node(). |