/kernel/linux/linux-6.6/fs/btrfs/send.c
  88:   * This makes the size of a cache entry to be exactly 192 bytes on x86_64, which
  105:  * A backref cache entry maps a leaf to a list of IDs of roots from which the
  107:  * With SEND_MAX_BACKREF_CACHE_ROOTS as 12, each cache entry is 128 bytes (on
  111:  struct btrfs_lru_cache_entry entry;  (member)
  118:  static_assert(offsetof(struct backref_cache_entry, entry) == 0);
  342:  * The key in the entry is an inode number, and the generation matches
  345:  struct btrfs_lru_cache_entry entry;  (member)
  355:  static_assert(offsetof(struct name_cache_entry, entry) == 0);
  1407: struct backref_cache_entry *entry;  (lookup_backref_cache(), local)
  1432: entry ...  (lookup_backref_cache())
  2282: struct btrfs_lru_cache_entry *entry;  (name_cache_search(), local)
  2785: struct btrfs_lru_cache_entry *entry;  (cache_dir_utimes(), local)
  2920: struct btrfs_lru_cache_entry *entry;  (cache_dir_created(), local)
  3116: struct orphan_dir_info *entry, *odi;  (add_orphan_dir_info(), local)
  3150: struct orphan_dir_info *entry;  (get_orphan_dir_info(), local)
  3323: struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);  (is_waiting_for_move(), local)
  3332: struct waiting_dir_move *entry, *dm;  (add_waiting_dir_move(), local)
  3364: struct waiting_dir_move *entry;  (get_waiting_dir_move(), local)
  3397: struct pending_dir_move *entry = NULL, *pm;  (add_pending_dir_move(), local)
  3459: struct pending_dir_move *entry;  (get_pending_dir_moves(), local)
  4646: const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);  (rbtree_ref_less(), local)
  8083: struct btrfs_lru_cache_entry *entry;  (btrfs_ioctl_send(), local)
  (additional matches not shown)
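The send.c hits above all orbit one idiom: a generic btrfs_lru_cache_entry is embedded as the first member of each concrete cache record, and a static_assert on offsetof(..., entry) == 0 guarantees that a pointer to the embedded entry can be cast straight back to its container. A minimal standalone sketch of that idea, with illustrative names rather than the btrfs definitions:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Generic entry that a cache would link and look up by key. */
    struct lru_cache_entry {
        unsigned long key;
    };

    /* Concrete record: the generic entry must stay the first member ... */
    struct backref_record {
        struct lru_cache_entry entry;   /* must be first */
        int num_roots;
    };

    /* ... so a generic pointer converts back with a plain cast. */
    static_assert(offsetof(struct backref_record, entry) == 0,
                  "entry must be the first member");

    static struct backref_record *to_record(struct lru_cache_entry *e)
    {
        return (struct backref_record *)e;   /* valid only because the offset is 0 */
    }

    int main(void)
    {
        struct backref_record rec = { .entry = { .key = 257 }, .num_roots = 3 };
        struct lru_cache_entry *e = &rec.entry;   /* what a cache lookup returns */

        printf("key=%lu roots=%d\n", e->key, to_record(e)->num_roots);
        return 0;
    }

The offset-0 requirement is what makes the plain cast legal; the more general container_of() macro lifts that restriction at the cost of a little pointer arithmetic.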
/foundation/distributeddatamgr/kv_store/frameworks/libs/distributeddb/syncer/src/device/remote_executor.cpp
  181: std::pair<std::string, Message *> entry;  (ReceiveRemoteExecutorRequest(), local)
  189: entry = searchMessageQueue_.front();  (ReceiveRemoteExecutorRequest())
  192: ParseOneRequestMessage(entry.first, entry.second);  (ReceiveRemoteExecutorRequest())
  193: delete entry.second;  (ReceiveRemoteExecutorRequest())
  194: entry.second = nullptr;  (ReceiveRemoteExecutorRequest())
  347: for (auto &entry : searchTaskQueue_) {  (CheckTaskExeStatus())
  349: int currentQueueCount = static_cast<int>(entry.second.size());  (CheckTaskExeStatus())
  733: auto entry = searchMessageQueue_.front();  (ClearInnerSource(), local)
  735: delete entry ...  (ClearInnerSource())
  (additional matches not shown)
/kernel/linux/linux-5.10/arch/sparc/mm/srmmu.c
  397: static inline void remove_from_ctx_list(struct ctx_list *entry)  (remove_from_ctx_list(), argument)
  399: entry->next->prev = entry->prev;  (remove_from_ctx_list())
  400: entry->prev->next = entry->next;  (remove_from_ctx_list())
  403: static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)  (add_to_ctx_list(), argument)
  405: entry->next = head;  (add_to_ctx_list())
  406: (entry->prev = head->prev)->next = entry;  (add_to_ctx_list())
  407: head->prev = entry;  (add_to_ctx_list())
  (additional matches not shown)
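remove_from_ctx_list() and add_to_ctx_list() shown above are the textbook operations on a circular doubly linked list anchored by a header node. A standalone sketch of the same two operations, using a generic node type rather than the sparc ctx_list:

    #include <stdio.h>

    struct node {
        struct node *next, *prev;
        int id;
    };

    /* A circular list starts life as a header pointing at itself. */
    static void list_init(struct node *head)
    {
        head->next = head->prev = head;
    }

    /* Unlink entry; mirrors remove_from_ctx_list(). */
    static void list_remove(struct node *entry)
    {
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
    }

    /* Insert entry just before head, i.e. at the tail; mirrors add_to_ctx_list(). */
    static void list_add_tail(struct node *head, struct node *entry)
    {
        entry->next = head;
        (entry->prev = head->prev)->next = entry;
        head->prev = entry;
    }

    int main(void)
    {
        struct node head, a = { .id = 1 }, b = { .id = 2 };

        list_init(&head);
        list_add_tail(&head, &a);
        list_add_tail(&head, &b);
        list_remove(&a);

        for (struct node *n = head.next; n != &head; n = n->next)
            printf("id=%d\n", n->id);   /* prints only id=2 */
        return 0;
    }

Because the list is circular and the header is always present, neither operation needs a NULL check or a special case for an empty list.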
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/pxa168_eth.c
  390: * return the calculated entry.
  421: * This function will add/del an entry to the address table.
  425: * skip - if 1, skip this address.Used in case of deleting an entry which is a
  426: * part of chain in the hash table.We can't just delete the entry since
  432: * address table entry is added/deleted.
  440: struct addr_table_entry *entry, *start;  (add_del_hash_entry(), local)
  467: entry = start + hash_function(mac_addr);  (add_del_hash_entry())
  469: if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {  (add_del_hash_entry())
  473: if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==  (add_del_hash_entry())
  475: (le32_to_cpu(entry ...  (add_del_hash_entry())
  (additional matches not shown)
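The comments above explain why the driver cannot simply clear a hash-table slot that sits in the middle of a probe chain: later entries in the chain would become unreachable, so deletion only marks the slot as skipped. A toy open-addressing table sketching that behaviour; field names and sizes here are made up and do not reflect the pxa168 hardware table format:

    #include <stdint.h>
    #include <stdio.h>

    #define TBL_SIZE 8          /* power of two for cheap wrap-around */

    struct slot {
        uint8_t  valid;
        uint8_t  skip;          /* deleted, but keep probing past this slot */
        uint32_t key;
    };

    static struct slot table[TBL_SIZE];

    static unsigned hash(uint32_t key) { return key & (TBL_SIZE - 1); }

    static void put(uint32_t key)
    {
        for (unsigned i = hash(key), n = 0; n < TBL_SIZE; i = (i + 1) & (TBL_SIZE - 1), n++) {
            if (!table[i].valid || table[i].skip) {
                table[i] = (struct slot){ .valid = 1, .skip = 0, .key = key };
                return;
            }
        }
    }

    static struct slot *get(uint32_t key)
    {
        for (unsigned i = hash(key), n = 0; n < TBL_SIZE; i = (i + 1) & (TBL_SIZE - 1), n++) {
            if (!table[i].valid)
                return NULL;                         /* chain ends at an empty slot */
            if (!table[i].skip && table[i].key == key)
                return &table[i];
        }
        return NULL;
    }

    static void del(uint32_t key)
    {
        struct slot *s = get(key);
        if (s)
            s->skip = 1;    /* don't clear valid: later chain members stay reachable */
    }

    int main(void)
    {
        put(1); put(9); put(17);                 /* all hash to the same bucket */
        del(9);
        printf("17 %s\n", get(17) ? "found" : "missing");   /* still found */
        return 0;
    }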
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/x86_64/processor.c
  289:  /* Fill in page table entry. */  (virt_pg_map())
  675:  * Locate a cpuid entry.
  678:  * function: The function of the cpuid entry to find.
  679:  * index: The index of the cpuid entry.
  683:  * Return: A pointer to the cpuid entry. Never returns NULL.
  689:  struct kvm_cpuid_entry2 *entry = NULL;  (kvm_get_supported_cpuid_index(), local)
  696:  entry = &cpuid->entries[i];  (kvm_get_supported_cpuid_index())
  701:  TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",  (kvm_get_supported_cpuid_index())
  703:  return entry;  (kvm_get_supported_cpuid_index())
  783:  struct kvm_msr_entry entry;  (vcpu_get_msr(), member)
  818:  struct kvm_msr_entry entry;  (_vcpu_set_msr(), member)
  1117: struct kvm_cpuid_entry2 *entry;  (kvm_get_cpu_address_width(), local)
  (additional matches not shown)
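kvm_get_supported_cpuid_index() walks the CPUID entry array for a matching function/index pair and asserts that it found one, which is why its comment promises it never returns NULL. A hedged standalone sketch of the same scan over a simplified entry type; the real struct kvm_cpuid_entry2 carries more fields and flags:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct kvm_cpuid_entry2. */
    struct cpuid_entry {
        uint32_t function;
        uint32_t index;
        uint32_t eax, ebx, ecx, edx;
    };

    static struct cpuid_entry *
    find_cpuid_entry(struct cpuid_entry *entries, size_t nent,
                     uint32_t function, uint32_t index)
    {
        struct cpuid_entry *entry = NULL;

        for (size_t i = 0; i < nent; i++) {
            if (entries[i].function == function && entries[i].index == index) {
                entry = &entries[i];
                break;
            }
        }
        /* The selftest helper asserts instead of returning NULL. */
        assert(entry && "Guest CPUID entry not found");
        return entry;
    }

    int main(void)
    {
        struct cpuid_entry table[] = {
            { .function = 0x0, .index = 0, .eax = 0x16 },
            { .function = 0x7, .index = 0, .ebx = 0x1  },
        };

        printf("eax=0x%x\n", find_cpuid_entry(table, 2, 0x0, 0)->eax);
        return 0;
    }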
/kernel/linux/linux-6.6/arch/sparc/mm/srmmu.c
  398: static inline void remove_from_ctx_list(struct ctx_list *entry)  (remove_from_ctx_list(), argument)
  400: entry->next->prev = entry->prev;  (remove_from_ctx_list())
  401: entry->prev->next = entry->next;  (remove_from_ctx_list())
  404: static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)  (add_to_ctx_list(), argument)
  406: entry->next = head;  (add_to_ctx_list())
  407: (entry->prev = head->prev)->next = entry;  (add_to_ctx_list())
  408: head->prev = entry;  (add_to_ctx_list())
  (additional matches not shown; same helpers as the 5.10 copy above)
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
  358:  ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge, struct ice_esw_br_fdb_entry *entry)  (argument)
  359:  struct ice_esw_br_fdb_entry *entry)  (ice_eswitch_br_fdb_entry_notify_and_cleanup())
  361:  if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))  (ice_eswitch_br_fdb_entry_notify_and_cleanup())
  362:  ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,  (ice_eswitch_br_fdb_entry_notify_and_cleanup())
  363:  entry->data.vid,  (ice_eswitch_br_fdb_entry_notify_and_cleanup())
  365:  ice_eswitch_br_fdb_entry_delete(bridge, entry);  (ice_eswitch_br_fdb_entry_notify_and_cleanup())
  378:  dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",  (ice_eswitch_br_fdb_entry_find_and_delete())
  464:  dev_err(dev, "Failed to create fdb entry, err: %d\n", err);  (ice_eswitch_br_fdb_entry_create())
  587:  struct ice_esw_br_fdb_entry *entry, *tmp;  (ice_eswitch_br_fdb_flush(), local)
  589:  list_for_each_entry_safe(entry, tm ...  (ice_eswitch_br_fdb_flush())
  1248: struct ice_esw_br_fdb_entry *entry, *tmp;  (ice_eswitch_br_update(), local)
  (additional matches not shown)
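ice_eswitch_br_fdb_flush() tears the FDB down with list_for_each_entry_safe(), meaning it captures each node's successor before the node itself is deleted. The same idea on a plain singly linked list, outside the kernel list API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal FDB-like entry, just enough to show the pattern. */
    struct fdb_entry {
        struct fdb_entry *next;
        unsigned char addr[6];
    };

    /*
     * Flush every entry: remember the successor before freeing the current
     * node, which is exactly what list_for_each_entry_safe() buys you.
     */
    static void fdb_flush(struct fdb_entry **head)
    {
        struct fdb_entry *entry = *head, *tmp;

        while (entry) {
            tmp = entry->next;   /* grab the next pointer first */
            free(entry);         /* now it is safe to tear this one down */
            entry = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct fdb_entry *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct fdb_entry *e = calloc(1, sizeof(*e));
            e->next = head;
            head = e;
        }
        fdb_flush(&head);
        printf("flushed, head=%p\n", (void *)head);
        return 0;
    }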
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/pxa168_eth.c
  390: * return the calculated entry.
  421: * This function will add/del an entry to the address table.
  425: * skip - if 1, skip this address.Used in case of deleting an entry which is a
  426: * part of chain in the hash table.We can't just delete the entry since
  432: * address table entry is added/deleted.
  440: struct addr_table_entry *entry, *start;  (add_del_hash_entry(), local)
  467: entry = start + hash_function(mac_addr);  (add_del_hash_entry())
  469: if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {  (add_del_hash_entry())
  473: if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==  (add_del_hash_entry())
  475: (le32_to_cpu(entry ...  (add_del_hash_entry())
  (additional matches not shown; same helper as the 5.10 copy above)
/kernel/linux/linux-6.6/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
  255: * We need at least one entry in the wifi package that  (iwl_acpi_get_wifi_pkg_range())
  256: * describes the domain, and one more entry, otherwise there's  (iwl_acpi_get_wifi_pkg_range())
  865: union acpi_object *entry;  (iwl_sar_get_wgds_table(), local)
  867: entry = &wifi_pkg->package.elements[entry_idx];  (iwl_sar_get_wgds_table())
  869: if (entry->type != ACPI_TYPE_INTEGER ||  (iwl_sar_get_wgds_table())
  870: entry->integer.value > num_profiles) {  (iwl_sar_get_wgds_table())
  874: num_profiles = entry->integer.value;  (iwl_sar_get_wgds_table())
  901: union acpi_object *entry;  (iwl_sar_get_wgds_table(), local)
  912: entry = &wifi_pkg->package.elements[entry_idx];  (iwl_sar_get_wgds_table())
  914: if (entry ...  (iwl_sar_get_wgds_table())
  (additional matches not shown)
/kernel/linux/linux-6.6/mm/madvise.c
  200: swp_entry_t entry;  (swapin_walk_pmd_entry(), local)
  212: entry = pte_to_swp_entry(pte);  (swapin_walk_pmd_entry())
  213: if (unlikely(non_swap_entry(entry)))  (swapin_walk_pmd_entry())
  219: page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,  (swapin_walk_pmd_entry())
  250: swp_entry_t entry;  (shmem_swapin_range(), local)
  254: entry = radix_to_swp_entry(page);  (shmem_swapin_range())
  256: if (non_swap_entry(entry))  (shmem_swapin_range())
  264: page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),  (shmem_swapin_range())
  660: swp_entry_t entry;  (madvise_free_pte_range(), local)
  662: entry ...  (madvise_free_pte_range())
  (additional matches not shown)
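Both walkers above turn a non-present page-table or page-cache slot into a swp_entry_t and skip anything that is not a real swap entry. The kernel packs a swap type and a page offset into a single word; a hedged standalone sketch of that packing, with made-up field widths since the kernel's actual layout is architecture-specific:

    #include <stdio.h>

    /* Illustrative layout: low 5 bits = swap type, the rest = page offset. */
    #define SWP_TYPE_BITS 5
    #define SWP_TYPE_MASK ((1ul << SWP_TYPE_BITS) - 1)

    typedef struct { unsigned long val; } swp_entry_t;

    static swp_entry_t swp_entry(unsigned type, unsigned long offset)
    {
        return (swp_entry_t){ (offset << SWP_TYPE_BITS) | (type & SWP_TYPE_MASK) };
    }

    static unsigned swp_type(swp_entry_t e)        { return e.val & SWP_TYPE_MASK; }
    static unsigned long swp_offset(swp_entry_t e) { return e.val >> SWP_TYPE_BITS; }

    int main(void)
    {
        swp_entry_t entry = swp_entry(2, 0x1a3f);

        printf("type=%u offset=%#lx\n", swp_type(entry), swp_offset(entry));
        return 0;
    }

The type selects which swap device the page lives on and the offset selects the slot within it; "non-swap" entries reuse the same encoding for things like migration entries, which is why the walkers check non_swap_entry() before reading ahead.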
/third_party/libfuse/example/passthrough_hp.cc
  722: struct dirent *entry;  (do_readdir(), local)
  724: entry = readdir(d->dp);  (do_readdir())
  725: if (!entry) {  (do_readdir())
  734: d->offset = entry->d_off;  (do_readdir())
  735: if (is_dot_or_dotdot(entry->d_name))  (do_readdir())
  741: err = do_lookup(ino, entry->d_name, &e);  (do_readdir())
  744: entsize = fuse_add_direntry_plus(req, p, rem, entry->d_name, &e, entry->d_off);  (do_readdir())
  746: e.attr.st_ino = entry->d_ino;  (do_readdir())
  747: e.attr.st_mode = entry ...  (do_readdir())
  (additional matches not shown)
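do_readdir() above is the usual readdir(3) loop: iterate until readdir() returns NULL, skip "." and "..", and hand everything else to the FUSE reply buffer. A standalone version of the loop without the FUSE plumbing:

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    static int is_dot_or_dotdot(const char *name)
    {
        return strcmp(name, ".") == 0 || strcmp(name, "..") == 0;
    }

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : ".";
        DIR *dp = opendir(path);
        struct dirent *entry;

        if (!dp) {
            perror("opendir");
            return 1;
        }
        while ((entry = readdir(dp)) != NULL) {   /* NULL means end of stream */
            if (is_dot_or_dotdot(entry->d_name))
                continue;
            printf("%s\n", entry->d_name);
        }
        closedir(dp);
        return 0;
    }

The passthrough example additionally records entry->d_off so a later readdir call can resume from the same directory position; that field is Linux-specific, which is why it is omitted here.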
/third_party/python/Tools/demo/spreadsheet.py
  511: self.entry = Tk.Entry(self.root)
  519: self.entry.pack(side="left", expand=1, fill="x")
  521: self.entry.bind("<Return>", self.return_event)
  522: self.entry.bind("<Shift-Return>", self.shift_return_event)
  523: self.entry.bind("<Tab>", self.tab_event)
  524: self.entry.bind("<Shift-Tab>", self.shift_tab_event)
  525: self.entry.bind("<Delete>", self.delete_event)
  526: self.entry.bind("<Escape>", self.escape_event)
  542: self.entry.delete(0, 'end')
  557: self.entry ...
  (additional matches not shown)
/third_party/skia/third_party/externals/freetype/src/gxvalid/gxvcommn.c
  542:  continue; /* ftxvalidator silently skips such an entry */  (gxv_LookupTable_fmt2_validate())
  613:  continue; /* ftxvalidator silently skips such an entry */  (gxv_LookupTable_fmt4_validate())
  694:  " (entry %d < nUnits=%d)\n",  (gxv_LookupTable_fmt6_validate())
  1028: FT_Byte entry;  (gxv_StateArray_validate(), local)
  1053: entry = FT_NEXT_BYTE( p );  (gxv_StateArray_validate())
  1054: *maxEntry_p = (FT_Byte)FT_MAX( *maxEntry_p, entry );  (gxv_StateArray_validate())
  1081: FT_Byte entry;  (gxv_EntryTable_validate(), local)
  1102: for ( entry = 0; entry <= maxEntry; entry ...  (gxv_EntryTable_validate())
  1413: FT_UShort entry;  (gxv_XStateArray_validate(), local)
  1463: FT_UShort entry;  (gxv_XEntryTable_validate(), local)
  (additional matches not shown)
/kernel/linux/linux-5.10/drivers/infiniband/hw/efa/efa_verbs.c
  452:  struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);  (efa_user_mmap_entry_insert(), local)
  455:  if (!entry)  (efa_user_mmap_entry_insert())
  458:  entry->address = address;  (efa_user_mmap_entry_insert())
  459:  entry->mmap_flag = mmap_flag;  (efa_user_mmap_entry_insert())
  461:  err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,  (efa_user_mmap_entry_insert())
  464:  kfree(entry);  (efa_user_mmap_entry_insert())
  467:  *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);  (efa_user_mmap_entry_insert())
  469:  return &entry->rdma_entry;  (efa_user_mmap_entry_insert())
  1071: "Invalid entry siz ...  (efa_create_cq())
  1733: struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);  (efa_mmap_free(), local)
  1742: struct efa_user_mmap_entry *entry;  (__efa_mmap(), local)
  (additional matches not shown)
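efa_user_mmap_entry_insert() follows the familiar allocate / fill / register / free-on-failure sequence. A userspace sketch of the same error handling; registry_insert() below is a hypothetical stand-in for rdma_user_mmap_entry_insert(), not a real API:

    #include <stdio.h>
    #include <stdlib.h>

    struct mmap_entry {
        unsigned long address;
        int flag;
    };

    /* Hypothetical registry hook; stands in for rdma_user_mmap_entry_insert(). */
    static int registry_insert(struct mmap_entry *e)
    {
        return e->address ? 0 : -1;   /* pretend a zero address is rejected */
    }

    static struct mmap_entry *entry_insert(unsigned long address, int flag)
    {
        struct mmap_entry *entry = calloc(1, sizeof(*entry));

        if (!entry)
            return NULL;

        entry->address = address;
        entry->flag = flag;

        if (registry_insert(entry)) {   /* registration failed: don't leak */
            free(entry);
            return NULL;
        }
        return entry;
    }

    int main(void)
    {
        struct mmap_entry *e = entry_insert(0x1000, 1);

        printf("%s\n", e ? "registered" : "rejected");
        free(e);
        return 0;
    }

The important detail, mirrored from the driver, is that ownership of the allocation only passes to the registry on success; every failure path frees what this function allocated.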
/kernel/linux/linux-5.10/drivers/net/ethernet/dec/tulip/de2104x.c
  608: unsigned int entry, tx_free;  (de_start_xmit(), local)
  622: entry = de->tx_head;  (de_start_xmit())
  624: txd = &de->tx_ring[entry];  (de_start_xmit())
  629: if (entry == (DE_TX_RING_SIZE - 1))  (de_start_xmit())
  637: de->tx_skb[entry].skb = skb;  (de_start_xmit())
  638: de->tx_skb[entry].mapping = mapping;  (de_start_xmit())
  644: de->tx_head = NEXT_TX(entry);  (de_start_xmit())
  646: entry, skb->len);  (de_start_xmit())
  673: __set_bit_le(255, hash_table); /* Broadcast entry */  (build_setup_frame_hash())
  687: /* Fill the final entry wit ...  (build_setup_frame_hash())
  724: unsigned int entry;  (__de_set_rx_mode(), local)
  (additional matches not shown)
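de_start_xmit() claims the descriptor at tx_head, flags the last descriptor of the ring so the hardware wraps, and advances the head with NEXT_TX(). A sketch of that ring arithmetic with a power-of-two ring; the names are illustrative, not the tulip driver's:

    #include <stdio.h>

    #define TX_RING_SIZE 4                       /* power of two */
    #define NEXT_TX(i)   (((i) + 1) & (TX_RING_SIZE - 1))

    struct tx_desc {
        int ring_end;     /* set on the last descriptor so the consumer wraps */
        int len;
    };

    static struct tx_desc tx_ring[TX_RING_SIZE];
    static unsigned int tx_head;

    static void xmit(int len)
    {
        unsigned int entry = tx_head;            /* claim the current slot */
        struct tx_desc *txd = &tx_ring[entry];

        txd->len = len;
        txd->ring_end = (entry == TX_RING_SIZE - 1);

        tx_head = NEXT_TX(entry);                /* wraps to 0 after the last slot */
        printf("queued %d bytes at entry %u\n", len, entry);
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            xmit(64 + i);
        return 0;
    }

A real driver also tracks a tail index and free count so the producer never overwrites descriptors the hardware has not consumed yet; that bookkeeping is omitted here.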
/kernel/linux/linux-5.10/fs/ext4/inline.c
  33:   struct ext4_xattr_entry *entry;  (get_max_inline_xattr_value_size(), local)
  48:   * needs an empty 4 bytes to indicate the gap between the xattr entry  (get_max_inline_xattr_value_size())
  58:   entry = IFIRST(header);  (get_max_inline_xattr_value_size())
  62:   while (!IS_LAST_ENTRY(entry)) {  (get_max_inline_xattr_value_size())
  63:   void *next = EXT4_XATTR_NEXT(entry);  (get_max_inline_xattr_value_size())
  70:   if (!entry->e_value_inum && entry->e_value_size) {  (get_max_inline_xattr_value_size())
  71:   size_t offs = le16_to_cpu(entry->e_value_offs);  (get_max_inline_xattr_value_size())
  75:   entry = next;  (get_max_inline_xattr_value_size())
  78:   ((void *)entry ...  (get_max_inline_xattr_value_size())
  180:  struct ext4_xattr_entry *entry;  (ext4_read_inline_data(), local)
  225:  struct ext4_xattr_entry *entry;  (ext4_write_inline_data(), local)
  1077: struct ext4_xattr_entry *entry;  (ext4_get_inline_xattr_pos(), local)
  (additional matches not shown)
/kernel/linux/linux-6.6/fs/dlm/lowcomms.c
  124:  /* An entry waiting to be sent */
  139:  struct writequeue_entry *entry;  (member)
  235:  struct writequeue_entry *entry = data;  (writequeue_entry_ctor(), local)
  237:  INIT_LIST_HEAD(&entry->msgs);  (writequeue_entry_ctor())
  691:  kref_put(&msg->entry->ref, dlm_page_release);  (dlm_msg_release())
  767:  /* if we send a writequeue entry only a half way, we drop the  (close_connection())
  768:  * whole entry because reconnection and that we not start of the  (close_connection())
  1100: * writequeue_entry_complete - try to delete and free write queue entry
  1101: * @e: write queue entry to try to delete
  1161: struct writequeue_entry *entry;  (new_writequeue_entry(), local)
  (additional matches not shown)
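dlm_msg_release() drops the writequeue entry's reference with kref_put(), and the release callback frees the backing page once the count reaches zero. A plain-C sketch of a reference count with a release hook; this is not the kernel kref API and, unlike kref, it is not thread-safe as written:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        int refcount;
        void (*release)(struct entry *e);
    };

    static void entry_get(struct entry *e) { e->refcount++; }

    /* Drop one reference; the last drop runs the release callback. */
    static void entry_put(struct entry *e)
    {
        if (--e->refcount == 0)
            e->release(e);
    }

    static void entry_release(struct entry *e)
    {
        printf("releasing entry\n");
        free(e);
    }

    int main(void)
    {
        struct entry *e = malloc(sizeof(*e));

        e->refcount = 1;
        e->release = entry_release;

        entry_get(e);   /* a message holds a second reference */
        entry_put(e);   /* message done */
        entry_put(e);   /* owner done: release runs now */
        return 0;
    }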
/kernel/linux/linux-6.6/fs/ext4/inline.c
  35:   struct ext4_xattr_entry *entry;  (get_max_inline_xattr_value_size(), local)
  50:   * needs an empty 4 bytes to indicate the gap between the xattr entry  (get_max_inline_xattr_value_size())
  60:   entry = IFIRST(header);  (get_max_inline_xattr_value_size())
  64:   while (!IS_LAST_ENTRY(entry)) {  (get_max_inline_xattr_value_size())
  65:   void *next = EXT4_XATTR_NEXT(entry);  (get_max_inline_xattr_value_size())
  72:   if (!entry->e_value_inum && entry->e_value_size) {  (get_max_inline_xattr_value_size())
  73:   size_t offs = le16_to_cpu(entry->e_value_offs);  (get_max_inline_xattr_value_size())
  77:   entry = next;  (get_max_inline_xattr_value_size())
  80:   ((void *)entry ...  (get_max_inline_xattr_value_size())
  182:  struct ext4_xattr_entry *entry;  (ext4_read_inline_data(), local)
  226:  struct ext4_xattr_entry *entry;  (ext4_write_inline_data(), local)
  1049: struct ext4_xattr_entry *entry;  (ext4_get_inline_xattr_pos(), local)
  (additional matches not shown; same walk as the 5.10 copy above)
/kernel/linux/linux-6.6/drivers/net/ethernet/dec/tulip/de2104x.c
  608: unsigned int entry, tx_free;  (de_start_xmit(), local)
  622: entry = de->tx_head;  (de_start_xmit())
  624: txd = &de->tx_ring[entry];  (de_start_xmit())
  629: if (entry == (DE_TX_RING_SIZE - 1))  (de_start_xmit())
  637: de->tx_skb[entry].skb = skb;  (de_start_xmit())
  638: de->tx_skb[entry].mapping = mapping;  (de_start_xmit())
  644: de->tx_head = NEXT_TX(entry);  (de_start_xmit())
  646: entry, skb->len);  (de_start_xmit())
  673: __set_bit_le(255, hash_table); /* Broadcast entry */  (build_setup_frame_hash())
  687: /* Fill the final entry wit ...  (build_setup_frame_hash())
  724: unsigned int entry;  (__de_set_rx_mode(), local)
  (additional matches not shown; same TX path as the 5.10 copy above)
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
  1540: * @pe: addr of the page entry
  1567: * @pe: addr of the page entry
  1596: * @pe: addr of the page entry
  1743: struct amdgpu_iv_entry *entry);
  2026: sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry)  (argument)
  2028: struct amdgpu_iv_entry *entry)  (sdma_v4_0_process_trap_irq())
  2033: instance = sdma_v4_0_irq_id_to_seq(entry->client_id);  (sdma_v4_0_process_trap_irq())
  2034: switch (entry->ring_id) {  (sdma_v4_0_process_trap_irq())
  2053: sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry)  (argument)
  2055: struct amdgpu_iv_entry *entry)  (sdma_v4_0_process_ras_data_cb())
  2066: instance = sdma_v4_0_irq_id_to_seq(entry->client_id);  (sdma_v4_0_process_ras_data_cb())
  2070: amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);  (sdma_v4_0_process_ras_data_cb())
  2076: sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry)  (argument)
  2111: sdma_v4_0_print_iv_entry(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry)  (argument)
  2139: sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry)  (argument)
  2148: sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry)  (argument)
  2157: sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry)  (argument)
  2167: sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry)  (argument)
  (additional matches not shown)
/foundation/ability/ability_runtime/frameworks/native/ability/native/ui_ability_impl.cpp
  224: std::string entry = "AbilityManagerClient::AbilityTransitionDone; the transaction start.";  (AbilityTransactionCallback(), local)
  225: FreezeUtil::GetInstance().AddLifecycleEvent(flow, entry);  (AbilityTransactionCallback())
  440: std::string entry = "UIAbilityImpl::WindowLifeCycleImpl::AfterForeground; the foreground lifecycle.";  (AfterForeground(), local)
  441: FreezeUtil::GetInstance().AddLifecycleEvent(flow, entry);  (AfterForeground())
  457: entry = "AbilityManagerClient::AbilityTransitionDone; the transaction start.";  (AfterForeground())
  458: FreezeUtil::GetInstance().AddLifecycleEvent(flow, entry);  (AfterForeground())
  475: std::string entry = "UIAbilityImpl::WindowLifeCycleImpl::AfterBackground; the background lifecycle.";  (AfterBackground(), local)
  476: FreezeUtil::GetInstance().AddLifecycleEvent(flow, entry);  (AfterBackground())
  511: std::string entry = "ERROR UIAbilityImpl::WindowLifeCycleImpl::ForegroundFailed; GoForeground failed.";  (ForegroundFailed(), local)
  512: FreezeUtil::GetInstance().AppendLifecycleEvent(flow, entry);  (ForegroundFailed())
  (additional matches not shown)
/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
  393: hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);  (aeq_interrupt())
  414: hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);  (ceq_interrupt())
  438: ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) |  (get_ctrl0_val())
  457: ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) |  (get_ctrl0_val())
  718: * @entry: msix entry associated with the event queue
  722: init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, struct msix_entry entry)  (argument)
  724: struct msix_entry entry)  (init_eq())
  754: eq->msix_entry = entry;  (init_eq())
  788: /* set the attributes of the msix entry */  (init_eq())
  789: hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,  (init_eq())
  (additional matches not shown)
/kernel/linux/linux-5.10/drivers/pci/controller/pcie-rcar-host.c
  815: rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, struct resource_entry *entry, int *index)  (argument)
  816: struct resource_entry *entry,  (rcar_pcie_inbound_ranges())
  819: u64 restype = entry->res->flags;  (rcar_pcie_inbound_ranges())
  820: u64 cpu_addr = entry->res->start;  (rcar_pcie_inbound_ranges())
  821: u64 cpu_end = entry->res->end;  (rcar_pcie_inbound_ranges())
  822: u64 pci_addr = entry->res->start - entry->offset;  (rcar_pcie_inbound_ranges())
  825: u64 size = resource_size(entry->res);  (rcar_pcie_inbound_ranges())
  868: struct resource_entry *entry;  (rcar_pcie_parse_map_dma_ranges(), local)
  871: resource_list_for_each_entry(entry, &bridge->dma_ranges) {  (rcar_pcie_parse_map_dma_ranges())
  872: err = rcar_pcie_inbound_ranges(&host->pcie, entry,  (rcar_pcie_parse_map_dma_ranges())
  (additional matches not shown)

/kernel/linux/linux-5.10/drivers/pci/controller/pci-v3-semi.c
  597: v3_get_dma_range_config(struct v3_pci *v3, struct resource_entry *entry, u32 *pci_base, u32 *pci_map)  (argument)
  598: struct resource_entry *entry,  (v3_get_dma_range_config())
  602: u64 cpu_addr = entry->res->start;  (v3_get_dma_range_config())
  603: u64 cpu_end = entry->res->end;  (v3_get_dma_range_config())
  604: u64 pci_end = cpu_end - entry->offset;  (v3_get_dma_range_config())
  605: u64 pci_addr = entry->res->start - entry->offset;  (v3_get_dma_range_config())
  621: switch (resource_size(entry->res)) {  (v3_get_dma_range_config())
  680: struct resource_entry *entry;  (v3_pci_parse_map_dma_ranges(), local)
  683: resource_list_for_each_entry(entry, &bridge->dma_ranges) {  (v3_pci_parse_map_dma_ranges())
  687: ret = v3_get_dma_range_config(v3, entry,  (v3_pci_parse_map_dma_ranges())
  (additional matches not shown)
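Both host drivers above derive the bus-side base of an inbound window as entry->res->start - entry->offset and its length from the resource bounds. The arithmetic in isolation; the addresses below are invented for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct resource_entry_sketch {
        uint64_t start, end;   /* CPU-side window, inclusive end */
        uint64_t offset;       /* cpu_addr - pci_addr for this window */
    };

    int main(void)
    {
        struct resource_entry_sketch entry = {
            .start = 0x48000000, .end = 0x4fffffff, .offset = 0x08000000,
        };
        uint64_t cpu_addr = entry.start;
        uint64_t pci_addr = entry.start - entry.offset;   /* bus-side base */
        uint64_t size     = entry.end - entry.start + 1;  /* resource_size() */

        printf("cpu %#" PRIx64 " -> pci %#" PRIx64 ", size %#" PRIx64 "\n",
               cpu_addr, pci_addr, size);
        return 0;
    }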
/kernel/linux/linux-5.10/fs/debugfs/file.c
  1135: struct debugfs_devm_entry *entry = inode->i_private;  (debugfs_devm_entry_open(), local)
  1137: return single_open(f, entry->read, entry->dev);  (debugfs_devm_entry_open())
  1162: struct debugfs_devm_entry *entry;  (debugfs_create_devm_seqfile(), local)
  1167: entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);  (debugfs_create_devm_seqfile())
  1168: if (!entry)  (debugfs_create_devm_seqfile())
  1171: entry->read = read_fn;  (debugfs_create_devm_seqfile())
  1172: entry->dev = dev;  (debugfs_create_devm_seqfile())
  1174: debugfs_create_file(name, S_IRUGO, parent, entry,  (debugfs_create_devm_seqfile())
  (additional matches not shown)
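debugfs_create_devm_seqfile() stores the read callback and its device in a small entry, hands that entry to debugfs as the file's private data, and debugfs_devm_entry_open() merely unpacks it for single_open(). A standalone sketch of that "callback plus context behind a private pointer" pattern, with FILE standing in for the seq_file machinery:

    #include <stdio.h>
    #include <stdlib.h>

    /* What the driver registers: a show routine and the object it operates on. */
    struct devm_entry {
        int (*read)(FILE *out, void *dev);
        void *dev;
    };

    /* Stand-in for the open handler: it only unpacks the entry and delegates. */
    static int entry_show(FILE *out, struct devm_entry *entry)
    {
        return entry->read(out, entry->dev);
    }

    static int temp_show(FILE *out, void *dev)
    {
        return fprintf(out, "temperature: %d\n", *(int *)dev) < 0;
    }

    int main(void)
    {
        int fake_device = 42;
        struct devm_entry *entry = malloc(sizeof(*entry));

        entry->read = temp_show;
        entry->dev = &fake_device;

        entry_show(stdout, entry);   /* what the debugfs open/show path would do */
        free(entry);
        return 0;
    }

In the kernel version the entry is devm-allocated, so it is freed automatically when the owning device goes away rather than by an explicit free().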