Searched refs:page (Results 6376 - 6400 of 6704) sorted by relevance


/kernel/linux/linux-5.10/drivers/net/ethernet/sun/
niu.h  2920 struct page **rxhash;
3133 u64 (*map_page)(struct device *dev, struct page *page,
/kernel/linux/linux-5.10/fs/ocfs2/
dlmglue.c  1627 * This is helping work around a lock inversion between the page lock in __ocfs2_cluster_lock()
1628 * and dlm locks. One path holds the page lock while calling aops in __ocfs2_cluster_lock()
1630 * locks while acquiring page locks while down converting data locks. in __ocfs2_cluster_lock()
1632 * off to unlock its page lock before trying the dlm lock again. in __ocfs2_cluster_lock()
2523 * locks while holding a page lock and the downconvert thread which
2524 * blocks dlm lock acquiry while acquiring page locks.
2527 * methods that hold page locks and return a very specific *positive* error
2532 * our page so the downconvert thread can make progress. Once we've
2540 struct page *page) in ocfs2_inode_lock_with_page()
2537 ocfs2_inode_lock_with_page(struct inode *inode, struct buffer_head **ret_bh, int ex, struct page *page) ocfs2_inode_lock_with_page() argument
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/qlogic/
qla3xxx.c  164 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) in ql_set_register_page() argument
169 writel(((ISP_CONTROL_NP_MASK << 16) | page), in ql_set_register_page()
172 qdev->current_page = page; in ql_set_register_page()
2854 * First allocate a page of shared memory and use it for shadow in ql_alloc_mem_resources()
/kernel/linux/linux-6.6/fs/ext4/
ext4.h  3541 struct page **pagep);
3547 struct page **pagep,
3727 /* page-io.c */
3738 int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *page,
/kernel/linux/linux-6.6/fs/ocfs2/
dlmglue.c  1626 * This is helping work around a lock inversion between the page lock in __ocfs2_cluster_lock()
1627 * and dlm locks. One path holds the page lock while calling aops in __ocfs2_cluster_lock()
1629 * locks while acquiring page locks while down converting data locks. in __ocfs2_cluster_lock()
1631 * off to unlock its page lock before trying the dlm lock again. in __ocfs2_cluster_lock()
2533 * locks while holding a page lock and the downconvert thread which
2534 * blocks dlm lock acquiry while acquiring page locks.
2537 * methods that hold page locks and return a very specific *positive* error
2542 * our page so the downconvert thread can make progress. Once we've
2550 struct page *page) in ocfs2_inode_lock_with_page()
2547 ocfs2_inode_lock_with_page(struct inode *inode, struct buffer_head **ret_bh, int ex, struct page *page) ocfs2_inode_lock_with_page() argument
[all...]
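
The dlmglue.c comments in the two entries above describe working around a lock inversion: an address-space operation that already holds a page lock takes the cluster lock non-blocking, and on failure returns a specific positive value so the page lock can be released and the downconvert thread can make progress before the operation is retried. A minimal, hypothetical caller sketch under that reading (illustrative only, not ocfs2's actual aop code; in mainline the positive sentinel is AOP_TRUNCATED_PAGE):

static int example_read_page_locked(struct inode *inode, struct page *page)
{
    struct buffer_head *di_bh = NULL;
    int ret;

    /* Cluster lock attempt made while the page lock is still held. */
    ret = ocfs2_inode_lock_with_page(inode, &di_bh, 0, page);
    if (ret == AOP_TRUNCATED_PAGE) {
        /*
         * Per the comment above, the page lock has been given up so
         * the downconvert thread can proceed; the caller propagates
         * the sentinel and the operation is retried later.
         */
        return ret;
    }
    if (ret < 0)
        return ret;

    /* ... perform the page I/O under the cluster lock ... */

    ocfs2_inode_unlock(inode, 0);
    brelse(di_bh);
    return 0;
}
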
/kernel/linux/linux-6.6/fs/smb/client/
connect.c  771 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, in cifs_read_page_from_socket() argument
777 bvec_set_page(&bv, page, to_read, page_offset); in cifs_read_page_from_socket()
/kernel/linux/linux-6.6/drivers/block/mtip32xx/
mtip32xx.c  473 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
1236 * @page page number to fetch
1239 * @sectors page length to fetch, in sectors
1244 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, in mtip_read_log_page() argument
1255 fis.lba_low = page; in mtip_read_log_page()
/kernel/linux/linux-6.6/drivers/net/ethernet/
jme.c  1956 struct page *page, in jme_fill_tx_map()
1963 dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len, in jme_fill_tx_map()
1953 jme_fill_tx_map(struct pci_dev *pdev, struct txdesc *txdesc, struct jme_buffer_info *txbi, struct page *page, u32 page_offset, u32 len, bool hidma) jme_fill_tx_map() argument
/kernel/linux/linux-6.6/drivers/net/ethernet/sun/
niu.h  2920 struct page **rxhash;
3133 u64 (*map_page)(struct device *dev, struct page *page,
/kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c  1765 * range size, which must be a power of two number of page sizes. We in arm_smmu_atc_inv_to_cmd()
1895 /* Get the leaf page size */ in __arm_smmu_tlb_inv_range()
1900 /* Convert page size of 12,14,16 (log2) to 1,2,3 */ in __arm_smmu_tlb_inv_range()
2892 void __iomem *page, in arm_smmu_init_one_queue()
2921 q->prod_reg = page + prod_off; in arm_smmu_init_one_queue()
2922 q->cons_reg = page + cons_off; in arm_smmu_init_one_queue()
2890 arm_smmu_init_one_queue(struct arm_smmu_device *smmu, struct arm_smmu_queue *q, void __iomem *page, unsigned long prod_off, unsigned long cons_off, size_t dwords, const char *name) arm_smmu_init_one_queue() argument
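The arm-smmu-v3.c hit at line 1900 notes converting a log2 page size of 12, 14, or 16 into the encodings 1, 2, 3. The conversion is just (log2 - 10) / 2; a tiny illustrative helper (hypothetical name, not the driver's code):

/* Illustrative only: 12 (4 KiB) -> 1, 14 (16 KiB) -> 2, 16 (64 KiB) -> 3. */
static inline unsigned int log2_pgsize_to_tg(unsigned int log2sz)
{
    return (log2sz - 10) / 2;
}
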
/kernel/linux/linux-6.6/mm/
slab.c  26 * page long) and always contiguous), and each slab contains multiple
126 #include <asm/page.h>
415 * freelist will be at the end of slab page. The objects will be in cache_estimate()
1184 * Initialisation. Called after the page allocator have been initialised and
1201 * page orders on machines with more than 32MB of memory if in kmem_cache_init()
1295 * Register the timers that return unneeded pages to the page allocator in cpucache_init()
1341 * Interface to system's page allocator. No need to hold the
1376 * Interface to system's page release.
1385 page_mapcount_reset(&folio->page); in kmem_freepages()
1393 __free_pages(&folio->page, orde in kmem_freepages()
[all...]
/kernel/linux/linux-6.6/net/ipv4/
tcp_output.c  3894 space = copy_page_from_iter(pfrag->page, pfrag->offset, in tcp_send_syn_data()
3901 skb_fill_page_desc(syn_data, 0, pfrag->page, in tcp_send_syn_data()
3903 page_ref_inc(pfrag->page); in tcp_send_syn_data()
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/
sky2.c  400 /* select page 1 to access Fiber registers */ in sky2_phy_init()
528 /* select page 3 to access LED control register */ in sky2_phy_init()
547 /* restore page register */ in sky2_phy_init()
556 /* select page 3 to access LED control register */ in sky2_phy_init()
569 /* restore page register */ in sky2_phy_init()
595 /* set page register to 0 */ in sky2_phy_init()
610 /* set page register back to 0 */ in sky2_phy_init()
728 /* select page 2 to access MAC control register */ in sky2_phy_power_down()
736 /* set page register back to 0 */ in sky2_phy_power_down()
748 /* select page in sky2_phy_power_down()
1465 struct page *page = alloc_page(gfp); sky2_rx_alloc() local
[all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/realtek/
r8169_main.c  613 struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
3878 static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, in rtl8169_alloc_rx_data()
3884 struct page *data; in rtl8169_alloc_rx_data()
3923 struct page *data; in rtl8169_rx_fill()
/kernel/linux/linux-5.10/drivers/net/wireless/realtek/rtw88/
rtw8822c.c  1863 u8 page; in query_phy_status() local
1865 page = *phy_status & 0xf; in query_phy_status()
1867 switch (page) { in query_phy_status()
1875 rtw_warn(rtwdev, "unused phy status page (%d)\n", page); in query_phy_status()
/kernel/linux/linux-5.10/drivers/nvme/host/
core.c  697 * discard page. If that's also busy, it's safe to return in nvme_setup_discard()
829 struct page *page = req->special_vec.bv_page; in nvme_cleanup_cmd() local
831 if (page == ns->ctrl->discard_page) in nvme_cleanup_cmd()
834 kfree(page_address(page) + req->special_vec.bv_offset); in nvme_cleanup_cmd()
2053 * The block layer can't support LBA sizes larger than the page size in nvme_update_disk_info()
2431 "Minimum device page size %u too large for host (%u)\n", in nvme_enable_ctrl()
4208 * raced with us in reading the log page, which could cause us to miss in nvme_clear_changed_ns_log()
/kernel/linux/linux-6.6/drivers/net/ethernet/realtek/
r8169_main.c  609 struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
3830 static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, in rtl8169_alloc_rx_data()
3836 struct page *data; in rtl8169_alloc_rx_data()
3875 struct page *data; in rtl8169_rx_fill()
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/
sky2.c  399 /* select page 1 to access Fiber registers */ in sky2_phy_init()
527 /* select page 3 to access LED control register */ in sky2_phy_init()
546 /* restore page register */ in sky2_phy_init()
555 /* select page 3 to access LED control register */ in sky2_phy_init()
568 /* restore page register */ in sky2_phy_init()
594 /* set page register to 0 */ in sky2_phy_init()
609 /* set page register back to 0 */ in sky2_phy_init()
727 /* select page 2 to access MAC control register */ in sky2_phy_power_down()
735 /* set page register back to 0 */ in sky2_phy_power_down()
747 /* select page in sky2_phy_power_down()
1464 struct page *page = alloc_page(gfp); sky2_rx_alloc() local
[all...]
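
The sky2.c hits in the two entries above all follow the same paged-PHY convention: write a page number into the PHY's page-select register, access the registers that live on that page, then restore the page register to 0. A self-contained toy model of that sequence (hypothetical register numbers and layout; nothing here is sky2's real code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_REG 22   /* placeholder page-select register */
#define LED_CTRL 16   /* placeholder register reachable on page 3 */

static uint16_t regs[4][32];   /* four pages of 32 registers each */
static unsigned int cur_page;

static void phy_write(unsigned int reg, uint16_t val)
{
    if (reg == PAGE_REG)
        cur_page = val & 3;        /* select page */
    else
        regs[cur_page][reg] = val; /* write within the current page */
}

static uint16_t phy_read(unsigned int reg)
{
    return (reg == PAGE_REG) ? cur_page : regs[cur_page][reg];
}

int main(void)
{
    phy_write(PAGE_REG, 3);        /* select page 3 to reach LED control */
    phy_write(LED_CTRL, 0x1234);
    phy_write(PAGE_REG, 0);        /* restore page register to 0 */

    phy_write(PAGE_REG, 3);
    printf("LED control: 0x%04x\n", (unsigned)phy_read(LED_CTRL));
    phy_write(PAGE_REG, 0);
    return 0;
}
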
/kernel/linux/linux-6.6/arch/x86/kvm/svm/
svm.c  194 * Use nested page tables by default. Note, NPT may get forced off by
886 struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order); in svm_vcpu_alloc_msrpm()
1433 struct page *vmcb01_page; in svm_vcpu_create()
1434 struct page *vmsa_page = NULL; in svm_vcpu_create()
1447 * SEV-ES guests require a separate VMSA page used to contain in svm_vcpu_create()
1508 * The vmcb page can be recycled, causing a false negative in in svm_vcpu_free()
1510 * vmcb page recorded as its current vmcb. in svm_vcpu_free()
2412 /* All SVM instructions expect page aligned RAX */ in gp_interception()
4794 * the guest attempted to fetch from emulated MMIO or a guest page in svm_can_emulate_instruction()
4812 * supports DecodeAssist, a #NPF was raised, KVM's page faul in svm_can_emulate_instruction()
[all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_main.c  4007 struct page *page; in adap_free_hma_mem() local
4021 page = sg_page(iter); in adap_free_hma_mem()
4022 if (page) in adap_free_hma_mem()
4023 __free_pages(page, HMA_PAGE_ORDER); in adap_free_hma_mem()
4036 struct page *newpage; in adap_config_hma()
4095 "Not enough memory for HMA page allocation\n"); in adap_config_hma()
6777 * a page size. in init_one()
6781 "Incorrect number of egress queues per page\n"); in init_one()
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_main.c  4002 struct page *page; in adap_free_hma_mem() local
4016 page = sg_page(iter); in adap_free_hma_mem()
4017 if (page) in adap_free_hma_mem()
4018 __free_pages(page, HMA_PAGE_ORDER); in adap_free_hma_mem()
4031 struct page *newpage; in adap_config_hma()
4090 "Not enough memory for HMA page allocation\n"); in adap_config_hma()
6754 * a page size. in init_one()
6758 "Incorrect number of egress queues per page\n"); in init_one()
/kernel/linux/linux-6.6/net/bluetooth/
hci_event.c  909 bt_dev_warn(hdev, "broken local ext features page 2"); in hci_cc_read_local_ext_features()
914 if (rp->page < HCI_MAX_PAGES) in hci_cc_read_local_ext_features()
915 memcpy(hdev->features[rp->page], rp->features, 8); in hci_cc_read_local_ext_features()
3747 cp.page = 0x01; in hci_remote_features_evt()
3852 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554 in hci_cc_le_set_cig_params()
3864 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 in hci_cc_le_set_cig_params()
4976 if (ev->page < HCI_MAX_PAGES) in hci_remote_ext_features_evt()
4977 memcpy(conn->features[ev->page], ev->features, 8); in hci_remote_ext_features_evt()
4979 if (!ev->status && ev->page == 0x01) { in hci_remote_ext_features_evt()
/kernel/linux/linux-5.10/arch/powerpc/platforms/cell/spufs/
file.c  1033 #error unsupported page size in spufs_signal1_mmap_fault()
1169 #error unsupported page size in spufs_signal2_mmap_fault()
/kernel/linux/linux-5.10/arch/powerpc/platforms/powernv/
pci-ioda.c  1291 struct page *table_pages; in pnv_pci_ioda_dma_64bit_bypass()
1661 struct page *tce_mem = NULL; in pnv_pci_ioda1_setup_dma_pe()
1726 * Each TCE page is 4KB in size and each TCE entry occupies 8 in pnv_pci_ioda1_setup_dma_pe()
1903 /* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */ in pnv_pci_ioda2_setup_default_config()
1970 0/* table size */, 0/* page size */); in pnv_pci_ioda2_unset_window()
/kernel/linux/linux-6.6/arch/powerpc/platforms/cell/spufs/
file.c  1033 #error unsupported page size in spufs_signal1_mmap_fault()
1169 #error unsupported page size in spufs_signal2_mmap_fault()
