/kernel/linux/linux-5.10/fs/ocfs2/ |
H A D | ocfs2.h |
  433  * How many clusters in our truncate log.
  719  u32 clusters) in ocfs2_clusters_to_blocks()
  724  return (u64)clusters << c_to_b_bits; in ocfs2_clusters_to_blocks()
  750  unsigned int clusters; in ocfs2_clusters_for_bytes() local
  753  /* OCFS2 just cannot have enough clusters to overflow this */ in ocfs2_clusters_for_bytes()
  754  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_clusters_for_bytes()
  756  return clusters; in ocfs2_clusters_for_bytes()
  763  unsigned int clusters; in ocfs2_bytes_to_clusters() local
  765  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_bytes_to_clusters()
  766  return clusters; in ocfs2_bytes_to_clusters()
  718  ocfs2_clusters_to_blocks(struct super_block *sb, u32 clusters) ocfs2_clusters_to_blocks() argument
  776  ocfs2_clusters_to_bytes(struct super_block *sb, u32 clusters) ocfs2_clusters_to_bytes() argument
  786  unsigned int clusters; ocfs2_block_to_cluster_start() local
  796  unsigned int clusters; ocfs2_align_bytes_to_clusters() local
  819  u32 clusters = pg_index; ocfs2_page_index_to_clusters() local
  833  ocfs2_align_clusters_to_page_index(struct super_block *sb, u32 clusters) ocfs2_align_clusters_to_page_index() argument
  867  ocfs2_clusters_to_megabytes(struct super_block *sb, unsigned int clusters) ocfs2_clusters_to_megabytes() argument
  [all...]
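The ocfs2.h hits above show shift-based unit conversions: cluster and block sizes are powers of two recorded as bit counts in the superblock, so converting between clusters, blocks and bytes reduces to shifts. A minimal stand-alone sketch of that arithmetic follows; the struct and helper names are hypothetical stand-ins, not the actual ocfs2 helpers.

#include <linux/types.h>

/* Hypothetical geometry holder; in ocfs2 the bit counts live in the
 * in-memory superblock. */
struct geom {
	unsigned int cluster_bits;	/* log2(cluster size), e.g. 20 for 1 MiB */
	unsigned int block_bits;	/* log2(block size),   e.g. 12 for 4 KiB */
};

/* Mirrors the pattern at line 724: widen first, then shift by the
 * difference of the two log2 sizes. */
static inline u64 clusters_to_blocks(const struct geom *g, u32 clusters)
{
	unsigned int c_to_b_bits = g->cluster_bits - g->block_bits;

	return (u64)clusters << c_to_b_bits;
}

/* Round up: a byte count that ends inside a cluster still occupies it. */
static inline u32 clusters_for_bytes(const struct geom *g, u64 bytes)
{
	bytes += ((u64)1 << g->cluster_bits) - 1;
	return (u32)(bytes >> g->cluster_bits);
}

/* Truncate: only whole clusters are counted (pattern at line 765). */
static inline u32 bytes_to_clusters(const struct geom *g, u64 bytes)
{
	return (u32)(bytes >> g->cluster_bits);
}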
H A D | resize.c |
  172  static int update_backups(struct inode * inode, u32 clusters, char *data) in update_backups() argument
  185  if (cluster >= clusters) in update_backups()
  215  u32 clusters = 0; in ocfs2_update_super_and_backups() local
  233  clusters = le32_to_cpu(super_di->i_clusters); in ocfs2_update_super_and_backups()
  242  ret = update_backups(inode, clusters, super_bh->b_data); in ocfs2_update_super_and_backups()
  255  * Extend the filesystem to the new number of clusters specified. This entry
  391  else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc) in ocfs2_check_new_group()
  393  "input has %u clusters set\n", in ocfs2_check_new_group()
  395  le16_to_cpu(gd->bg_bits), input->clusters); in ocfs2_check_new_group()
  428  else if (total_clusters + input->clusters < total_clusters) in ocfs2_verify_group_and_input()
  [all...]
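The check excerpted at line 428 is the standard unsigned wrap-around guard: for unsigned integers, a + b < a is true exactly when the addition overflowed. A small illustrative helper (hypothetical name) showing the idiom:

#include <linux/types.h>

/* Hypothetical helper: reject a resize request whose added cluster count
 * would wrap the 32-bit total, the same "a + b < a" test used at
 * resize.c line 428. */
static inline bool cluster_total_would_overflow(u32 total_clusters, u32 added_clusters)
{
	return total_clusters + added_clusters < total_clusters;
}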
H A D | ocfs2_trace.h |
  505  unsigned int e_cpos, unsigned int clusters),
  506  TP_ARGS(owner, cpos, len, index, e_cpos, clusters),
  513  __field(unsigned int, clusters)
  521  __entry->clusters = clusters;
  525  __entry->e_cpos, __entry->clusters)
  530  unsigned int clusters, unsigned int depth),
  531  TP_ARGS(ino, new_cpos, clusters, depth),
  535  __field(unsigned int, clusters)
  541  __entry->clusters
  [all...]
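The ocfs2_trace.h hits follow the usual tracepoint layout: the value arrives through TP_PROTO/TP_ARGS, is copied into the ring-buffer record by __field()/TP_fast_assign(), and is rendered by TP_printk(). A minimal trace header sketching that flow; the system and event names here are hypothetical, not ocfs2's, and the surrounding boilerplate is abbreviated to the essentials.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_clusters,
	/* prototype of the tracepoint call site */
	TP_PROTO(unsigned long long ino, unsigned int clusters),
	TP_ARGS(ino, clusters),

	/* layout of the record stored in the trace ring buffer */
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned int, clusters)
	),

	/* copy the arguments into the record */
	TP_fast_assign(
		__entry->ino = ino;
		__entry->clusters = clusters;
	),

	/* how the record is printed in the trace output */
	TP_printk("ino %llu clusters %u", __entry->ino, __entry->clusters)
);

#endif /* _TRACE_EXAMPLE_H */

/* This part must be outside the include guard */
#include <trace/define_trace.h>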
H A D | file.c |
  1041  * Call this even if we don't add any clusters to the tree. We in ocfs2_extend_no_holes()
  1441  u32 cpos, phys_cpos, clusters, alloc_size; in ocfs2_allocate_unwritten_extents() local
  1470  clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len); in ocfs2_allocate_unwritten_extents()
  1471  clusters -= cpos; in ocfs2_allocate_unwritten_extents()
  1473  while (clusters) { in ocfs2_allocate_unwritten_extents()
  1485  if (alloc_size > clusters) in ocfs2_allocate_unwritten_extents()
  1486  alloc_size = clusters; in ocfs2_allocate_unwritten_extents()
  1505  clusters -= alloc_size; in ocfs2_allocate_unwritten_extents()
  1516  * Truncate a byte range, avoiding pages within partial clusters. This
  1590  * partial clusters here in ocfs2_zero_partial_clusters()
  2137  u32 cpos, clusters, extent_len, phys_cpos; ocfs2_check_range_for_refcount() local
  2249  u32 clusters; ocfs2_prepare_inode_for_write() local
  [all...]
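The ocfs2_allocate_unwritten_extents() hits show a common shape: translate the byte range into a cluster count, then consume it in allocator-sized chunks. A hypothetical stand-alone version of that loop is sketched below; the real function additionally looks up existing extents, reserves journal credits and handles errors on each pass.

#include <linux/types.h>

/* Hypothetical chunked-allocation loop mirroring lines 1470-1505:
 * "clusters" is what remains to allocate, "cpos" the logical cluster
 * offset reached so far, "max_chunk" the most one pass may claim. */
static int allocate_clusters_in_chunks(u32 cpos, u32 clusters, u32 max_chunk)
{
	while (clusters) {
		u32 alloc_size = max_chunk;

		if (alloc_size > clusters)
			alloc_size = clusters;

		/* ... ask the real allocator for (cpos, alloc_size) here ... */

		clusters -= alloc_size;
		cpos += alloc_size;
	}

	return 0;
}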
H A D | ocfs2_ioctl.h |
  52   __u32 clusters; /* Total number of clusters in this group */ member
  53   __u32 frees; /* Total free clusters in this group */
  160  __u32 ffs_min; /* Minimum free chunksize in clusters */
  165  __u32 iff_chunksize; /* chunksize in clusters(in) */
  220  claim new clusters
|
H A D | refcounttree.h | 44 u32 clusters,
|
/kernel/linux/linux-6.6/fs/ocfs2/ |
H A D | ocfs2.h |
  431  * How many clusters in our truncate log.
  716  u32 clusters) in ocfs2_clusters_to_blocks()
  721  return (u64)clusters << c_to_b_bits; in ocfs2_clusters_to_blocks()
  747  unsigned int clusters; in ocfs2_clusters_for_bytes() local
  750  /* OCFS2 just cannot have enough clusters to overflow this */ in ocfs2_clusters_for_bytes()
  751  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_clusters_for_bytes()
  753  return clusters; in ocfs2_clusters_for_bytes()
  760  unsigned int clusters; in ocfs2_bytes_to_clusters() local
  762  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_bytes_to_clusters()
  763  return clusters; in ocfs2_bytes_to_clusters()
  715  ocfs2_clusters_to_blocks(struct super_block *sb, u32 clusters) ocfs2_clusters_to_blocks() argument
  773  ocfs2_clusters_to_bytes(struct super_block *sb, u32 clusters) ocfs2_clusters_to_bytes() argument
  783  unsigned int clusters; ocfs2_block_to_cluster_start() local
  793  unsigned int clusters; ocfs2_align_bytes_to_clusters() local
  816  u32 clusters = pg_index; ocfs2_page_index_to_clusters() local
  830  ocfs2_align_clusters_to_page_index(struct super_block *sb, u32 clusters) ocfs2_align_clusters_to_page_index() argument
  864  ocfs2_clusters_to_megabytes(struct super_block *sb, unsigned int clusters) ocfs2_clusters_to_megabytes() argument
  [all...]
H A D | resize.c |
  170  static int update_backups(struct inode * inode, u32 clusters, char *data) in update_backups() argument
  183  if (cluster >= clusters) in update_backups()
  213  u32 clusters = 0; in ocfs2_update_super_and_backups() local
  231  clusters = le32_to_cpu(super_di->i_clusters); in ocfs2_update_super_and_backups()
  240  ret = update_backups(inode, clusters, super_bh->b_data); in ocfs2_update_super_and_backups()
  253  * Extend the filesystem to the new number of clusters specified. This entry
  389  else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc) in ocfs2_check_new_group()
  391  "input has %u clusters set\n", in ocfs2_check_new_group()
  393  le16_to_cpu(gd->bg_bits), input->clusters); in ocfs2_check_new_group()
  426  else if (total_clusters + input->clusters < total_clusters) in ocfs2_verify_group_and_input()
  [all...]
H A D | ocfs2_trace.h |
  505  unsigned int e_cpos, unsigned int clusters),
  506  TP_ARGS(owner, cpos, len, index, e_cpos, clusters),
  513  __field(unsigned int, clusters)
  521  __entry->clusters = clusters;
  525  __entry->e_cpos, __entry->clusters)
  530  unsigned int clusters, unsigned int depth),
  531  TP_ARGS(ino, new_cpos, clusters, depth),
  535  __field(unsigned int, clusters)
  541  __entry->clusters
  [all...]
H A D | file.c |
  1033  * Call this even if we don't add any clusters to the tree. We in ocfs2_extend_no_holes()
  1435  u32 cpos, phys_cpos, clusters, alloc_size; in ocfs2_allocate_unwritten_extents() local
  1464  clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len); in ocfs2_allocate_unwritten_extents()
  1465  clusters -= cpos; in ocfs2_allocate_unwritten_extents()
  1467  while (clusters) { in ocfs2_allocate_unwritten_extents()
  1479  if (alloc_size > clusters) in ocfs2_allocate_unwritten_extents()
  1480  alloc_size = clusters; in ocfs2_allocate_unwritten_extents()
  1499  clusters -= alloc_size; in ocfs2_allocate_unwritten_extents()
  1510  * Truncate a byte range, avoiding pages within partial clusters. This
  1584  * partial clusters here in ocfs2_zero_partial_clusters()
  2131  u32 cpos, clusters, extent_len, phys_cpos; ocfs2_check_range_for_refcount() local
  2243  u32 clusters; ocfs2_prepare_inode_for_write() local
  [all...]
H A D | refcounttree.h | 42 u32 clusters,
|
H A D | ocfs2_ioctl.h |
  42   __u32 clusters; /* Total number of clusters in this group */ member
  43   __u32 frees; /* Total free clusters in this group */
  150  __u32 ffs_min; /* Minimum free chunksize in clusters */
  155  __u32 iff_chunksize; /* chunksize in clusters(in) */
  210  claim new clusters
|
/kernel/linux/linux-5.10/arch/arm/common/ |
H A D | mcpm_entry.c |
  36   mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
  37   sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
  50   mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
  51   sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
  66   mcpm_sync.clusters[cluster].cluster = state; in __mcpm_outbound_leave_critical()
  67   sync_cache_w(&mcpm_sync.clusters[cluster].cluster); in __mcpm_outbound_leave_critical()
  85   struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; in __mcpm_outbound_enter_critical()
  137  sync_cache_r(&mcpm_sync.clusters[cluster].cluster); in __mcpm_cluster_state()
  138  return mcpm_sync.clusters[cluster].cluster; in __mcpm_cluster_state()
  436  mcpm_sync.clusters[ in mcpm_sync_init()
  [all...]
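Every mcpm_entry.c hit follows one rule: the per-CPU or per-cluster state word is written first and then pushed out with sync_cache_w() (or re-read with sync_cache_r()), because the CPUs coordinating power transitions may be running with their caches disabled. A minimal sketch of that publish/observe pair, with a hypothetical state variable standing in for mcpm_sync.clusters[...]:

#include <linux/types.h>
#include <asm/cacheflush.h>	/* sync_cache_w()/sync_cache_r() on 32-bit ARM */

/* Hypothetical stand-in for one mcpm_sync.clusters[cluster].cpus[cpu].cpu slot. */
static u8 cpu_power_state;

static void publish_cpu_state(u8 new_state)
{
	cpu_power_state = new_state;		/* e.g. CPU_GOING_DOWN, CPU_DOWN */
	sync_cache_w(&cpu_power_state);		/* clean the line so an observer
						 * running uncached sees the write */
}

static u8 observe_cpu_state(void)
{
	sync_cache_r(&cpu_power_state);		/* drop any stale cached copy */
	return cpu_power_state;
}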
/kernel/linux/linux-6.6/arch/arm/common/ |
H A D | mcpm_entry.c |
  36   mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
  37   sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
  50   mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
  51   sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
  66   mcpm_sync.clusters[cluster].cluster = state; in __mcpm_outbound_leave_critical()
  67   sync_cache_w(&mcpm_sync.clusters[cluster].cluster); in __mcpm_outbound_leave_critical()
  85   struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; in __mcpm_outbound_enter_critical()
  137  sync_cache_r(&mcpm_sync.clusters[cluster].cluster); in __mcpm_cluster_state()
  138  return mcpm_sync.clusters[cluster].cluster; in __mcpm_cluster_state()
  436  mcpm_sync.clusters[ in mcpm_sync_init()
  [all...]
/kernel/linux/linux-5.10/drivers/cpufreq/ |
H A D | tegra186-cpufreq.c |
  53   struct tegra186_cpufreq_cluster *clusters; member
  62   struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_init()
  113  struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_get()
  237  data->clusters = devm_kcalloc(&pdev->dev, ARRAY_SIZE(tegra186_clusters), in tegra186_cpufreq_probe()
  238  sizeof(*data->clusters), GFP_KERNEL); in tegra186_cpufreq_probe()
  239  if (!data->clusters) in tegra186_cpufreq_probe()
  255  struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_probe()
|
/kernel/linux/linux-6.6/drivers/cpufreq/ |
H A D | tegra186-cpufreq.c |
  69   struct tegra186_cpufreq_cluster clusters[]; member
  77   policy->freq_table = data->clusters[cluster].table; in tegra186_cpufreq_init()
  112  cluster = &data->clusters[cluster_id]; in tegra186_cpufreq_get()
  225  struct_size(data, clusters, TEGRA186_NUM_CLUSTERS), in tegra186_cpufreq_probe()
  243  struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_probe()
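Comparing the two tegra186-cpufreq.c entries: the 5.10 driver keeps clusters as a separate devm_kcalloc()'d pointer, while the 6.6 driver embeds it as a flexible array member sized with struct_size(), so parent and array share one allocation. A small sketch of the 6.6-style layout (hypothetical names, plain kzalloc() rather than the devm_* variant):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_cluster {
	unsigned int id;
};

struct example_data {
	unsigned int num_clusters;
	struct example_cluster clusters[];	/* flexible array member */
};

static struct example_data *example_alloc(unsigned int n)
{
	struct example_data *data;

	/* struct_size() computes sizeof(*data) + n * sizeof(data->clusters[0])
	 * with overflow checking. */
	data = kzalloc(struct_size(data, clusters, n), GFP_KERNEL);
	if (!data)
		return NULL;

	data->num_clusters = n;
	return data;
}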
|
/kernel/linux/linux-6.6/sound/soc/apple/ |
H A D | mca.c |
  7     // The MCA peripheral is made up of a number of identical units called clusters.
  11    // The clusters can operate independently, or can be combined together in a
  14    // ports. The I2S ports can be routed to any of the clusters (irrespective
  160   /* Mutex for accessing port_driver of foreign clusters */
  164   struct mca_cluster clusters[]; member
  188   return &mca->clusters[cluster_no]; in mca_dai_to_cluster()
  312   be_cl = &mca->clusters[i]; in mca_fe_clocks_in_use()
  339   fe_cl = &mca->clusters[cl->port_driver]; in mca_be_prepare()
  373   fe_cl = &mca->clusters[cl->port_driver]; in mca_be_hw_free()
  991   struct mca_cluster *cl = &mca->clusters[ in apple_mca_release()
  1012  struct mca_cluster *clusters; apple_mca_probe() local
  [all...]
/kernel/linux/linux-5.10/fs/ntfs/ |
H A D | lcnalloc.c |
  23   * ntfs_cluster_free_from_rl_nolock - free clusters from runlist
  24   * @vol: mounted ntfs volume on which to free the clusters
  25   * @rl: runlist describing the clusters to free
  27   * Free all the clusters described by the runlist @rl on the volume @vol. In
  28   * the case of an error being returned, at least some of the clusters were not
  59   * ntfs_cluster_alloc - allocate clusters on an ntfs volume
  60   * @vol: mounted ntfs volume on which to allocate the clusters
  62   * @count: number of clusters to allocate
  63   * @start_lcn: starting lcn at which to allocate the clusters (or -1 if none)
  64   * @zone: zone from which to allocate the clusters
  139  s64 clusters; ntfs_cluster_alloc() local
  [all...]
/kernel/linux/linux-6.6/fs/ntfs/ |
H A D | lcnalloc.c |
  23   * ntfs_cluster_free_from_rl_nolock - free clusters from runlist
  24   * @vol: mounted ntfs volume on which to free the clusters
  25   * @rl: runlist describing the clusters to free
  27   * Free all the clusters described by the runlist @rl on the volume @vol. In
  28   * the case of an error being returned, at least some of the clusters were not
  59   * ntfs_cluster_alloc - allocate clusters on an ntfs volume
  60   * @vol: mounted ntfs volume on which to allocate the clusters
  62   * @count: number of clusters to allocate
  63   * @start_lcn: starting lcn at which to allocate the clusters (or -1 if none)
  64   * @zone: zone from which to allocate the clusters
  139  s64 clusters; ntfs_cluster_alloc() local
  [all...]
/kernel/linux/linux-5.10/arch/arm/include/asm/ |
H A D | mcpm.h | 13 * Maximum number of possible clusters / CPUs per cluster. 298 struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; member
|
/kernel/linux/linux-6.6/arch/arm/include/asm/ |
H A D | mcpm.h | 13 * Maximum number of possible clusters / CPUs per cluster. 298 struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; member
|
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/adreno/ |
H A D | a6xx_gpu_state.c |
  28    struct a6xx_gpu_state_obj *clusters; member
  525   /* Some clusters need a selector register to be programmed too */ in a6xx_get_cluster()
  569   a6xx_state->clusters = state_kcalloc(a6xx_state,
  570   ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters)); in a6xx_get_clusters()
  572   if (!a6xx_state->clusters) in a6xx_get_clusters()
  579   &a6xx_state->clusters[i], dumper); in a6xx_get_clusters()
  1198  drm_puts(p, "clusters:\n"); in a6xx_show()
  1200  a6xx_show_cluster(&a6xx_state->clusters[i], p); in a6xx_show()
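The a6xx_get_clusters() hits show the usual snapshot pattern: allocate one state object per entry of a static descriptor table, bail out if the allocation fails, then fill each slot in a loop and print the slots later from a6xx_show(). A generic sketch of that pattern using plain kcalloc() (state_kcalloc() is a driver-internal helper; all names below are hypothetical):

#include <linux/kernel.h>
#include <linux/slab.h>

struct snapshot_desc {
	unsigned int reg_base;
};

struct snapshot_obj {
	void *data;
};

static const struct snapshot_desc descs[] = {
	{ 0x1000 },
	{ 0x2000 },
};

static struct snapshot_obj *capture_all(void)
{
	struct snapshot_obj *objs;
	int i;

	/* one capture object per static descriptor */
	objs = kcalloc(ARRAY_SIZE(descs), sizeof(*objs), GFP_KERNEL);
	if (!objs)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(descs); i++) {
		/* ... snapshot the registers described by descs[i] into objs[i] ... */
	}

	return objs;
}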
|
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/adreno/ |
H A D | a6xx_gpu_state.c |
  28    struct a6xx_gpu_state_obj *clusters; member
  555   /* Some clusters need a selector register to be programmed too */ in a6xx_get_cluster()
  599   a6xx_state->clusters = state_kcalloc(a6xx_state,
  600   ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters)); in a6xx_get_clusters()
  602   if (!a6xx_state->clusters) in a6xx_get_clusters()
  609   &a6xx_state->clusters[i], dumper); in a6xx_get_clusters()
  1392  drm_puts(p, "clusters:\n"); in a6xx_show()
  1394  a6xx_show_cluster(&a6xx_state->clusters[i], p); in a6xx_show()
|
/kernel/linux/linux-5.10/drivers/perf/ |
H A D | qcom_l2_pmu.c |
  117  struct list_head clusters; member
  121  * The cache is made up of one or more clusters, each cluster has its own PMU.
  755  list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
  866  list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
  907  INIT_LIST_HEAD(&l2cache_pmu->clusters); in l2_cache_pmu_probe()
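The qcom_l2_pmu.c hits are a textbook kernel linked-list lifecycle: the PMU's clusters list_head is initialised at probe, one node is added per discovered cluster, and the list is walked when associating a CPU with its cluster. A condensed sketch of that lifecycle (structure and function names are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct example_pmu {
	struct list_head clusters;
};

struct example_cluster {
	struct list_head next;
	int id;
};

static void example_setup(struct example_pmu *pmu)
{
	struct example_cluster *cluster;

	INIT_LIST_HEAD(&pmu->clusters);			/* probe time */

	cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
	if (!cluster)
		return;
	cluster->id = 0;
	list_add(&cluster->next, &pmu->clusters);	/* per-cluster probe */

	/* later: walk the list to find the cluster a CPU belongs to */
	list_for_each_entry(cluster, &pmu->clusters, next) {
		/* ... match cluster->id against the CPU's cluster ... */
	}
}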
|
/kernel/linux/linux-6.6/drivers/perf/ |
H A D | qcom_l2_pmu.c |
  117  struct list_head clusters; member
  121  * The cache is made up of one or more clusters, each cluster has its own PMU.
  752  list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
  885  list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
  901  INIT_LIST_HEAD(&l2cache_pmu->clusters); in l2_cache_pmu_probe()
|