Lines Matching refs:prop

2138 struct asic_fixed_properties *prop = &hdev->asic_prop;
2149 if (!(prop->tpc_enabled_mask & BIT(tpc_seq)))
2163 if (!(prop->tpc_enabled_mask & BIT(TPC_ID_DCORE0_TPC6)))
2183 struct asic_fixed_properties *prop = &hdev->asic_prop;
2189 prop->num_functional_hbms = GAUDI2_HBM_NUM;
2210 prop->num_functional_hbms = GAUDI2_HBM_NUM - faulty_hbms;
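
The two assignments at 2189 and 2210 set the functional HBM count: the full GAUDI2_HBM_NUM when no DRAM binning is requested, otherwise the total minus the number of faulty HBMs. A minimal standalone sketch of that derivation, assuming the binning value is a bitmask with one bit per faulty HBM (the helper name and the popcount-based counting are illustrative, not the driver's code):

	#include <stdint.h>

	/* Illustrative only: count functional HBMs from a faulty-HBM bitmask. */
	static uint32_t functional_hbms(uint64_t dram_binning_mask, uint32_t total_hbms)
	{
		uint32_t faulty_hbms = (uint32_t)__builtin_popcountll(dram_binning_mask);

		return total_hbms - faulty_hbms;
	}
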
2216 struct asic_fixed_properties *prop = &hdev->asic_prop;
2229 basic_hbm_page_size = prop->num_functional_hbms * SZ_8M;
2230 prop->dram_page_size = GAUDI2_COMPENSATE_TLB_PAGE_SIZE_FACTOR * basic_hbm_page_size;
2231 prop->device_mem_alloc_default_page_size = prop->dram_page_size;
2232 prop->dram_size = prop->num_functional_hbms * SZ_16G;
2233 prop->dram_base_address = DRAM_PHYS_BASE;
2234 prop->dram_end_address = prop->dram_base_address + prop->dram_size;
2235 prop->dram_supports_virtual_memory = true;
2237 prop->dram_user_base_address = DRAM_PHYS_BASE + prop->dram_page_size;
2238 prop->dram_hints_align_mask = ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK;
2239 prop->hints_dram_reserved_va_range.start_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_START;
2240 prop->hints_dram_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_END;
2259 prop->dmmu.start_addr = prop->dram_base_address +
2260 (prop->dram_page_size *
2261 DIV_ROUND_UP_SECTOR_T(prop->dram_size, prop->dram_page_size));
2263 prop->dmmu.end_addr = prop->dmmu.start_addr + prop->dram_page_size *
2264 div_u64((VA_HBM_SPACE_END - prop->dmmu.start_addr), prop->dmmu.page_size);
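
Lines 2229-2264 derive the DRAM page size from the functional HBM count and then place the DRAM MMU (dmmu) range immediately above the naturally mapped DRAM, rounding the DRAM size up to a whole device page. A small arithmetic sketch of the same computation; the compensation factor, HBM count and base address below are placeholder example values, not the driver's constants:

	#include <stdint.h>
	#include <stdio.h>

	#define SZ_8M	(8ULL << 20)
	#define SZ_16G	(16ULL << 30)

	int main(void)
	{
		uint64_t num_functional_hbms = 6;		/* example: fully populated device */
		uint64_t compensate_factor = 8;			/* placeholder for the TLB compensation factor */
		uint64_t dram_base = 0x1000000000000ULL;	/* placeholder physical base */

		uint64_t page_size = compensate_factor * num_functional_hbms * SZ_8M;
		uint64_t dram_size = num_functional_hbms * SZ_16G;

		/* dmmu start: first page boundary at or above the end of DRAM (DIV_ROUND_UP) */
		uint64_t pages = (dram_size + page_size - 1) / page_size;
		uint64_t dmmu_start = dram_base + pages * page_size;

		printf("dram page = %llu MB, dmmu start = 0x%llx\n",
		       (unsigned long long)(page_size >> 20), (unsigned long long)dmmu_start);
		return 0;
	}
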
2271 struct asic_fixed_properties *prop = &hdev->asic_prop;
2276 prop->max_queues = GAUDI2_QUEUE_ID_SIZE;
2277 prop->hw_queues_props = kcalloc(prop->max_queues, sizeof(struct hw_queue_properties),
2280 if (!prop->hw_queues_props)
2283 q_props = prop->hw_queues_props;
2303 prop->cache_line_size = DEVICE_CACHE_LINE_SIZE;
2304 prop->cfg_base_address = CFG_BASE;
2305 prop->device_dma_offset_for_host_access = HOST_PHYS_BASE_0;
2306 prop->host_base_address = HOST_PHYS_BASE_0;
2307 prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE_0;
2308 prop->max_pending_cs = GAUDI2_MAX_PENDING_CS;
2309 prop->completion_queues_count = GAUDI2_RESERVED_CQ_NUMBER;
2310 prop->user_dec_intr_count = NUMBER_OF_DEC;
2311 prop->user_interrupt_count = GAUDI2_IRQ_NUM_USER_LAST - GAUDI2_IRQ_NUM_USER_FIRST + 1;
2312 prop->completion_mode = HL_COMPLETION_MODE_CS;
2313 prop->sync_stream_first_sob = GAUDI2_RESERVED_SOB_NUMBER;
2314 prop->sync_stream_first_mon = GAUDI2_RESERVED_MON_NUMBER;
2316 prop->sram_base_address = SRAM_BASE_ADDR;
2317 prop->sram_size = SRAM_SIZE;
2318 prop->sram_end_address = prop->sram_base_address + prop->sram_size;
2319 prop->sram_user_base_address = prop->sram_base_address + SRAM_USER_BASE_OFFSET;
2321 prop->hints_range_reservation = true;
2323 prop->rotator_enabled_mask = BIT(NUM_OF_ROT) - 1;
2326 prop->mmu_pgt_size = 0x800000; /* 8MB */
2328 prop->mmu_pgt_size = MMU_PAGE_TABLES_INITIAL_SIZE;
2330 prop->mmu_pte_size = HL_PTE_SIZE;
2331 prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
2332 prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
2334 prop->dmmu.hop_shifts[MMU_HOP0] = DHOP0_SHIFT;
2335 prop->dmmu.hop_shifts[MMU_HOP1] = DHOP1_SHIFT;
2336 prop->dmmu.hop_shifts[MMU_HOP2] = DHOP2_SHIFT;
2337 prop->dmmu.hop_shifts[MMU_HOP3] = DHOP3_SHIFT;
2338 prop->dmmu.hop_shifts[MMU_HOP4] = DHOP4_SHIFT;
2339 prop->dmmu.hop_masks[MMU_HOP0] = DHOP0_MASK;
2340 prop->dmmu.hop_masks[MMU_HOP1] = DHOP1_MASK;
2341 prop->dmmu.hop_masks[MMU_HOP2] = DHOP2_MASK;
2342 prop->dmmu.hop_masks[MMU_HOP3] = DHOP3_MASK;
2343 prop->dmmu.hop_masks[MMU_HOP4] = DHOP4_MASK;
2344 prop->dmmu.page_size = PAGE_SIZE_1GB;
2345 prop->dmmu.num_hops = MMU_ARCH_6_HOPS;
2346 prop->dmmu.last_mask = LAST_MASK;
2347 prop->dmmu.host_resident = 1;
2348 prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
2349 prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
2357 prop->dram_size = (GAUDI2_HBM_NUM - 1) * SZ_16G;
2360 prop->pmmu.host_resident = 1;
2361 prop->pmmu.num_hops = MMU_ARCH_6_HOPS;
2362 prop->pmmu.last_mask = LAST_MASK;
2363 prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
2364 prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
2366 prop->hints_host_reserved_va_range.start_addr = RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START;
2367 prop->hints_host_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HOST_END;
2368 prop->hints_host_hpage_reserved_va_range.start_addr =
2370 prop->hints_host_hpage_reserved_va_range.end_addr =
2374 prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_64K;
2375 prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_64K;
2376 prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_64K;
2377 prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_64K;
2378 prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_64K;
2379 prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_64K;
2380 prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_64K;
2381 prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_64K;
2382 prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_64K;
2383 prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_64K;
2384 prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_64K;
2385 prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_64K;
2386 prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START;
2387 prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END;
2388 prop->pmmu.page_size = PAGE_SIZE_64KB;
2391 memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
2392 prop->pmmu_huge.page_size = PAGE_SIZE_16MB;
2393 prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START;
2394 prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
2396 prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_4K;
2397 prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_4K;
2398 prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_4K;
2399 prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_4K;
2400 prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_4K;
2401 prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_4K;
2402 prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_4K;
2403 prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_4K;
2404 prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_4K;
2405 prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_4K;
2406 prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_4K;
2407 prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_4K;
2408 prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START;
2409 prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END;
2410 prop->pmmu.page_size = PAGE_SIZE_4KB;
2413 memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
2414 prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
2415 prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START;
2416 prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
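
Lines 2374-2416 select one of two page-walk layouts for the host PMMU: 64KB base pages paired with 16MB huge pages, or 4KB base pages paired with 2MB huge pages, each described by per-hop shift/mask pairs. A generic sketch of how such pairs decompose a virtual address into per-hop page-table indices (the loop mirrors how hop_masks/hop_shifts are consumed; it is not Gaudi2-specific code):

	#include <stdint.h>

	#define MMU_HOPS 6

	/* Illustrative only: extract the page-table index of each hop from a VA,
	 * given per-hop shift/mask pairs such as pmmu.hop_shifts / pmmu.hop_masks.
	 */
	static void va_to_hop_indices(uint64_t va, const uint32_t shifts[MMU_HOPS],
				      const uint64_t masks[MMU_HOPS], uint64_t idx[MMU_HOPS])
	{
		for (int hop = 0; hop < MMU_HOPS; hop++)
			idx[hop] = (va & masks[hop]) >> shifts[hop];
	}
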
2419 prop->max_num_of_engines = GAUDI2_ENGINE_ID_SIZE;
2420 prop->num_engine_cores = CPU_ID_MAX;
2421 prop->cfg_size = CFG_SIZE;
2422 prop->max_asid = MAX_ASID;
2423 prop->num_of_events = GAUDI2_EVENT_SIZE;
2425 prop->supports_engine_modes = true;
2427 prop->dc_power_default = DC_POWER_DEFAULT;
2429 prop->cb_pool_cb_cnt = GAUDI2_CB_POOL_CB_CNT;
2430 prop->cb_pool_cb_size = GAUDI2_CB_POOL_CB_SIZE;
2431 prop->pcie_dbi_base_address = CFG_BASE + mmPCIE_DBI_BASE;
2432 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
2434 strncpy(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN);
2436 prop->mme_master_slave_mode = 1;
2438 prop->first_available_user_sob[0] = GAUDI2_RESERVED_SOB_NUMBER +
2441 prop->first_available_user_mon[0] = GAUDI2_RESERVED_MON_NUMBER +
2444 prop->first_available_user_interrupt = GAUDI2_IRQ_NUM_USER_FIRST;
2445 prop->tpc_interrupt_id = GAUDI2_IRQ_NUM_TPC_ASSERT;
2446 prop->eq_interrupt_id = GAUDI2_IRQ_NUM_EVENT_QUEUE;
2448 prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER;
2450 prop->fw_cpu_boot_dev_sts0_valid = false;
2451 prop->fw_cpu_boot_dev_sts1_valid = false;
2452 prop->hard_reset_done_by_fw = false;
2453 prop->gic_interrupts_enable = true;
2455 prop->server_type = HL_SERVER_TYPE_UNKNOWN;
2457 prop->max_dec = NUMBER_OF_DEC;
2459 prop->clk_pll_index = HL_GAUDI2_MME_PLL;
2461 prop->dma_mask = 64;
2463 prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
2583 struct asic_fixed_properties *prop = &hdev->asic_prop;
2596 prop->tpc_binning_mask = hdev->tpc_binning;
2597 prop->tpc_enabled_mask = GAUDI2_TPC_FULL_MASK;
2604 struct asic_fixed_properties *prop = &hdev->asic_prop;
2605 struct hw_queue_properties *q_props = prop->hw_queues_props;
2614 tpc_binning_mask = prop->tpc_binning_mask;
2646 clear_bit(subst_seq, (unsigned long *)&prop->tpc_enabled_mask);
2662 struct asic_fixed_properties *prop = &hdev->asic_prop;
2677 prop->decoder_binning_mask = (hdev->decoder_binning & GAUDI2_DECODER_FULL_MASK);
2679 if (prop->decoder_binning_mask)
2680 prop->decoder_enabled_mask = (GAUDI2_DECODER_FULL_MASK & ~BIT(DEC_ID_PCIE_VDEC1));
2682 prop->decoder_enabled_mask = GAUDI2_DECODER_FULL_MASK;
2689 struct asic_fixed_properties *prop = &hdev->asic_prop;
2693 prop->dram_binning_mask = 0;
2694 prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK;
2699 prop->faulty_dram_cluster_map |= hdev->dram_binning;
2700 prop->dram_binning_mask = hdev->dram_binning;
2701 prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK & ~BIT(HBM_ID5);
2706 struct asic_fixed_properties *prop = &hdev->asic_prop;
2724 prop->edma_binning_mask = 0;
2725 prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK;
2732 prop->faulty_dram_cluster_map |= BIT(edma_to_hbm_cluster[seq]);
2733 prop->edma_binning_mask = hdev->edma_binning;
2734 prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK & ~BIT(EDMA_ID_DCORE3_INSTANCE1);
2737 q_props = prop->hw_queues_props;
2748 struct asic_fixed_properties *prop = &hdev->asic_prop;
2753 prop->xbar_edge_enabled_mask = GAUDI2_XBAR_EDGE_FULL_MASK;
2776 prop->faulty_dram_cluster_map |= BIT(xbar_edge_to_hbm_cluster[seq]);
2777 prop->xbar_edge_enabled_mask = (~xbar_edge_iso_mask) & GAUDI2_XBAR_EDGE_FULL_MASK;
2812 struct asic_fixed_properties *prop = &hdev->asic_prop;
2815 rc = gaudi2_set_cluster_binning_masks_common(hdev, prop->cpucp_info.xbar_binning_mask);
2820 if (prop->faulty_dram_cluster_map) {
2821 u8 cluster_seq = __ffs((unsigned long)prop->faulty_dram_cluster_map);
2823 prop->hmmu_hif_enabled_mask = cluster_hmmu_hif_enabled_mask[cluster_seq];
2851 struct asic_fixed_properties *prop = &hdev->asic_prop;
2870 dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
2878 dram_size, prop->dram_size);
2879 dram_size = prop->dram_size;
2882 prop->dram_size = dram_size;
2883 prop->dram_end_address = prop->dram_base_address + dram_size;
2886 if (!strlen(prop->cpucp_info.card_name))
2887 strncpy(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN);
2890 hdev->dram_binning = prop->cpucp_info.dram_binning_mask;
2891 hdev->edma_binning = prop->cpucp_info.edma_binning_mask;
2892 hdev->tpc_binning = le64_to_cpu(prop->cpucp_info.tpc_binning_mask);
2893 hdev->decoder_binning = lower_32_bits(le64_to_cpu(prop->cpucp_info.decoder_binning_mask));
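
Lines 2870-2893 fold firmware-reported values into the properties: the DRAM size from cpucp_info is sanity-checked against the driver default, and the binning masks are taken over after endian conversion, with the decoder mask truncated to its low 32 bits. A standalone sketch of those two conversions; in the kernel they are le64_to_cpu() and lower_32_bits(), and the helpers below are illustrative equivalents:

	#include <stdint.h>

	/* Illustrative little-endian 64-bit load (what le64_to_cpu() yields for a __le64 field). */
	static uint64_t le64_load(const uint8_t b[8])
	{
		uint64_t v = 0;

		for (int i = 7; i >= 0; i--)
			v = (v << 8) | b[i];
		return v;
	}

	/* Illustrative equivalent of lower_32_bits(). */
	static uint32_t low_32_bits(uint64_t v)
	{
		return (uint32_t)(v & 0xffffffffULL);
	}
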
2915 prop->max_power_default = (u64) max_power;
2940 struct asic_fixed_properties *prop = &hdev->asic_prop;
2967 prop->dram_pci_bar_size = pci_resource_len(pdev, DRAM_BAR_ID);
3318 struct asic_fixed_properties *prop = &hdev->asic_prop;
3344 region->bar_size = prop->dram_pci_bar_size;
3351 struct asic_fixed_properties *prop = &hdev->asic_prop;
3383 for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++)
3396 struct asic_fixed_properties *prop = &hdev->asic_prop;
3398 &prop->skip_special_blocks_cfg;
3400 kfree(prop->special_blocks);
3419 struct asic_fixed_properties *prop = &hdev->asic_prop;
3423 prop->glbl_err_cause_num = GAUDI2_NUM_OF_GLBL_ERR_CAUSE;
3424 prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks);
3425 prop->special_blocks = kmalloc_array(prop->num_of_special_blocks,
3426 sizeof(*prop->special_blocks), GFP_KERNEL);
3427 if (!prop->special_blocks)
3430 for (i = 0 ; i < prop->num_of_special_blocks ; i++)
3431 memcpy(&prop->special_blocks[i], &gaudi2_special_blocks[i],
3432 sizeof(*prop->special_blocks));
3435 memset(&prop->skip_special_blocks_cfg, 0, sizeof(prop->skip_special_blocks_cfg));
3436 prop->skip_special_blocks_cfg.skip_block_hook = gaudi2_special_block_skip;
3439 prop->skip_special_blocks_cfg.block_types =
3442 if (!prop->skip_special_blocks_cfg.block_types) {
3447 memcpy(prop->skip_special_blocks_cfg.block_types, gaudi2_iterator_skip_block_types,
3450 prop->skip_special_blocks_cfg.block_types_len =
3455 prop->skip_special_blocks_cfg.block_ranges =
3458 if (!prop->skip_special_blocks_cfg.block_ranges) {
3464 memcpy(&prop->skip_special_blocks_cfg.block_ranges[i],
3468 prop->skip_special_blocks_cfg.block_ranges_len =
3475 kfree(prop->skip_special_blocks_cfg.block_types);
3477 kfree(prop->special_blocks);
3531 struct asic_fixed_properties *prop = &hdev->asic_prop;
3592 gaudi2->virt_msix_db_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, prop->pmmu.page_size,
3620 prop->supports_compute_reset = true;
3646 hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
3661 struct asic_fixed_properties *prop = &hdev->asic_prop;
3668 hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
4151 struct asic_fixed_properties *prop = &hdev->asic_prop;
4207 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0;
4208 user_irq_init_cnt < prop->user_interrupt_count;
4227 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count;
4285 struct asic_fixed_properties *prop = &hdev->asic_prop;
4306 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0;
4773 struct asic_fixed_properties *prop = &hdev->asic_prop;
4829 if (prop->fw_cpu_boot_dev_sts0_valid)
4830 prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
4832 if (prop->fw_cpu_boot_dev_sts1_valid)
4833 prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
5094 struct asic_fixed_properties *prop = &hdev->asic_prop;
5105 if (!(prop->edma_enabled_mask & BIT(seq)))
5199 struct asic_fixed_properties *prop = &hdev->asic_prop;
5203 if (!(prop->decoder_enabled_mask & BIT(decoder_id)))
5741 struct asic_fixed_properties *prop = &hdev->asic_prop;
5743 u32 asid, max_asid = prop->max_asid;
5860 struct asic_fixed_properties *prop = &hdev->asic_prop;
5873 if ((gaudi2->hw_cap_initialized & hw_cap) || !(prop->hmmu_hif_enabled_mask & BIT(dmmu_seq)))
7017 struct asic_fixed_properties *prop = &hdev->asic_prop;
7033 if (!(prop->edma_enabled_mask & BIT(seq)))
7228 struct asic_fixed_properties *prop = &hdev->asic_prop;
7243 if (e && prop->tpc_enabled_mask)
7256 struct asic_fixed_properties *prop = &hdev->asic_prop;
7266 if (e && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
7274 if (!(prop->decoder_enabled_mask & dec_enabled_bit))
7294 if (e && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK))
7302 if (!(prop->decoder_enabled_mask & BIT(dec_enabled_bit)))
7451 struct asic_fixed_properties *prop = &hdev->asic_prop;
7458 if (prop->edma_enabled_mask & BIT(edma_seq_base)) {
7465 if (prop->edma_enabled_mask & BIT(edma_seq_base + 1)) {
7511 if (prop->decoder_enabled_mask & BIT(dcore_id * NUM_OF_DEC_PER_DCORE + vdec_id))
7612 struct asic_fixed_properties *prop = &hdev->asic_prop;
7640 if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 0))
7643 if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 1))
8097 struct asic_fixed_properties *prop = &hdev->asic_prop;
8102 if (prop->tpc_enabled_mask & BIT(mod_idx))
8114 if (prop->edma_enabled_mask & BIT(mod_idx))
8129 if (prop->decoder_enabled_mask & BIT(mod_idx))
9967 struct asic_fixed_properties *prop = &hdev->asic_prop;
9972 if (prop->edma_enabled_mask == 0) {
10006 if (!(prop->edma_enabled_mask & BIT(edma_bit)))
10029 if (!(prop->edma_enabled_mask & BIT(edma_bit)))
10061 if (!(prop->edma_enabled_mask & BIT(edma_bit)))
10082 struct asic_fixed_properties *prop = &hdev->asic_prop;
10083 u64 size = prop->dram_end_address - prop->dram_user_base_address;
10085 rc = gaudi2_memset_device_memory(hdev, prop->dram_user_base_address, size, val);
10089 prop->dram_user_base_address, size);
10096 struct asic_fixed_properties *prop = &hdev->asic_prop;
10104 addr = prop->sram_user_base_address;
10105 size = hdev->pldm ? 0x10000 : (prop->sram_size - SRAM_USER_BASE_OFFSET);
10496 struct asic_fixed_properties *prop = &hdev->asic_prop;
10501 gaudi2->virt_msix_db_dma_addr, prop->pmmu.page_size, true);
10512 struct asic_fixed_properties *prop = &hdev->asic_prop;
10516 prop->pmmu.page_size, true);
10684 static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_properties *prop)
10686 struct hl_cb *cb = prop->data;
10690 u32 stream_index, size = prop->size;
10693 stream_index = prop->q_idx % 4;
10694 fence_addr = CFG_BASE + gaudi2_qm_blocks_bases[prop->q_idx] +
10704 msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + prop->mon_id * 4) -
10710 msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + prop->mon_id * 4) -
10719 msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + prop->mon_id * 4) -
10725 msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + prop->mon_id * 4) - monitor_base;
10727 size += gaudi2_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, prop->sob_mask,
10728 prop->sob_val, msg_addr_offset);
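
In gaudi2_gen_wait_cb() (lines 10684-10728), each monitor register is addressed as a first-instance register plus mon_id * 4, taken relative to the monitor block base before the packet is built. A one-line sketch of that per-monitor offset pattern (function name illustrative):

	#include <stdint.h>

	/* Illustrative only: offset of monitor 'mon_id' within a monitor block,
	 * mirroring the "first_reg + mon_id * 4 - monitor_base" pattern above.
	 */
	static uint32_t mon_reg_offset(uint32_t first_mon_reg, uint32_t monitor_base, uint32_t mon_id)
	{
		return first_mon_reg + mon_id * 4 - monitor_base;
	}
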
10790 struct asic_fixed_properties *prop = &hdev->asic_prop;
10798 divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE;
10810 struct asic_fixed_properties *prop = &hdev->asic_prop;
10818 divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE;
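
Lines 10798 and 10818 compute the same divisor in the MMU address scramble and descramble helpers: the per-HBM scrambling granule multiplied by the number of functional HBMs. A hedged sketch of the quotient/remainder split such a divisor feeds into; the field layout the real code packs these values back into is not shown here:

	#include <stdint.h>

	/* Illustrative only: split the relevant address bits into quotient and
	 * remainder over num_functional_hbms * scramble_granule.
	 */
	static void split_over_hbms(uint64_t addr_bits, uint64_t num_functional_hbms,
				    uint64_t scramble_granule, uint64_t *quot, uint64_t *rem)
	{
		uint64_t divisor = num_functional_hbms * scramble_granule;

		*quot = addr_bits / divisor;
		*rem = addr_bits % divisor;
	}
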
11101 struct asic_fixed_properties *prop = &hdev->asic_prop;
11112 if ((page_size % prop->dram_page_size) || (prop->dram_page_size > mmu_prop->page_size))
11123 *real_page_size = prop->dram_page_size;
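
Lines 11112-11123 pick the real page size for a DRAM mapping: the request is rejected unless it is a whole multiple of the device DRAM page size and that page size does not exceed the DMMU page size; otherwise the mapping proceeds in dram_page_size units. A minimal sketch of the acceptance check (function name illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative only: a DRAM mapping request is acceptable when its size is a
	 * multiple of the device DRAM page size and that page size fits inside the
	 * DMMU page size.
	 */
	static bool dram_map_size_ok(uint64_t req_page_size, uint64_t dram_page_size,
				     uint64_t dmmu_page_size)
	{
		return (req_page_size % dram_page_size) == 0 && dram_page_size <= dmmu_page_size;
	}
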