Lines Matching defs:hdev (hdev references in the habanalabs protection-bits security helpers)

26  * @hdev: pointer to hl_device structure
32 static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
46 dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
54 * @hdev: pointer to hl_device structure
59 static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
63 dev_err(hdev->dev,
79 * @hdev: pointer to hl_device structure
87 int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
94 block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
101 return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
108 * @hdev: pointer to hl_device structure
116 static int hl_unsecure_register_range(struct hl_device *hdev,
124 block_num = hl_get_pb_block(hdev,
132 rc |= hl_unset_pb_in_block(hdev, reg_offset,
143 * @hdev: pointer to hl_device structure
152 int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
159 rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
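As the matches at 152-159 suggest, hl_unsecure_registers() is a loop that hands each entry of mm_reg_array to hl_unsecure_register(), which locates the owning protection block (hl_get_pb_block) and clears that register's bit in the block's glbl_sec shadow (hl_unset_pb_in_block). The sketch below is reconstructed from these fragments only; the parameter order after offset, the struct hl_block_glbl_sec element type of sgs_array and the early return on error are assumptions, not confirmed by the listing.

/*
 * Sketch of hl_unsecure_registers(), reconstructed from the matched lines.
 * Parameter order after 'offset', the struct hl_block_glbl_sec type and the
 * early return are assumptions.
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc;

	for (i = 0 ; i < mm_array_size ; i++) {
		/* clear the protection bit of one user-visible register */
		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
				pb_blocks, sgs_array, blocks_array_size);
		if (rc)
			return rc;
	}

	return 0;
}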
173 * @hdev: pointer to hl_device structure
182 static int hl_unsecure_registers_range(struct hl_device *hdev,
190 rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
203 * @hdev: pointer to hl_device structure
209 static void hl_ack_pb_security_violations(struct hl_device *hdev,
220 hdev->asic_funcs->pb_print_security_errors(hdev,
230 * @hdev: pointer to hl_device structure
237 void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
244 if (hdev->pldm)
260 * @hdev: pointer to hl_device structure
265 void hl_secure_block(struct hl_device *hdev,
279 * @hdev: pointer to hl_device structure
291 int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
305 hl_secure_block(hdev, glbl_sec, blocks_array_size);
306 hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size, 0,
317 hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
331 * @hdev: pointer to hl_device structure
343 int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
348 return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
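hl_init_pb() (343) forwards to hl_init_pb_with_mask() (291), which secures every block (hl_secure_block), clears the protection bits of the user-accessible registers (hl_unsecure_registers) and writes the result to the hardware (hl_config_glbl_sec). Below is a minimal, hypothetical sketch of an ASIC-side caller: only the first three hl_init_pb() parameters are confirmed by the listing, the remaining order (num_instances, instance_offset, pb_blocks[], blocks_array_size, user_regs_array[], user_regs_array_size) is inferred from the surrounding matches, and every table, macro and register value is made up for illustration.

/*
 * Hypothetical ASIC-side protection-bits setup. Table contents, macros and
 * the parameter order after dcore_offset are illustrative assumptions.
 */
#include "habanalabs.h"			/* driver-internal: struct hl_device etc. */

#define MY_ASIC_NUM_DCORES	4		/* hypothetical */
#define MY_ASIC_DCORE_OFFSET	0x200000	/* hypothetical */

/* config-space blocks that carry a protection-bits region (hypothetical) */
static const u32 my_asic_pb_blocks[] = {
	0x1000000, 0x1010000, 0x1020000,
};

/* registers inside those blocks that user space may touch (hypothetical) */
static const u32 my_asic_user_regs[] = {
	0x1000004, 0x1000008, 0x1010010,
};

static int my_asic_init_protection_bits(struct hl_device *hdev)
{
	return hl_init_pb(hdev, MY_ASIC_NUM_DCORES, MY_ASIC_DCORE_OFFSET,
			1 /* num_instances, assumed */,
			0 /* instance_offset, assumed */,
			my_asic_pb_blocks, ARRAY_SIZE(my_asic_pb_blocks),
			my_asic_user_regs, ARRAY_SIZE(my_asic_user_regs));
}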
359 * @hdev: pointer to hl_device structure
371 int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
386 hl_secure_block(hdev, glbl_sec, blocks_array_size);
387 rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
401 hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
417 * @hdev: pointer to hl_device structure
429 int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
435 return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
445 * @hdev: pointer to hl_device structure
455 int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
469 hl_secure_block(hdev, glbl_sec, blocks_array_size);
470 rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
477 hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
493 * @hdev: pointer to hl_device structure
503 int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
517 hl_secure_block(hdev, glbl_sec, blocks_array_size);
518 hl_unsecure_registers_range(hdev, user_regs_range_array,
524 hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
536 * @hdev: pointer to hl_device structure
547 void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
561 hl_ack_pb_security_violations(hdev, pb_blocks,
571 * @hdev: pointer to hl_device structure
581 void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
585 hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
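hl_ack_pb() (581) is the unmasked wrapper around hl_ack_pb_with_mask() (547), which ends up in hl_ack_pb_security_violations() and the ASIC's pb_print_security_errors() hook (220). A short hedged sketch follows, reusing the hypothetical tables from the hl_init_pb() example above; the parameter order after dcore_offset is again an assumption.

/*
 * Hypothetical call site: after the protection bits are configured, log and
 * acknowledge any security violations already latched in the blocks.
 */
static void my_asic_ack_protection_bits(struct hl_device *hdev)
{
	hl_ack_pb(hdev, MY_ASIC_NUM_DCORES, MY_ASIC_DCORE_OFFSET,
			1 /* num_instances, assumed */,
			0 /* instance_offset, assumed */,
			my_asic_pb_blocks, ARRAY_SIZE(my_asic_pb_blocks));
}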
594 * @hdev: pointer to hl_device structure
602 void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
610 hl_ack_pb_security_violations(hdev, pb_blocks,
616 static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
624 struct asic_fixed_properties *prop = &hdev->asic_prop;
645 static bool hl_check_block_range_exclusion(struct hl_device *hdev,
653 block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
671 static int hl_read_glbl_errors(struct hl_device *hdev,
674 struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
677 base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
692 for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
694 dev_err_ratelimited(hdev->dev,
697 hdev->asic_prop.cfg_base_address + block_base +
706 void hl_check_for_glbl_errors(struct hl_device *hdev)
708 struct asic_fixed_properties *prop = &hdev->asic_prop;
719 rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
721 dev_err_ratelimited(hdev->dev,
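hl_check_for_glbl_errors() (706) needs only the device pointer: it wraps hl_read_glbl_errors() in an iterate_special_ctx and walks every special block, rate-limit logging any asserted global error cause bits. A minimal hedged sketch of a caller follows; the surrounding event handler is hypothetical, only the hl_check_for_glbl_errors(hdev) call itself is confirmed by the listing.

/*
 * Hypothetical error-event path: scan all special blocks for latched
 * global errors after a hardware error event was decoded.
 */
static void my_asic_handle_hw_error_event(struct hl_device *hdev)
{
	/* ... ASIC-specific event decoding ... */
	hl_check_for_glbl_errors(hdev);
}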
725 int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
735 block_info_arr = hdev->asic_prop.special_blocks;
739 num_blocks = hdev->asic_prop.num_of_special_blocks;
750 if ((hl_check_block_range_exclusion(hdev,
754 skip_blocks_cfg->skip_block_hook(hdev,
761 rc = ctx->fn(hdev, blk_idx, major, minor,
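hl_iterate_special_blocks() (725) walks hdev->asic_prop.special_blocks, skips excluded ranges via hl_check_block_range_exclusion() and the optional skip_block_hook, and calls ctx->fn for each remaining instance. The sketch below shows a custom iterator; since the ctx->fn(...) call at 761 is truncated, the callback's trailing parameters (sub_minor and a private data pointer) and the 'data' field of struct iterate_special_ctx are assumptions.

/*
 * Hypothetical custom special-block iterator. The callback parameters after
 * 'minor' and the .data field name are assumed, not confirmed by the listing.
 */
static int my_asic_count_block(struct hl_device *hdev, u32 block_idx,
		u32 major, u32 minor, u32 sub_minor, void *data)
{
	u32 *count = data;

	(*count)++;

	return 0;
}

static int my_asic_count_special_blocks(struct hl_device *hdev, u32 *count)
{
	struct iterate_special_ctx ctx = {
		.fn = my_asic_count_block,
		.data = count,			/* assumed field name */
	};

	*count = 0;

	return hl_iterate_special_blocks(hdev, &ctx);
}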