Lines matching refs:hldev
130 if (vpath->hldev->first_vp_id != vpath->vp_id)
334 * @hldev: HW device handle.
337 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
346 hldev->config.intr_mode = intr_mode;
352 * @hldev: HW device handle.
359 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
365 vxge_hw_device_mask_all(hldev);
369 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
373 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
376 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
377 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
378 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
381 writeq(val64, &hldev->common_reg->tim_int_status0);
383 writeq(~val64, &hldev->common_reg->tim_int_mask0);
386 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
387 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
391 &hldev->common_reg->tim_int_status1);
394 &hldev->common_reg->tim_int_mask1);
398 val64 = readq(&hldev->common_reg->titan_general_int_status);
400 vxge_hw_device_unmask_all(hldev);
405 * @hldev: HW device handle.
411 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
415 vxge_hw_device_mask_all(hldev);
418 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
420 &hldev->common_reg->tim_int_mask1);
424 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
428 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
434 * @hldev: HW device handle.
440 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
448 &hldev->common_reg->titan_mask_all_int);
453 * @hldev: HW device handle.
459 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
463 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
467 &hldev->common_reg->titan_mask_all_int);
472 * @hldev: HW device handle.
478 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
480 readl(&hldev->common_reg->titan_general_int_status);
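The matches above (source lines 334-480) cover the device-level interrupt entry points: vxge_hw_device_set_intr_type(), vxge_hw_device_intr_enable()/vxge_hw_device_intr_disable(), vxge_hw_device_mask_all()/vxge_hw_device_unmask_all() and vxge_hw_device_flush_io(). A minimal bring-up/teardown sketch, assuming hldev is an already-initialized struct __vxge_hw_device handle; the wrapper function names below are hypothetical:

        static void example_intr_setup(struct __vxge_hw_device *hldev)
        {
                /* Select legacy INTA (IRQ line) mode before enabling anything. */
                vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_IRQLINE);

                /* Unmask the deployed vpaths and the TIM Tx/Rx bits. */
                vxge_hw_device_intr_enable(hldev);
        }

        static void example_intr_teardown(struct __vxge_hw_device *hldev)
        {
                /* Mask every interrupt source again ... */
                vxge_hw_device_intr_disable(hldev);

                /* ... and flush the posted mask writes with a PCI read. */
                vxge_hw_device_flush_io(hldev);
        }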
485 * @hldev: HW device
492 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
524 if (hldev->uld_callbacks->crit_err)
525 hldev->uld_callbacks->crit_err(hldev,
534 * @hldev: HW device handle.
540 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
545 if (hldev->link_state == VXGE_HW_LINK_DOWN)
548 hldev->link_state = VXGE_HW_LINK_DOWN;
551 if (hldev->uld_callbacks->link_down)
552 hldev->uld_callbacks->link_down(hldev);
559 * @hldev: HW device handle.
565 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
570 if (hldev->link_state == VXGE_HW_LINK_UP)
573 hldev->link_state = VXGE_HW_LINK_UP;
576 if (hldev->uld_callbacks->link_up)
577 hldev->uld_callbacks->link_up(hldev);
597 struct __vxge_hw_device *hldev = NULL;
609 hldev = vpath->hldev;
657 __vxge_hw_device_handle_link_down_ind(hldev);
678 __vxge_hw_device_handle_link_up_ind(hldev);
843 hldev->stats.sw_dev_err_stats.vpath_alarms++;
849 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
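The __vxge_hw_device_handle_error() and __vxge_hw_device_handle_link_{down,up}_ind() helpers above only track hldev->link_state and fan the event out through hldev->uld_callbacks. A sketch of a driver-side callback table: the field names link_up, link_down and crit_err are visible in the matches, while the table's struct tag (assumed to be struct vxge_hw_uld_cbs), the exact crit_err signature and the handler names here are assumptions:

        static void example_link_up(struct __vxge_hw_device *hldev)
        {
                /* e.g. netif_carrier_on() on the matching net_device */
        }

        static void example_link_down(struct __vxge_hw_device *hldev)
        {
                /* e.g. netif_carrier_off() and stop the Tx queues */
        }

        static void example_crit_err(struct __vxge_hw_device *hldev,
                                     enum vxge_hw_event type, u64 ext_data)
        {
                /* log the alarm and schedule a device reset as needed */
        }

        static const struct vxge_hw_uld_cbs example_uld_cbs = {
                .link_up   = example_link_up,
                .link_down = example_link_down,
                .crit_err  = example_crit_err,
        };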
862 * @hldev: HW device handle.
879 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
888 val64 = readq(&hldev->common_reg->titan_general_int_status);
899 adapter_status = readq(&hldev->common_reg->adapter_status);
903 __vxge_hw_device_handle_error(hldev,
911 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
915 vpath_mask = hldev->vpaths_deployed >>
920 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
925 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
932 hldev->stats.sw_dev_err_stats.vpath_alarms++;
936 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
940 &hldev->virtual_paths[i], skip_alarms);
958 * @hldev: HW device.
965 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
968 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
969 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
970 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
971 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
972 &hldev->common_reg->tim_int_status0);
975 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
976 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
978 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
979 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
980 &hldev->common_reg->tim_int_status1);
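vxge_hw_device_begin_irq() (source lines 862-940) is the top half of the INTA path: it reads titan_general_int_status, bumps the interrupt counters and walks the deployed vpaths for alarms, while vxge_hw_device_clear_tx_rx() acknowledges the TIM Tx/Rx bits. A sketch of an ISR built on the two; the handler name and the dev_id cookie are hypothetical, and the actual completion work (NAPI poll, Tx reclaim) is omitted:

        static irqreturn_t example_isr(int irq, void *dev_id)
        {
                struct __vxge_hw_device *hldev = dev_id;   /* hypothetical cookie */
                enum vxge_hw_status status;
                u64 reason;

                /* Returns VXGE_HW_OK only when this adapter raised the line;
                 * "reason" holds the general interrupt status read back. */
                status = vxge_hw_device_begin_irq(hldev, 0, &reason);
                if (status != VXGE_HW_OK)
                        return IRQ_NONE;

                vxge_hw_device_mask_all(hldev);

                /* Acknowledge the TIM Tx/Rx interrupts, then defer the real
                 * completion processing elsewhere. */
                vxge_hw_device_clear_tx_rx(hldev);

                vxge_hw_device_unmask_all(hldev);
                return IRQ_HANDLED;
        }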
1979 if (!(vpath->hldev->access_rights &
2182 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2185 if (vpath->hldev->config.intr_mode ==
2214 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2217 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2234 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2236 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2239 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2243 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2261 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2264 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2280 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2285 val64 = readq(&hldev->common_reg->tim_int_mask0);
2291 &hldev->common_reg->tim_int_mask0);
2294 val64 = readl(&hldev->common_reg->tim_int_mask1);
2301 &hldev->common_reg->tim_int_mask1);
2318 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2323 val64 = readq(&hldev->common_reg->tim_int_mask0);
2329 &hldev->common_reg->tim_int_mask0);
2337 &hldev->common_reg->tim_int_mask1);
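The remaining matches sit in the per-vpath helpers: judging by the registers touched, source lines 2214-2264 belong to the MSI-X vector mask/clear/unmask helpers (vxge_hw_vpath_msix_mask() and friends), and lines 2280-2337 to the INTA-mode per-vpath Tx/Rx mask and unmask pair built on tim_int_mask0/tim_int_mask1. A sketch of a per-vector MSI-X handler using the mask / clear / work / unmask pattern; the per-vector cookie type and its fields are hypothetical, with vp standing for a struct __vxge_hw_vpath_handle obtained at vpath-open time and msix_id for that vpath's vector index:

        struct example_vpath_ctx {              /* hypothetical per-vector cookie */
                struct __vxge_hw_vpath_handle *vp;
                int msix_id;
        };

        static irqreturn_t example_vpath_msix_handle(int irq, void *dev_id)
        {
                struct example_vpath_ctx *ctx = dev_id;

                /* Keep the vector quiet while the completion work runs. */
                vxge_hw_vpath_msix_mask(ctx->vp, ctx->msix_id);

                /* In MSIX_ONE_SHOT mode this writes the one-shot clear
                 * register; otherwise it clears the vector mask directly. */
                vxge_hw_vpath_msix_clear(ctx->vp, ctx->msix_id);

                /* ... process Tx/Rx completions for this vpath here ... */

                /* Re-arm the vector for the next interrupt. */
                vxge_hw_vpath_msix_unmask(ctx->vp, ctx->msix_id);

                return IRQ_HANDLED;
        }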