Lines Matching defs:hldev

47 int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
54 vpath = &hldev->virtual_paths[vp_id];
97 * function (hldev) have been sent to the host.
99 void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
104 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
107 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
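
The fragments at 99-107 show the driver's standard walk over deployed virtual paths: test the per-path bit in vpaths_deployed and skip any path this function does not own. A minimal sketch of that loop, assuming the driver's VXGE_HW_MAX_VIRTUAL_PATHS bound and vxge_mBIT bit macro:

	void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
	{
		int i, total_count = 0;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			/* Only poll paths this function actually deployed. */
			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;
			total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
		}
	}
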
222 vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
229 vpath = &hldev->virtual_paths[hldev->first_vp_id];
246 enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
253 vpath = &hldev->virtual_paths[hldev->first_vp_id];
277 vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
284 vpath = &hldev->virtual_paths[hldev->first_vp_id];
373 vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
381 vpath = &hldev->virtual_paths[hldev->first_vp_id];
484 static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
489 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
491 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
493 pci_save_state(hldev->pdev);
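
Per lines 484-493, PCIe bring-up is a read-modify-write of PCI_COMMAND followed by a config-space snapshot. Which command bit is set between the read and the write is not visible in the listing; PCI_COMMAND_MASTER below is purely an illustrative assumption:

	static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
	{
		u16 cmd = 0;

		pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MASTER;	/* assumed: enable bus mastering */
		pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

		/* Save config space so it can be restored after device resets. */
		pci_save_state(hldev->pdev);
	}
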
596 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
602 hldev->legacy_reg = hldev->bar0;
604 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
605 if (hldev->toc_reg == NULL) {
610 val64 = readq(&hldev->toc_reg->toc_common_pointer);
611 hldev->common_reg = hldev->bar0 + val64;
613 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
614 hldev->mrpcim_reg = hldev->bar0 + val64;
617 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
618 hldev->srpcim_reg[i] = hldev->bar0 + val64;
622 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
623 hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
627 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
628 hldev->vpath_reg[i] = hldev->bar0 + val64;
631 val64 = readq(&hldev->toc_reg->toc_kdfc);
635 hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
642 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
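
Lines 596-635 resolve every register block the same way: read a 64-bit byte offset from the Table of Contents (toc_reg) and add it to the BAR0 base. A sketch of the pattern for one scalar block and one indexed array (the loop-bound macro name is an assumption):

	u64 val64;
	u32 i;

	/* The legacy block sits at the start of BAR0; the TOC is
	 * located relative to it. */
	hldev->legacy_reg = hldev->bar0;

	/* Every TOC entry is a byte offset from BAR0. */
	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg = hldev->bar0 + val64;

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {	/* bound assumed */
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] = hldev->bar0 + val64;
	}
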
718 static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
723 val64 = readq(&hldev->common_reg->host_type_assignments);
725 hldev->host_type =
728 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
731 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
734 hldev->func_id =
735 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
737 hldev->access_rights = __vxge_hw_device_access_rights_get(
738 hldev->host_type, hldev->func_id);
740 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
741 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
743 hldev->first_vp_id = i;
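
The host-info pass (lines 718-743) reads the assignment registers and then records, for the first vpath this function owns, its function id, access rights, and index. A sketch of that scan; the break after recording first_vp_id is assumed, since the listing shows only the loop body:

	hldev->vpath_assignments =
		readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];

		hldev->first_vp_id = i;
		break;	/* assumed: only the first owned path is recorded */
	}
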
753 __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
755 struct pci_dev *dev = hldev->pdev;
783 __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
787 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
788 hldev->func_id)) {
790 status = __vxge_hw_verify_pci_e_info(hldev);
1099 struct __vxge_hw_device *hldev;
1105 hldev = blockpool->hldev;
1108 dma_unmap_single(&hldev->pdev->dev,
1113 vxge_os_dma_free(hldev->pdev,
1134 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1152 blockpool->hldev = hldev;
1174 hldev->pdev,
1184 dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
1187 if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
1188 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
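
The blockpool fragments (1099-1188) pair each streaming DMA mapping with an explicit error check and free the backing block when the mapping fails. A sketch of the map-side pattern; the mapping size, direction, and error-path label are assumptions:

	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
				  VXGE_HW_BLOCK_SIZE,	/* size assumed */
				  DMA_BIDIRECTIONAL);	/* direction assumed */
	if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
		/* Mapping failed: release the block rather than leak it. */
		vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
		goto exit;	/* label name assumed */
	}
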
1305 * are 'IN', including @hldev. Driver cooperates with
1319 struct __vxge_hw_device *hldev = NULL;
1326 hldev = vzalloc(sizeof(struct __vxge_hw_device));
1327 if (hldev == NULL) {
1332 hldev->magic = VXGE_HW_DEVICE_MAGIC;
1334 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1337 memcpy(&hldev->config, device_config,
1340 hldev->bar0 = attr->bar0;
1341 hldev->pdev = attr->pdev;
1343 hldev->uld_callbacks = attr->uld_callbacks;
1345 __vxge_hw_device_pci_e_init(hldev);
1347 status = __vxge_hw_device_reg_addr_get(hldev);
1349 vfree(hldev);
1353 __vxge_hw_device_host_info_get(hldev);
1359 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1372 if (__vxge_hw_blockpool_create(hldev,
1373 &hldev->block_pool,
1377 vxge_hw_device_terminate(hldev);
1382 status = __vxge_hw_device_initialize(hldev);
1384 vxge_hw_device_terminate(hldev);
1388 *devh = hldev;
1398 vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1400 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1402 hldev->magic = VXGE_HW_DEVICE_DEAD;
1403 __vxge_hw_blockpool_destroy(&hldev->block_pool);
1404 vfree(hldev);
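
Teardown (lines 1398-1404) mirrors initialization and is nearly complete in the listing; the detail worth noting is the magic-number poisoning, which turns any use-after-terminate into an assertion failure instead of a silent access to freed memory:

	void vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
	{
		vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

		/* Poison the handle first so a late caller trips the
		 * assertion above rather than touching freed state. */
		hldev->magic = VXGE_HW_DEVICE_DEAD;
		__vxge_hw_blockpool_destroy(&hldev->block_pool);
		vfree(hldev);
	}
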
1433 vpath->hldev->config.device_poll_millis);
1648 vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1655 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1656 (hldev->virtual_paths[i].vp_open ==
1660 memcpy(hldev->virtual_paths[i].hw_stats_sav,
1661 hldev->virtual_paths[i].hw_stats,
1665 &hldev->virtual_paths[i],
1666 hldev->virtual_paths[i].hw_stats);
1669 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1680 struct __vxge_hw_device *hldev,
1683 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1695 vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1701 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1702 hldev->func_id);
1712 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
1714 hldev->config.device_poll_millis);
1717 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
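
The mrpcim stats path (1695-1717) uses the driver's command/poll idiom: check privilege, write a command word, wait up to device_poll_millis for completion, then read the data register. A sketch; the poll helper and strobe-mask names are assumptions:

	/* Privileged operation: mrpcim stats are host-function only. */
	status = __vxge_hw_device_is_privilaged(hldev->host_type,
						hldev->func_id);
	if (status != VXGE_HW_OK)
		return status;

	/* val64 carries the command encoding (not shown in the listing). */
	writeq(val64, &hldev->mrpcim_reg->xmac_stats_sys_cmd);

	/* Poll for the command strobe to clear (helper and mask assumed). */
	status = __vxge_hw_device_register_poll(
			&hldev->mrpcim_reg->xmac_stats_sys_cmd,
			VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
			hldev->config.device_poll_millis);
	if (status == VXGE_HW_OK)
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
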
1729 vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
1739 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1740 hldev->func_id);
1745 status = vxge_hw_mrpcim_stats_access(hldev,
1764 vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1773 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1774 hldev->func_id);
1779 status = vxge_hw_mrpcim_stats_access(hldev,
1799 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1805 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1810 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1817 status = vxge_hw_device_xmac_port_stats_get(hldev,
1825 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1829 &hldev->virtual_paths[i],
1835 &hldev->virtual_paths[i],
1848 void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1851 if (hldev == NULL)
1856 hldev->debug_module_mask = mask;
1857 hldev->debug_level = level;
1861 hldev->level_err = level & VXGE_ERR;
1865 hldev->level_trace = level & VXGE_TRACE;
1873 u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1876 if (hldev == NULL)
1879 return hldev->level_err;
1889 u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1892 if (hldev == NULL)
1895 return hldev->level_trace;
1905 enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1911 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1921 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1926 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1940 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1946 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1956 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1957 hldev->func_id);
1961 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1971 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
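
setpause (1940-1971) is a guarded read-modify-write: validate the handle magic, check privilege, read rxmac_pause_cfg_port[port], flip the enable bits, and write the word back. The listing shows only the read and the write; the error code, the tx/rx flags, and the bit macro below are illustrative assumptions:

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
		return VXGE_HW_ERR_INVALID_DEVICE;	/* error code assumed */

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
						hldev->func_id);
	if (status != VXGE_HW_OK)
		return status;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)						/* tx/rx flags assumed */
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;	/* macro assumed */
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
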
1976 u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1978 struct pci_dev *dev = hldev->pdev;
2189 struct __vxge_hw_device *hldev;
2193 hldev = vph->vpath->hldev;
2212 channel->common_reg = hldev->common_reg;
2213 channel->first_vp_id = hldev->first_vp_id;
2215 channel->devh = hldev;
2331 (blockpool->hldev)->pdev,
2332 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2408 dma_unmap_single(&(blockpool->hldev)->pdev->dev,
2414 (blockpool->hldev)->pdev,
2794 struct __vxge_hw_device *hldev;
2805 hldev = vp->vpath->hldev;
2808 config = &hldev->config.vp_config[vp_id].ring;
2826 ring->common_reg = hldev->common_reg;
2853 ring->mempool = __vxge_hw_mempool_create(hldev,
3101 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
3107 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3118 *value = readq((void __iomem *)hldev->legacy_reg + offset);
3125 *value = readq((void __iomem *)hldev->toc_reg + offset);
3132 *value = readq((void __iomem *)hldev->common_reg + offset);
3135 if (!(hldev->access_rights &
3144 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
3147 if (!(hldev->access_rights &
3160 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
3165 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3173 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
3178 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3190 *value = readq((void __iomem *)hldev->vpath_reg[index] +
3206 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3214 vpmgmt_reg = hldev->vpmgmt_reg[i];
3227 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
3233 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3244 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
3251 writeq(value, (void __iomem *)hldev->toc_reg + offset);
3258 writeq(value, (void __iomem *)hldev->common_reg + offset);
3261 if (!(hldev->access_rights &
3270 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
3273 if (!(hldev->access_rights &
3286 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
3292 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3300 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
3305 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3313 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
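
Both mgmt accessors (3101-3190 and 3227-3313) dispatch on register-space type, gating the privileged spaces on access_rights and the indexed spaces on vpath_assignments. A sketch of three arms of the read-side switch; the enum constants and error codes are assumptions:

	switch (type) {
	case vxge_hw_mgmt_reg_type_common:
		*value = readq((void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		/* mrpcim space needs explicit access rights. */
		if (!(hldev->access_rights &
		      VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILEGED_OPERATION;	/* assumed */
			break;
		}
		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		/* Indexed spaces are valid only for assigned paths. */
		if (!(hldev->vpath_assignments & vxge_mBIT(index))) {
			status = VXGE_HW_ERR_INVALID_INDEX;	/* assumed */
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] + offset);
		break;
	}
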
3449 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
3527 __vxge_hw_mempool_create(vpath->hldev,
3597 * @hldev: HW device.
3603 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
3609 if (hldev == NULL) {
3614 vpath = &hldev->virtual_paths[hldev->first_vp_id];
3815 if (vp->vpath->hldev->config.rth_it_type
3957 struct __vxge_hw_device *hldev,
3987 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3989 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4005 &vpath->hldev->common_reg->vpath_rst_in_prog,
4008 vpath->hldev->config.device_poll_millis);
4018 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4025 &hldev->common_reg->cmn_rsthdlr_cfg0);
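
The hard vpath reset (4018-4025) is a single write of the path's bit into cmn_rsthdlr_cfg0; line 642 earlier suggests completion is observed via vpath_rst_in_prog. A sketch; the field macro is assumed from the register name, and a plain writeq stands in for whatever PIO helper the driver actually uses:

	static enum vxge_hw_status
	__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
	{
		u64 val64;

		/* Set this path's bit in the common reset-handler register. */
		val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(vxge_mBIT(vp_id));	/* assumed */
		writeq(val64, &hldev->common_reg->cmn_rsthdlr_cfg0);

		return VXGE_HW_OK;
	}
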
4035 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4040 vpath = &hldev->virtual_paths[vp_id];
4060 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4067 vpath = &hldev->virtual_paths[vp_id];
4118 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
4132 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4140 vpath = &hldev->virtual_paths[vp_id];
4142 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
4189 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
4193 (hldev->kdfc + (vp_id *
4205 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4212 vpath = &hldev->virtual_paths[vp_id];
4254 if (hldev->config.rth_it_type ==
4272 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4279 vpath = &hldev->virtual_paths[vp_id];
4533 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4541 vpath = &hldev->virtual_paths[vp_id];
4543 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4553 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4557 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4561 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
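
Per-path initialization (4533-4561) is a straight-line sequence of configure steps, each short-circuiting on failure. A sketch of the status-check chain; the error code and label name are assumptions:

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;	/* assumed */
		goto exit;
	}

	status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
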
4595 static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4599 vpath = &hldev->virtual_paths[vp_id];
4604 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4605 vpath->hldev->tim_int_mask1, vpath->vp_id);
4606 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4639 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4645 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4650 vpath = &hldev->virtual_paths[vp_id];
4655 vpath->hldev = hldev;
4657 vpath->vp_reg = hldev->vpath_reg[vp_id];
4658 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4660 __vxge_hw_vpath_reset(hldev, vp_id);
4668 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4676 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4678 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4679 hldev->tim_int_mask1, vp_id);
4681 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4683 __vxge_hw_vp_terminate(hldev, vp_id);
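
__vxge_hw_vp_initialize (4639-4683) wires the vpath struct to its register spaces, resets the path, and funnels every failure into __vxge_hw_vp_terminate so partially initialized state is torn down in one place. A sketch of that unwind shape, with the label name assumed:

	vpath = &hldev->virtual_paths[vp_id];
	vpath->hldev = hldev;
	vpath->vp_reg = hldev->vpath_reg[vp_id];
	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
	if (status != VXGE_HW_OK)
		goto err_terminate;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto err_terminate;

	return VXGE_HW_OK;

err_terminate:
	/* One teardown path for every partial-init failure. */
	__vxge_hw_vp_terminate(hldev, vp_id);
	return status;
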
4789 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4797 vpath = &hldev->virtual_paths[attr->vp_id];
4804 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4805 &hldev->config.vp_config[attr->vp_id]);
4828 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4835 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4846 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4850 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4862 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4880 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4898 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4966 devh = vpath->hldev;
5021 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
5038 struct __vxge_hw_device *hldev;
5043 hldev = vpath->hldev;
5054 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
5058 status = __vxge_hw_vpath_initialize(hldev, vp_id);
5063 __vxge_hw_vpath_prc_configure(hldev, vp_id);
5088 struct __vxge_hw_device *hldev;
5091 hldev = vp->vpath->hldev;
5097 &hldev->common_reg->cmn_rsthdlr_cfg1);