Lines Matching defs:vpath

25 	status = __vxge_hw_vpath_stats_access(vpath,			\
50 struct __vxge_hw_virtualpath *vpath;
54 vpath = &hldev->virtual_paths[vp_id];
55 vp_reg = vpath->vp_reg;
59 /* Check that the ring controller for this vpath has enough free RxDs
62 * RXD_SPAT value for the vpath.
76 /* Check that the ring controller for this vpath does
96 * stored in the frame buffer for each vpath assigned to the given
157 vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
161 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
166 spin_lock(&vpath->lock);
167 if (!vpath->vp_open) {
168 spin_unlock(&vpath->lock);
196 if (!vpath->vp_open)
216 if (vpath->vp_open)
217 spin_unlock(&vpath->lock);
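
The cluster above (lines 157-217) shows the locking discipline of vxge_hw_vpath_fw_api(): the vpath lock is held only while the vpath is open, and a not-yet-open vpath (such as the temporary one used during probe, lines 1057-1084 below) drops the lock immediately and runs unlocked, which is why the final unlock is conditional. A minimal sketch of that pattern, assuming kernel context; demo_vpath and do_fw_access() are illustrative stand-ins, not driver names:

    /* Conditional locking as in vxge_hw_vpath_fw_api(): serialize only
     * open vpaths; the probe-time stack vpath must not hold the lock
     * across the firmware access. */
    static int demo_fw_api(struct demo_vpath *vpath)
    {
            int status;

            spin_lock(&vpath->lock);
            if (!vpath->vp_open)
                    spin_unlock(&vpath->lock);      /* probe path: run unlocked */

            status = do_fw_access(vpath);           /* hypothetical register sequence */

            if (vpath->vp_open)
                    spin_unlock(&vpath->lock);      /* release only if still held */
            return status;
    }
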
226 struct __vxge_hw_virtualpath *vpath;
229 vpath = &hldev->virtual_paths[hldev->first_vp_id];
231 status = vxge_hw_vpath_fw_api(vpath,
249 struct __vxge_hw_virtualpath *vpath;
253 vpath = &hldev->virtual_paths[hldev->first_vp_id];
255 status = vxge_hw_vpath_fw_api(vpath,
280 struct __vxge_hw_virtualpath *vpath;
284 vpath = &hldev->virtual_paths[hldev->first_vp_id];
287 status = vxge_hw_vpath_fw_api(vpath,
306 status = vxge_hw_vpath_fw_api(vpath,
377 struct __vxge_hw_virtualpath *vpath;
381 vpath = &hldev->virtual_paths[hldev->first_vp_id];
387 status = vxge_hw_vpath_fw_api(vpath,
426 struct __vxge_hw_virtualpath *vpath;
428 vpath = channel->vph->vpath;
437 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
442 vpath->ringh = (struct __vxge_hw_ring *)channel;
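
The casts at lines 437 and 442 store the newly allocated channel as the vpath's fifo or ring handle. They are safe because the channel structure is the first member of both __vxge_hw_fifo and __vxge_hw_ring, so a channel pointer and its enclosing object share an address. A self-contained illustration of that first-member idiom (demo_* types are illustrative):

    #include <assert.h>
    #include <stddef.h>

    struct demo_channel { int type; };

    struct demo_ring {
            struct demo_channel channel;    /* must remain the first member */
            int ring_state;
    };

    int main(void)
    {
            struct demo_ring ring;
            struct demo_channel *ch = &ring.channel;

            /* the first-member rule makes this cast well-defined */
            assert((struct demo_ring *)ch == (struct demo_ring *)&ring);
            assert(offsetof(struct demo_ring, channel) == 0);
            return 0;
    }
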
496 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
498 * This routine checks that the vpath reset-in-progress register has turned zero
700 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
701 * Returns the function number of the vpath.
804 __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
814 status = vxge_hw_vpath_fw_api(vpath,
871 __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
883 status = vxge_hw_vpath_fw_api(vpath,
896 status = vxge_hw_vpath_fw_api(vpath,
911 status = vxge_hw_vpath_fw_api(vpath,
930 __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
938 status = vxge_hw_vpath_fw_api(vpath,
950 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
954 __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
963 status = vxge_hw_vpath_fw_api(vpath, action,
994 * Returns the vpath mask that has the bits set for each vpath allocated
996 * each vpath
1009 struct __vxge_hw_virtualpath vpath;
1057 spin_lock_init(&vpath.lock);
1058 vpath.vp_reg = bar0 + val64;
1059 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1061 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
1065 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
1069 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
1081 vpath.vp_reg = bar0 + val64;
1082 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1084 status = __vxge_hw_vpath_addr_get(&vpath,
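
Lines 1009-1084 gather device-wide information before any vpath is opened: a throwaway __vxge_hw_virtualpath is built on the stack, vp_reg is pointed at the per-vpath window inside BAR0, and vp_open is left at VXGE_HW_VP_NOT_OPEN so the FW API sketched above runs unlocked. A sketch of that shape, assuming kernel context; bar0/reg_offset and demo_fw_ver_get() stand in for the real TOC lookup and getter:

    static int demo_probe_one_vpath(void __iomem *bar0, u64 reg_offset,
                                    struct demo_hw_info *hw_info)
    {
            struct demo_vpath vpath;

            spin_lock_init(&vpath.lock);
            vpath.vp_reg = bar0 + reg_offset;       /* per-vpath register window */
            vpath.vp_open = DEMO_VP_NOT_OPEN;       /* FW API will run unlocked */

            return demo_fw_ver_get(&vpath, hw_info);        /* hypothetical getter */
    }
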
1240 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1241 * Check the vpath configuration
1412 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1419 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1424 vp_reg = vpath->vp_reg;
1433 vpath->hldev->config.device_poll_millis);
1444 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1447 __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1457 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1463 status = __vxge_hw_vpath_stats_access(vpath,
1476 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1479 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1488 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1493 status = __vxge_hw_vpath_stats_access(vpath,
1507 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1510 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1517 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1521 vp_reg = vpath->vp_reg;
1581 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1585 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
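
The statistics cluster (lines 1412-1585) repeats one shape: every getter refuses a vpath that is not open, then delegates each 64-bit counter to __vxge_hw_vpath_stats_access(), which programs the access register and polls for completion within device_poll_millis. A condensed sketch of a getter under that contract (demo_* names are illustrative, kernel context assumed):

    static int demo_xmac_stats_get(struct demo_vpath *vpath,
                                   u64 *stats, unsigned int nstats)
    {
            unsigned int i;
            int status = 0;

            if (vpath->vp_open == DEMO_VP_NOT_OPEN)
                    return -ENODEV;         /* stats block is not mapped yet */

            for (i = 0; i < nstats; i++) {
                    /* one polled register access per 64-bit counter */
                    status = demo_stats_access(vpath, i, &stats[i]);
                    if (status)
                            break;          /* stop on the first failed read */
            }
            return status;
    }
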
1645 * Returns the vpath h/w stats for the device.
1677 * Returns the vpath s/w stats for the device.
2193 hldev = vph->vpath->hldev;
2194 vp_id = vph->vpath->vp_id;
2737 * This function resets the ring during the vpath reset operation
2769 struct __vxge_hw_ring *ring = vp->vpath->ringh;
2776 vp->vpath->ringh = NULL;
2805 hldev = vp->vpath->hldev;
2806 vp_id = vp->vpath->vp_id;
2823 vp->vpath->ringh = ring;
2825 ring->vp_reg = vp->vpath->vp_reg;
2827 ring->stats = &vp->vpath->sw_stats->ring_stats;
2833 ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
2834 ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
3052 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
3053 * Set the swapper bits appropriately for the vpath.
3072 * Set the swapper bits appropriately for the vpath.
3354 * This function resets the fifo during the vpath reset operation
3373 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3380 vp->vpath->fifoh = NULL;
3442 struct __vxge_hw_virtualpath *vpath;
3448 vpath = vp->vpath;
3449 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
3465 vpath->fifoh = fifo;
3466 fifo->nofl_db = vpath->nofl_db;
3468 fifo->vp_id = vpath->vp_id;
3469 fifo->vp_reg = vpath->vp_reg;
3470 fifo->stats = &vpath->sw_stats->fifo_stats;
3476 fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
3477 fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
3527 __vxge_hw_mempool_create(vpath->hldev,
3556 * Read from the vpath pci config space.
3559 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
3564 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
3605 struct __vxge_hw_virtualpath *vpath;
3614 vpath = &hldev->virtual_paths[hldev->first_vp_id];
3617 status = vxge_hw_vpath_fw_api(vpath,
3652 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3688 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3815 if (vp->vpath->hldev->config.rth_it_type
3958 struct __vxge_hw_virtualpath *vpath)
3965 val64 = readq(&vpath->vpmgmt_reg->
3975 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3977 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3981 vpath->vsport_number = i;
3984 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3987 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3989 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3995 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3997 * adapter completed the reset process for the vpath
4000 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
4005 &vpath->hldev->common_reg->vpath_rst_in_prog,
4007 1 << (16 - vpath->vp_id)),
4008 vpath->hldev->config.device_poll_millis);
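
Lines 4000-4008 poll the shared vpath_rst_in_prog register until this vpath's bit drops; the bit layout places vpath 0 at bit 16, hence 1 << (16 - vp_id). A simplified busy-wait equivalent of the driver's register-poll helper (demo_readq() and demo_msleep() are stand-ins):

    static int demo_vpath_reset_check(struct demo_vpath *vpath, u32 poll_millis)
    {
            u64 mask = 1ULL << (16 - vpath->vp_id); /* this vpath's busy bit */
            u32 elapsed;

            for (elapsed = 0; elapsed < poll_millis; elapsed++) {
                    if (!(demo_readq(vpath->rst_in_prog_reg) & mask))
                            return 0;       /* adapter finished the reset */
                    demo_msleep(1);
            }
            return -ETIMEDOUT;
    }
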
4015 * This routine resets the vpath on the device
4032 * This routine resets the vpath structures
4038 struct __vxge_hw_virtualpath *vpath;
4040 vpath = &hldev->virtual_paths[vp_id];
4042 if (vpath->ringh) {
4043 status = __vxge_hw_ring_reset(vpath->ringh);
4048 if (vpath->fifoh)
4049 status = __vxge_hw_fifo_reset(vpath->fifoh);
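
Lines 4038-4049 reset the per-vpath software structures, but only those that exist: a vpath may have been opened with only a ring or only a fifo, so each handle is tested before its reset helper runs. Sketch (demo_* helpers are hypothetical):

    static int demo_vpath_sw_reset(struct demo_vpath *vpath)
    {
            int status = 0;

            if (vpath->ringh) {
                    status = demo_ring_reset(vpath->ringh);
                    if (status)
                            return status;  /* leave the fifo untouched on failure */
            }
            if (vpath->fifoh)
                    status = demo_fifo_reset(vpath->fifoh);
            return status;
    }
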
4063 struct __vxge_hw_virtualpath *vpath;
4067 vpath = &hldev->virtual_paths[vp_id];
4068 vp_reg = vpath->vp_reg;
4069 vp_config = vpath->vp_config;
4078 val64 = readq(&vpath->vp_reg->prc_cfg6);
4080 writeq(val64, &vpath->vp_reg->prc_cfg6);
4084 if (vpath->vp_config->ring.scatter_mode !=
4089 switch (vpath->vp_config->ring.scatter_mode) {
4109 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
4137 struct __vxge_hw_virtualpath *vpath;
4140 vpath = &hldev->virtual_paths[vp_id];
4141 vp_reg = vpath->vp_reg;
4149 vpath->max_kdfc_db =
4153 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4155 vpath->max_nofl_db = vpath->max_kdfc_db;
4157 if (vpath->max_nofl_db <
4158 ((vpath->vp_config->fifo.memblock_size /
4159 (vpath->vp_config->fifo.max_frags *
4161 vpath->vp_config->fifo.fifo_blocks)) {
4166 (vpath->max_nofl_db*2)-1);
4191 vpath->nofl_db =
4208 struct __vxge_hw_virtualpath *vpath;
4212 vpath = &hldev->virtual_paths[vp_id];
4213 vp_reg = vpath->vp_reg;
4214 vp_config = vpath->vp_config;
4217 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
4238 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
4244 vpath->max_mtu);
4275 struct __vxge_hw_virtualpath *vpath;
4279 vpath = &hldev->virtual_paths[vp_id];
4280 vp_reg = vpath->vp_reg;
4281 config = vpath->vp_config;
4343 vpath->tim_tti_cfg1_saved = val64;
4401 vpath->tim_tti_cfg3_saved = val64;
4450 vpath->tim_rti_cfg1_saved = val64;
4508 vpath->tim_rti_cfg3_saved = val64;
4530 * registers of the vpath using the configuration passed.
4538 struct __vxge_hw_virtualpath *vpath;
4541 vpath = &hldev->virtual_paths[vp_id];
4547 vp_reg = vpath->vp_reg;
4549 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4568 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4597 struct __vxge_hw_virtualpath *vpath;
4599 vpath = &hldev->virtual_paths[vp_id];
4601 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4604 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4605 vpath->hldev->tim_int_mask1, vpath->vp_id);
4606 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4611 spin_lock(&vpath->lock);
4612 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4613 spin_unlock(&vpath->lock);
4615 vpath->vpmgmt_reg = NULL;
4616 vpath->nofl_db = NULL;
4617 vpath->max_mtu = 0;
4618 vpath->vsport_number = 0;
4619 vpath->max_kdfc_db = 0;
4620 vpath->max_nofl_db = 0;
4621 vpath->ringh = NULL;
4622 vpath->fifoh = NULL;
4623 memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
4624 vpath->stats_block = NULL;
4625 vpath->hw_stats = NULL;
4626 vpath->hw_stats_sav = NULL;
4627 vpath->sw_stats = NULL;
4635 * This routine is the initial phase of init which resets the vpath and
4642 struct __vxge_hw_virtualpath *vpath;
4650 vpath = &hldev->virtual_paths[vp_id];
4652 spin_lock_init(&vpath->lock);
4653 vpath->vp_id = vp_id;
4654 vpath->vp_open = VXGE_HW_VP_OPEN;
4655 vpath->hldev = hldev;
4656 vpath->vp_config = config;
4657 vpath->vp_reg = hldev->vpath_reg[vp_id];
4658 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4662 status = __vxge_hw_vpath_reset_check(vpath);
4664 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4668 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4670 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4674 INIT_LIST_HEAD(&vpath->vpath_handles);
4676 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
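
The init phase at lines 4642-4676 fills in the vpath, then verifies that the hardware reset completed and reads the management registers; on either failure the whole structure is memset back to zero so the slot reads as never-opened. A sketch of that rollback discipline (demo_* helpers are hypothetical, kernel context assumed):

    static int demo_vp_initialize(struct demo_vpath *vpath, u32 vp_id,
                                  u32 poll_millis)
    {
            int status;

            spin_lock_init(&vpath->lock);
            vpath->vp_id = vp_id;
            vpath->vp_open = DEMO_VP_OPEN;

            status = demo_vpath_reset_check(vpath, poll_millis);
            if (!status)
                    status = demo_mgmt_read(vpath); /* mtu, vsport, link state */

            if (status)     /* roll back: slot must look never-opened */
                    memset(vpath, 0, sizeof(*vpath));
            return status;
    }
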
4698 struct __vxge_hw_virtualpath *vpath;
4704 vpath = vp->vpath;
4708 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
4711 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4716 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4718 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
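
The MTU path at lines 4704-4718 first adds the MAC header (the hardware limit includes it, as does max_mtu at line 3975), validates the result, then read-modify-writes the frame-length field of rxmac_vcfg0. A sketch with illustrative field macros (the DEMO_* names are assumptions):

    static int demo_mtu_set(struct demo_vpath *vpath, u32 new_mtu)
    {
            u64 val64;

            new_mtu += DEMO_MAC_HEADER_MAX_SIZE;    /* hardware counts the header */
            if (new_mtu < DEMO_MIN_MTU || new_mtu > vpath->max_mtu)
                    return -EINVAL;

            val64 = demo_readq(vpath->rxmac_vcfg0_reg);
            val64 &= ~DEMO_RTS_MAX_FRM_LEN_MASK;    /* clear the old frame length */
            val64 |= DEMO_RTS_MAX_FRM_LEN(new_mtu);
            demo_writeq(val64, vpath->rxmac_vcfg0_reg);

            vpath->config_mtu = new_mtu - DEMO_MAC_HEADER_MAX_SIZE;
            return 0;
    }
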
4725 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4726 * Enable the DMA vpath statistics. The function is to be called to re-enable
4733 struct __vxge_hw_virtualpath *vpath;
4735 vpath = vp->vpath;
4737 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4742 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4745 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
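
Lines 4742-4745 show the re-enable contract for hardware statistics: the current counters are copied aside into hw_stats_sav before the DMA statistics are re-read, so callers can still compute deltas across the re-enable. Sketch (demo_* names are illustrative):

    static int demo_stats_enable(struct demo_vpath *vpath)
    {
            if (vpath->vp_open == DEMO_VP_NOT_OPEN)
                    return -ENODEV;

            /* preserve the previous snapshot for delta computation */
            memcpy(vpath->hw_stats_sav, vpath->hw_stats,
                   sizeof(*vpath->hw_stats));

            return demo_stats_get(vpath, vpath->hw_stats);  /* refresh from hw */
    }
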
4793 struct __vxge_hw_virtualpath *vpath;
4797 vpath = &hldev->virtual_paths[attr->vp_id];
4799 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4815 vp->vpath = vpath;
4817 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4823 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4831 vpath->fifoh->tx_intr_num =
4835 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4837 if (vpath->stats_block == NULL) {
4842 vpath->hw_stats = vpath->stats_block->memblock;
4843 memset(vpath->hw_stats, 0,
4847 vpath->hw_stats;
4849 vpath->hw_stats_sav =
4851 memset(vpath->hw_stats_sav, 0,
4854 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4860 list_add(&vp->item, &vpath->vpath_handles);
4862 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4866 attr->fifo_attr.userdata = vpath->fifoh;
4867 attr->ring_attr.userdata = vpath->ringh;
4872 if (vpath->ringh != NULL)
4875 if (vpath->fifoh != NULL)
4887 * vxge_hw_vpath_rx_doorbell_post - Post the count of processed RxDs to the
4888 * vpath's receive doorbell
4889 * @vp: Handle obtained from a previous vpath open
4896 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4897 struct __vxge_hw_ring *ring = vpath->ringh;
4898 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4902 new_count = readq(&vpath->vp_reg->rxdmem_size);
4910 &vpath->vp_reg->prc_rxd_doorbell);
4911 readl(&vpath->vp_reg->prc_rxd_doorbell);
4914 val64 = readq(&vpath->vp_reg->prc_cfg6);
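
Lines 4902-4911 post the RxD count to prc_rxd_doorbell and then immediately read the same register back: the read forces the posted PCI write out to the device before the driver proceeds. A minimal sketch of that write-then-flush idiom (demo_* names are illustrative):

    static void demo_rx_doorbell_post(struct demo_vpath *vpath, u32 rxd_count)
    {
            demo_writeq(DEMO_PRC_RXD_DOORBELL_NEW_QW_CNT(rxd_count),
                        vpath->prc_rxd_doorbell_reg);

            /* read-back flushes the posted write out to the adapter */
            (void)demo_readl(vpath->prc_rxd_doorbell_reg);
    }
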
4953 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4959 struct __vxge_hw_virtualpath *vpath = NULL;
4961 u32 vp_id = vp->vpath->vp_id;
4965 vpath = vp->vpath;
4966 devh = vpath->hldev;
4968 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4975 if (!list_empty(&vpath->vpath_handles)) {
4976 list_add(&vp->item, &vpath->vpath_handles);
4987 if (vpath->ringh != NULL)
4990 if (vpath->fifoh != NULL)
4993 if (vpath->stats_block != NULL)
4994 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
5005 * vxge_hw_vpath_reset - Resets vpath
5006 * This function is used to request a reset of the vpath
5012 struct __vxge_hw_virtualpath *vpath = vp->vpath;
5014 vp_id = vpath->vp_id;
5016 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5021 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
5023 vpath->sw_stats->soft_reset_cnt++;
5030 * This function polls for vpath reset completion and re-initializes
5031 * the vpath.
5036 struct __vxge_hw_virtualpath *vpath = NULL;
5041 vp_id = vp->vpath->vp_id;
5042 vpath = vp->vpath;
5043 hldev = vpath->hldev;
5045 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5050 status = __vxge_hw_vpath_reset_check(vpath);
5062 if (vpath->ringh != NULL)
5065 memset(vpath->hw_stats, 0,
5068 memset(vpath->hw_stats_sav, 0,
5071 writeq(vpath->stats_block->dma_addr,
5072 &vpath->vp_reg->stats_cfg);
5081 * vxge_hw_vpath_enable - Enable vpath.
5082 * This routine clears the vpath reset, thereby enabling the vpath
5091 hldev = vp->vpath->hldev;
5094 1 << (16 - vp->vpath->vp_id));
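
The enable path at lines 5091-5094 writes the same per-vpath bit that the reset check polls, 1 << (16 - vp_id), into the common reset-handler register to take the vpath out of reset. Sketch (the register field name is an assumption standing in for the driver's common_reg member):

    static void demo_vpath_enable(struct demo_vpath *vpath)
    {
            u64 val64 = 1ULL << (16 - vpath->vp_id);        /* this vpath's bit */

            /* writing the bit requests "clear vpath reset" in hardware */
            demo_writeq(val64, vpath->cmn_rsthdlr_reg);
    }
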