Lines matching defs:adapter — references to struct ibmveth_adapter in the Linux ibmveth virtual Ethernet driver

42 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
103 static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
105 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
108 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
110 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
114 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
116 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
119 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
121 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
124 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
126 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
129 static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
131 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
134 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
136 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
139 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
141 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
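
Note: the ibmveth_rxq_* accessors above all decode a single big-endian flags_off word in the current RX queue entry. Below is a minimal userspace sketch of that decoding, assuming hypothetical RXQ_* mask values as stand-ins for the driver's IBMVETH_RXQ_* constants (defined in the driver's header, not shown in this listing) and using ntohl()/htonl() in place of be32_to_cpu()/cpu_to_be32().

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl()/htonl() stand in for the be32 helpers */

/* Hypothetical mask values for illustration only. */
#define RXQ_TOGGLE       0x80000000u
#define RXQ_TOGGLE_SHIFT 31
#define RXQ_VALID        0x40000000u
#define RXQ_OFF_MASK     0x0000ffffu

struct rx_q_entry {             /* simplified ibmveth_rx_q_entry */
        uint32_t flags_off;     /* big-endian: flags in the high bits, frame offset low */
        uint32_t length;        /* big-endian frame length */
};

static uint32_t rxq_flags(const struct rx_q_entry *e)
{
        return ntohl(e->flags_off);
}

int main(void)
{
        struct rx_q_entry e = { .flags_off = htonl(RXQ_VALID | RXQ_TOGGLE | 14) };
        int toggle = (rxq_flags(&e) & RXQ_TOGGLE) >> RXQ_TOGGLE_SHIFT;

        printf("valid=%d toggle=%d offset=%u\n",
               !!(rxq_flags(&e) & RXQ_VALID), toggle,
               rxq_flags(&e) & RXQ_OFF_MASK);
        return 0;
}

ibmveth_rxq_pending_buffer() then compares the entry's toggle bit against adapter->rx_queue.toggle, which the driver flips each time the ring index wraps (see the recycle/harvest fragments further down).
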
212 static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
229 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
232 netdev_dbg(adapter->netdev,
234 adapter->replenish_no_mem++;
247 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
250 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
265 adapter->netdev->mtu +
269 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
276 adapter->replenish_add_buff_success++;
291 if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
292 dma_unmap_single(&adapter->vdev->dev,
296 adapter->replenish_add_buff_failure++;
307 static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
309 __be64 *p = adapter->buffer_list_addr + 4096 - 8;
311 adapter->rx_no_buffer = be64_to_cpup(p);
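
Note: ibmveth_update_rx_no_buffer() reads a 64-bit big-endian counter kept in the last 8 bytes of the 4 KB buffer list page (apparently maintained on the hypervisor side, hence the be64_to_cpup() conversion). A hedged userspace model of that read, with a local array standing in for the shared page and be64toh() standing in for be64_to_cpup():

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <endian.h>             /* be64toh()/htobe64() */

#define PAGE_SIZE 4096

int main(void)
{
        unsigned char buffer_list[PAGE_SIZE] = { 0 };
        uint64_t raw = htobe64(42);             /* pretend the hypervisor wrote 42 */
        uint64_t rx_no_buffer;

        memcpy(buffer_list + PAGE_SIZE - 8, &raw, sizeof(raw));

        /* Mirrors "__be64 *p = adapter->buffer_list_addr + 4096 - 8;" */
        memcpy(&raw, buffer_list + PAGE_SIZE - 8, sizeof(raw));
        rx_no_buffer = be64toh(raw);

        printf("rx_no_buffer = %llu\n", (unsigned long long)rx_no_buffer);
        return 0;
}
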
315 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
319 adapter->replenish_task_cycles++;
322 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
326 ibmveth_replenish_buffer_pool(adapter, pool);
329 ibmveth_update_rx_no_buffer(adapter);
333 static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
345 dma_unmap_single(&adapter->vdev->dev,
367 static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
376 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
378 skb = adapter->rx_buff_pool[pool].skbuff[index];
382 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
384 dma_unmap_single(&adapter->vdev->dev,
385 adapter->rx_buff_pool[pool].dma_addr[index],
386 adapter->rx_buff_pool[pool].buff_size,
389 free_index = adapter->rx_buff_pool[pool].producer_index;
390 adapter->rx_buff_pool[pool].producer_index++;
391 if (adapter->rx_buff_pool[pool].producer_index >=
392 adapter->rx_buff_pool[pool].size)
393 adapter->rx_buff_pool[pool].producer_index = 0;
394 adapter->rx_buff_pool[pool].free_map[free_index] = index;
398 atomic_dec(&(adapter->rx_buff_pool[pool].available));
402 static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
404 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
409 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
411 return adapter->rx_buff_pool[pool].skbuff[index];
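
Note: ibmveth_remove_buffer_from_pool() and ibmveth_rxq_get_buffer() both recover a pool number and a buffer index from the 64-bit correlator stored in the RX queue entry; only the bounds checks show up in this listing, so the layout assumed below (pool number in the upper 32 bits, buffer index in the lower 32 bits) is an inference, not something the fragments prove. The free_map/producer_index lines, by contrast, are shown directly: returned buffer indices go into a small ring of free slots. A simplified sketch:

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 8

struct buff_pool {                      /* simplified ibmveth_buff_pool */
        unsigned int free_map[POOL_SIZE];
        unsigned int producer_index;
        unsigned int size;
};

/* Assumed correlator layout: pool number in the high word, buffer index low. */
static uint64_t make_correlator(unsigned int pool, unsigned int index)
{
        return ((uint64_t)pool << 32) | index;
}

static void return_buffer(struct buff_pool *p, uint64_t correlator)
{
        unsigned int index = correlator & 0xffffffffu;
        unsigned int free_index = p->producer_index;

        /* Same order as the fragments: take the current slot, advance and
         * wrap the producer index, then store the freed buffer index. */
        p->producer_index++;
        if (p->producer_index >= p->size)
                p->producer_index = 0;
        p->free_map[free_index] = index;
}

int main(void)
{
        struct buff_pool pool = { .size = POOL_SIZE };
        uint64_t correlator = make_correlator(2, 5);

        printf("pool=%u index=%u\n",
               (unsigned int)(correlator >> 32),
               (unsigned int)(correlator & 0xffffffffu));

        return_buffer(&pool, correlator);
        printf("free_map[0]=%u producer_index=%u\n",
               pool.free_map[0], pool.producer_index);
        return 0;
}
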
415 static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
417 u32 q_index = adapter->rx_queue.index;
418 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
426 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
428 if (!adapter->rx_buff_pool[pool].active) {
429 ibmveth_rxq_harvest_buffer(adapter);
430 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
435 adapter->rx_buff_pool[pool].buff_size;
436 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
438 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
441 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
443 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
447 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
448 adapter->rx_queue.index = 0;
449 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
456 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
458 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
460 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
461 adapter->rx_queue.index = 0;
462 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
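
Note: ibmveth_rxq_recycle_buffer() and ibmveth_rxq_harvest_buffer() both finish by advancing the RX queue consumer index, flipping the expected toggle bit whenever the index wraps past num_slots; this is what lets ibmveth_rxq_pending_buffer() tell fresh entries from stale ones across ring wraparounds. A compact model of that consumer-side bookkeeping:

#include <stdio.h>

struct rx_queue {                       /* simplified consumer-side state */
        unsigned int index;
        unsigned int num_slots;
        int toggle;
};

/* Mirrors the "++index == num_slots -> index = 0, flip toggle" fragments. */
static void rxq_advance(struct rx_queue *q)
{
        if (++q->index == q->num_slots) {
                q->index = 0;
                q->toggle = !q->toggle;
        }
}

int main(void)
{
        struct rx_queue q = { .index = 0, .num_slots = 4, .toggle = 1 };

        for (int i = 0; i < 5; i++)
                rxq_advance(&q);
        printf("index=%u toggle=%d\n", q.index, q.toggle);      /* index=1 toggle=0 */
        return 0;
}
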
466 static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
468 dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
469 adapter->tx_ltb_size, DMA_TO_DEVICE);
470 kfree(adapter->tx_ltb_ptr[idx]);
471 adapter->tx_ltb_ptr[idx] = NULL;
474 static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
476 adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
478 if (!adapter->tx_ltb_ptr[idx]) {
479 netdev_err(adapter->netdev,
483 adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
484 adapter->tx_ltb_ptr[idx],
485 adapter->tx_ltb_size,
487 if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
488 netdev_err(adapter->netdev,
490 kfree(adapter->tx_ltb_ptr[idx]);
491 adapter->tx_ltb_ptr[idx] = NULL;
498 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
504 * After a kexec the adapter will still be open, so our attempt to
505 * open it will fail. So if we get a failure we free the adapter and
509 rc = h_register_logical_lan(adapter->vdev->unit_address,
510 adapter->buffer_list_dma, rxq_desc.desc,
511 adapter->filter_list_dma, mac_address);
515 rc = h_free_logical_lan(adapter->vdev->unit_address);
527 struct ibmveth_adapter *adapter = netdev_priv(netdev);
538 napi_enable(&adapter->napi);
541 rxq_entries += adapter->rx_buff_pool[i].size;
544 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
545 if (!adapter->buffer_list_addr) {
550 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
551 if (!adapter->filter_list_addr) {
556 dev = &adapter->vdev->dev;
558 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
560 adapter->rx_queue.queue_addr =
561 dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
562 &adapter->rx_queue.queue_dma, GFP_KERNEL);
563 if (!adapter->rx_queue.queue_addr)
566 adapter->buffer_list_dma = dma_map_single(dev,
567 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
568 if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
573 adapter->filter_list_dma = dma_map_single(dev,
574 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
575 if (dma_mapping_error(dev, adapter->filter_list_dma)) {
581 if (ibmveth_allocate_tx_ltb(adapter, i))
585 adapter->rx_queue.index = 0;
586 adapter->rx_queue.num_slots = rxq_entries;
587 adapter->rx_queue.toggle = 1;
592 adapter->rx_queue.queue_len;
593 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
595 netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
596 netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
597 netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
599 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
601 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
608 adapter->buffer_list_dma,
609 adapter->filter_list_dma,
617 if (!adapter->rx_buff_pool[i].active)
619 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
621 adapter->rx_buff_pool[i].active = 0;
634 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
653 if (adapter->rx_buff_pool[i].active)
654 ibmveth_free_buffer_pool(adapter,
655 &adapter->rx_buff_pool[i]);
658 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
663 ibmveth_free_tx_ltb(adapter, i);
667 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
670 dma_free_coherent(dev, adapter->rx_queue.queue_len,
671 adapter->rx_queue.queue_addr,
672 adapter->rx_queue.queue_dma);
674 free_page((unsigned long)adapter->filter_list_addr);
676 free_page((unsigned long)adapter->buffer_list_addr);
678 napi_disable(&adapter->napi);
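
Note: the long run of fragments above is ibmveth_open() and its error unwind: the buffer list page, filter list page, RX queue, DMA mappings, and per-queue TX LTBs are set up in order, and a failure at any step tears down everything already set up in reverse. A generic userspace sketch of that goto-ladder pattern, with plain heap buffers standing in for the driver's pages, DMA mappings, and LTBs (the step names and the fail_at_step knob are invented for the demo):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev_state {
        void *buffer_list;
        void *filter_list;
        void *rx_queue;
};

static int open_like(struct dev_state *s, int fail_at_step)
{
        s->buffer_list = calloc(1, 4096);
        if (!s->buffer_list || fail_at_step == 1)
                goto err_buffer_list;

        s->filter_list = calloc(1, 4096);
        if (!s->filter_list || fail_at_step == 2)
                goto err_filter_list;

        s->rx_queue = calloc(1, 4096);
        if (!s->rx_queue || fail_at_step == 3)
                goto err_rx_queue;

        return 0;                       /* success: resources stay allocated */

        /* Reverse-order teardown, newest resource first. */
err_rx_queue:
        free(s->rx_queue);
err_filter_list:
        free(s->filter_list);
err_buffer_list:
        free(s->buffer_list);
        memset(s, 0, sizeof(*s));
        return -1;
}

int main(void)
{
        struct dev_state s;

        printf("fail at step 2 -> %d\n", open_like(&s, 2));
        printf("no failure     -> %d\n", open_like(&s, 0));
        free(s.rx_queue);
        free(s.filter_list);
        free(s.buffer_list);
        return 0;
}
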
684 struct ibmveth_adapter *adapter = netdev_priv(netdev);
685 struct device *dev = &adapter->vdev->dev;
691 napi_disable(&adapter->napi);
695 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
698 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
708 ibmveth_update_rx_no_buffer(adapter);
710 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
712 free_page((unsigned long)adapter->buffer_list_addr);
714 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
716 free_page((unsigned long)adapter->filter_list_addr);
718 dma_free_coherent(dev, adapter->rx_queue.queue_len,
719 adapter->rx_queue.queue_addr,
720 adapter->rx_queue.queue_dma);
723 if (adapter->rx_buff_pool[i].active)
724 ibmveth_free_buffer_pool(adapter,
725 &adapter->rx_buff_pool[i]);
728 ibmveth_free_tx_ltb(adapter, i);
738 struct ibmveth_adapter *adapter = netdev_priv(dev);
741 &adapter->speed,
742 &adapter->duplex);
748 struct ibmveth_adapter *adapter = netdev_priv(dev);
750 cmd->base.speed = adapter->speed;
751 cmd->base.duplex = adapter->duplex;
759 struct ibmveth_adapter *adapter = netdev_priv(dev);
761 adapter->speed = SPEED_1000;
762 adapter->duplex = DUPLEX_FULL;
792 struct ibmveth_adapter *adapter = netdev_priv(dev);
817 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
821 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
829 h_illan_attributes(adapter->vdev->unit_address,
836 adapter->fw_ipv4_csum_support = data;
839 ret6 = h_illan_attributes(adapter->vdev->unit_address,
847 h_illan_attributes(adapter->vdev->unit_address,
854 adapter->fw_ipv6_csum_support = data;
857 adapter->rx_csum = data;
875 struct ibmveth_adapter *adapter = netdev_priv(dev);
894 ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
898 ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
905 h_illan_attributes(adapter->vdev->unit_address,
913 adapter->fw_large_send_support = data;
914 adapter->large_send = data;
924 adapter->large_send = data;
936 struct ibmveth_adapter *adapter = netdev_priv(dev);
941 if (rx_csum != adapter->rx_csum) {
943 if (rc1 && !adapter->rx_csum)
949 if (large_send != adapter->large_send) {
951 if (rc2 && !adapter->large_send)
984 struct ibmveth_adapter *adapter = netdev_priv(dev);
987 data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
1003 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1021 if (adapter->tx_ltb_ptr[i])
1024 rc = ibmveth_allocate_tx_ltb(adapter, i);
1044 if (adapter->tx_ltb_ptr[i - 1])
1045 ibmveth_free_tx_ltb(adapter, i - 1);
1070 static int ibmveth_send(struct ibmveth_adapter *adapter,
1084 ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
1086 adapter->fw_large_send_support);
1090 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
1118 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1151 if (skb_is_gso(skb) && adapter->fw_large_send_support)
1156 if (adapter->fw_large_send_support) {
1158 adapter->tx_large_packets++;
1167 adapter->tx_large_packets++;
1172 if (unlikely(skb->len > adapter->tx_ltb_size)) {
1173 netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
1174 skb->len, adapter->tx_ltb_size);
1178 memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
1184 memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
1190 netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
1196 desc.fields.address = adapter->tx_ltb_dma[queue_num];
1200 if (ibmveth_send(adapter, desc.desc, mss)) {
1201 adapter->tx_send_failed++;
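
Note: the transmit fragments above copy the linear skb head and then each page fragment into one preallocated long-term buffer (tx_ltb_ptr[queue_num]), so a single contiguous descriptor (tx_ltb_dma[queue_num]) can be handed to h_send_logical_lan(); the length comparison afterwards guards against a miscounted copy. A userspace sketch of that gather copy, with plain byte arrays standing in for skb_headlen() data and the skb fragments:

#include <stdio.h>
#include <string.h>

#define LTB_SIZE 64                     /* stand-in for adapter->tx_ltb_size */

int main(void)
{
        const char head[] = "ethhdr+iphdr";             /* linear skb head */
        const char *frags[] = { "frag0", "frag1" };     /* skb page fragments */
        char ltb[LTB_SIZE];
        size_t total_bytes;

        /* Copy the head first, then append each fragment, like the
         * memcpy() calls into tx_ltb_ptr[queue_num] above. */
        memcpy(ltb, head, sizeof(head) - 1);
        total_bytes = sizeof(head) - 1;

        for (size_t i = 0; i < sizeof(frags) / sizeof(frags[0]); i++) {
                size_t len = strlen(frags[i]);

                if (total_bytes + len > LTB_SIZE) {
                        fprintf(stderr, "packet exceeds ltb\n");
                        return 1;
                }
                memcpy(ltb + total_bytes, frags[i], len);
                total_bytes += len;
        }

        printf("copied %zu bytes into the ltb\n", total_bytes);
        return 0;
}
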
1263 struct ibmveth_adapter *adapter)
1333 struct ibmveth_adapter *adapter =
1335 struct net_device *netdev = adapter->netdev;
1341 if (!ibmveth_rxq_pending_buffer(adapter))
1345 if (!ibmveth_rxq_buffer_valid(adapter)) {
1347 adapter->rx_invalid_buffer++;
1349 ibmveth_rxq_recycle_buffer(adapter);
1352 int length = ibmveth_rxq_frame_length(adapter);
1353 int offset = ibmveth_rxq_frame_offset(adapter);
1354 int csum_good = ibmveth_rxq_csum_good(adapter);
1355 int lrg_pkt = ibmveth_rxq_large_packet(adapter);
1358 skb = ibmveth_rxq_get_buffer(adapter);
1382 if (!ibmveth_rxq_recycle_buffer(adapter))
1386 ibmveth_rxq_harvest_buffer(adapter);
1405 adapter->rx_large_packets++;
1410 ibmveth_rx_csum_helper(skb, adapter);
1421 ibmveth_replenish_task(adapter);
1429 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1434 if (ibmveth_rxq_pending_buffer(adapter) &&
1436 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
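
Note: the tail of ibmveth_poll() re-enables the queue interrupt with h_vio_signal() and then rechecks ibmveth_rxq_pending_buffer(); if entries arrived in that window it disables the interrupt again and keeps polling, which closes the usual "packet arrives just after NAPI completes" race. A simplified model of that enable/recheck/disable sequence, with plain variables in place of the hypercall and NAPI machinery:

#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled;
static int  pending;                    /* entries the "hypervisor" has queued */

static void vio_signal(bool enable) { irq_enabled = enable; }

/* Returns true if polling is really finished, false if new work slipped in. */
static bool poll_done(void)
{
        vio_signal(true);               /* VIO_IRQ_ENABLE */
        if (pending > 0) {              /* ibmveth_rxq_pending_buffer() */
                vio_signal(false);      /* VIO_IRQ_DISABLE, keep polling */
                return false;
        }
        return true;
}

int main(void)
{
        bool done;

        pending = 1;                    /* a packet arrived after the poll loop drained */
        done = poll_done();
        printf("done=%d irq_enabled=%d\n", done, irq_enabled);
        return 0;
}
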
1447 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1450 if (napi_schedule_prep(&adapter->napi)) {
1451 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1454 __napi_schedule(&adapter->napi);
1461 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1465 (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1466 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1477 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1492 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1503 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1515 struct ibmveth_adapter *adapter = netdev_priv(dev);
1516 struct vio_dev *viodev = adapter->vdev;
1522 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1530 if (netif_running(adapter->netdev)) {
1532 ibmveth_close(adapter->netdev);
1537 adapter->rx_buff_pool[i].active = 1;
1539 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1545 return ibmveth_open(adapter->netdev);
1551 if (need_restart && (rc = ibmveth_open(adapter->netdev)))
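
Note: ibmveth_change_mtu() first checks that the new MTU plus a per-buffer overhead (new_mtu_oh in the fragments) fits in at least one RX buffer pool, and only then, closing and reopening the device if it is running, marks every pool up to that size active. A small sketch of the fit check; the pool sizes and overhead below are made-up example values, since the driver's real overhead constant and pool size table are not part of this listing:

#include <stdio.h>

#define NUM_POOLS 5
#define BUFF_OH   22                    /* hypothetical per-buffer overhead */

/* Hypothetical pool buffer sizes, smallest to largest. */
static const unsigned int buff_size[NUM_POOLS] = {
        512, 2048, 16384, 32768, 65536
};

/* Returns the first pool whose buffers can hold new_mtu + overhead, or -1. */
static int pool_for_mtu(unsigned int new_mtu)
{
        unsigned int new_mtu_oh = new_mtu + BUFF_OH;

        for (int i = 0; i < NUM_POOLS; i++)
                if (new_mtu_oh <= buff_size[i])
                        return i;
        return -1;
}

int main(void)
{
        printf("MTU 1500 -> pool %d\n", pool_for_mtu(1500));
        printf("MTU 9000 -> pool %d\n", pool_for_mtu(9000));
        return 0;
}
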
1576 struct ibmveth_adapter *adapter;
1588 adapter = netdev_priv(netdev);
1597 if (adapter->rx_buff_pool[i].active)
1599 adapter->rx_buff_pool[i].size *
1600 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1602 rxqentries += adapter->rx_buff_pool[i].size;
1613 struct ibmveth_adapter *adapter = netdev_priv(dev);
1622 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1624 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1653 struct ibmveth_adapter *adapter;
1690 adapter = netdev_priv(netdev);
1693 adapter->vdev = dev;
1694 adapter->netdev = netdev;
1695 adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1698 netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
1712 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1723 adapter->is_active_trunk = false;
1725 adapter->is_active_trunk = true;
1739 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1742 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1759 adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
1761 adapter->tx_ltb_ptr[i] = NULL;
1763 netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1784 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1788 kobject_put(&adapter->rx_buff_pool[i].kobj);
1823 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1848 if (pool == &adapter->rx_buff_pool[i])
1850 if (!adapter->rx_buff_pool[i].active)
1852 if (mtu <= adapter->rx_buff_pool[i].buff_size)