Lines Matching refs:qdev
103 static int ql_sem_spinlock(struct ql3_adapter *qdev,
107 qdev->mem_map_registers;
122 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
125 qdev->mem_map_registers;
130 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
133 qdev->mem_map_registers;
144 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
149 if (ql_sem_lock(qdev,
151 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
153 netdev_printk(KERN_DEBUG, qdev->ndev,
160 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
164 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
167 qdev->mem_map_registers;
172 qdev->current_page = page;
175 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
180 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
182 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
187 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
192 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
197 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
199 if (qdev->current_page != 0)
200 ql_set_register_page(qdev, 0);
203 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
207 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
209 if (qdev->current_page != 0)
210 ql_set_register_page(qdev, 0);
214 static void ql_write_common_reg_l(struct ql3_adapter *qdev,
219 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
222 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
225 static void ql_write_common_reg(struct ql3_adapter *qdev,
232 static void ql_write_nvram_reg(struct ql3_adapter *qdev,
240 static void ql_write_page0_reg(struct ql3_adapter *qdev,
243 if (qdev->current_page != 0)
244 ql_set_register_page(qdev, 0);
252 static void ql_write_page1_reg(struct ql3_adapter *qdev,
255 if (qdev->current_page != 1)
256 ql_set_register_page(qdev, 1);
264 static void ql_write_page2_reg(struct ql3_adapter *qdev,
267 if (qdev->current_page != 2)
268 ql_set_register_page(qdev, 2);
273 static void ql_disable_interrupts(struct ql3_adapter *qdev)
276 qdev->mem_map_registers;
278 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
283 static void ql_enable_interrupts(struct ql3_adapter *qdev)
286 qdev->mem_map_registers;
288 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
293 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
300 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
301 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
303 qdev->lrg_buf_free_tail->next = lrg_buf_cb;
304 qdev->lrg_buf_free_tail = lrg_buf_cb;
308 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
309 qdev->lrg_buffer_len);
311 qdev->lrg_buf_skb_check++;
318 map = dma_map_single(&qdev->pdev->dev,
320 qdev->lrg_buffer_len - QL_HEADER_SPACE,
322 err = dma_mapping_error(&qdev->pdev->dev, map);
324 netdev_err(qdev->ndev,
330 qdev->lrg_buf_skb_check++;
340 qdev->lrg_buffer_len -
345 qdev->lrg_buf_free_count++;
349 *qdev)
351 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
354 qdev->lrg_buf_free_head = lrg_buf_cb->next;
355 if (qdev->lrg_buf_free_head == NULL)
356 qdev->lrg_buf_free_tail = NULL;
357 qdev->lrg_buf_free_count--;
366 static void fm93c56a_deselect(struct ql3_adapter *qdev);
367 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
373 static void fm93c56a_select(struct ql3_adapter *qdev)
376 qdev->mem_map_registers;
379 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
380 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
386 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
393 qdev->mem_map_registers;
397 ql_write_nvram_reg(qdev, spir,
398 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
400 ql_write_nvram_reg(qdev, spir,
401 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
403 ql_write_nvram_reg(qdev, spir,
404 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
416 ql_write_nvram_reg(qdev, spir,
418 qdev->eeprom_cmd_data | dataBit));
421 ql_write_nvram_reg(qdev, spir,
422 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
424 ql_write_nvram_reg(qdev, spir,
425 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
441 ql_write_nvram_reg(qdev, spir,
443 qdev->eeprom_cmd_data | dataBit));
446 ql_write_nvram_reg(qdev, spir,
447 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
449 ql_write_nvram_reg(qdev, spir,
450 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
459 static void fm93c56a_deselect(struct ql3_adapter *qdev)
462 qdev->mem_map_registers;
465 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
466 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
472 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
478 qdev->mem_map_registers;
484 ql_write_nvram_reg(qdev, spir,
485 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
487 ql_write_nvram_reg(qdev, spir,
488 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
490 dataBit = (ql_read_common_reg(qdev, spir) &
500 static void eeprom_readword(struct ql3_adapter *qdev,
503 fm93c56a_select(qdev);
504 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
505 fm93c56a_datain(qdev, value);
506 fm93c56a_deselect(qdev);
519 static int ql_get_nvram_params(struct ql3_adapter *qdev)
526 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
528 pEEPROMData = (u16 *)&qdev->nvram_data;
529 qdev->eeprom_cmd_data = 0;
530 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
531 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
534 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
539 eeprom_readword(qdev, index, pEEPROMData);
543 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
546 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
548 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
552 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
560 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
563 qdev->mem_map_registers;
568 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
577 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
580 qdev->mem_map_registers;
583 if (qdev->numPorts > 1) {
596 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
599 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
604 static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
608 qdev->mem_map_registers;
611 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
624 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
627 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
634 static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
638 qdev->mem_map_registers;
641 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
643 if (ql_wait_for_mii_ready(qdev)) {
644 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
648 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
651 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
654 if (ql_wait_for_mii_ready(qdev)) {
655 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
660 ql_mii_enable_scan_mode(qdev);
665 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
669 qdev->mem_map_registers;
673 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
675 if (ql_wait_for_mii_ready(qdev)) {
676 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
680 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
683 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
686 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
690 if (ql_wait_for_mii_ready(qdev)) {
691 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
695 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
699 ql_mii_enable_scan_mode(qdev);
704 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
707 qdev->mem_map_registers;
709 ql_mii_disable_scan_mode(qdev);
711 if (ql_wait_for_mii_ready(qdev)) {
712 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
716 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
717 qdev->PHYAddr | regAddr);
719 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
722 if (ql_wait_for_mii_ready(qdev)) {
723 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
727 ql_mii_enable_scan_mode(qdev);
732 static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
736 qdev->mem_map_registers;
738 ql_mii_disable_scan_mode(qdev);
740 if (ql_wait_for_mii_ready(qdev)) {
741 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
745 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
746 qdev->PHYAddr | regAddr);
748 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
751 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
755 if (ql_wait_for_mii_ready(qdev)) {
756 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
760 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
763 ql_mii_enable_scan_mode(qdev);
768 static void ql_petbi_reset(struct ql3_adapter *qdev)
770 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
773 static void ql_petbi_start_neg(struct ql3_adapter *qdev)
778 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
780 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
782 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
785 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
791 static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
793 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
794 PHYAddr[qdev->mac_index]);
797 static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
802 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
803 PHYAddr[qdev->mac_index]);
805 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
806 PHYAddr[qdev->mac_index]);
808 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
810 PHYAddr[qdev->mac_index]);
812 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
815 PHYAddr[qdev->mac_index]);
818 static void ql_petbi_init(struct ql3_adapter *qdev)
820 ql_petbi_reset(qdev);
821 ql_petbi_start_neg(qdev);
824 static void ql_petbi_init_ex(struct ql3_adapter *qdev)
826 ql_petbi_reset_ex(qdev);
827 ql_petbi_start_neg_ex(qdev);
830 static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
834 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
840 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
842 netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
844 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
846 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
848 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
850 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
852 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
854 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
856 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
858 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
860 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
862 ql_mii_write_reg_ex(qdev, 0x11,
863 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
869 ql_mii_write_reg(qdev, 0x12, 0x840a);
870 ql_mii_write_reg(qdev, 0x00, 0x1140);
871 ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
874 static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
897 netdev_info(qdev->ndev, "Phy: %s\n",
907 static int ql_phy_get_speed(struct ql3_adapter *qdev)
911 switch (qdev->phyType) {
913 if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
920 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
938 static int ql_is_full_dup(struct ql3_adapter *qdev)
942 switch (qdev->phyType) {
944 if (ql_mii_read_reg(qdev, 0x1A, &reg))
951 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
958 static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
962 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
968 static int PHY_Setup(struct ql3_adapter *qdev)
977 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
979 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
983 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
985 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
994 if (qdev->mac_index == 0)
999 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1001 netdev_err(qdev->ndev,
1006 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1008 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1018 qdev->phyType = getPhyType(qdev, reg1, reg2);
1020 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1022 phyAgereSpecificInit(qdev, miiAddr);
1023 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1024 netdev_err(qdev->ndev, "PHY is unknown\n");
1034 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1037 qdev->mem_map_registers;
1045 if (qdev->mac_index)
1046 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1048 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1054 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1057 qdev->mem_map_registers;
1065 if (qdev->mac_index)
1066 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1068 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1074 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1077 qdev->mem_map_registers;
1085 if (qdev->mac_index)
1086 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1088 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1094 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1097 qdev->mem_map_registers;
1105 if (qdev->mac_index)
1106 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1108 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1114 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1117 qdev->mem_map_registers;
1127 if (qdev->mac_index)
1128 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1130 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1136 static int ql_is_fiber(struct ql3_adapter *qdev)
1139 qdev->mem_map_registers;
1143 switch (qdev->mac_index) {
1152 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1156 static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1159 ql_mii_read_reg(qdev, 0x00, &reg);
1166 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1169 qdev->mem_map_registers;
1173 switch (qdev->mac_index) {
1182 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1184 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1187 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1194 static int ql_is_neg_pause(struct ql3_adapter *qdev)
1196 if (ql_is_fiber(qdev))
1197 return ql_is_petbi_neg_pause(qdev);
1199 return ql_is_phy_neg_pause(qdev);
1202 static int ql_auto_neg_error(struct ql3_adapter *qdev)
1205 qdev->mem_map_registers;
1209 switch (qdev->mac_index) {
1217 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1221 static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1223 if (ql_is_fiber(qdev))
1226 return ql_phy_get_speed(qdev);
1229 static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1231 if (ql_is_fiber(qdev))
1234 return ql_is_full_dup(qdev);
1240 static int ql_link_down_detect(struct ql3_adapter *qdev)
1243 qdev->mem_map_registers;
1247 switch (qdev->mac_index) {
1257 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1264 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1267 qdev->mem_map_registers;
1269 switch (qdev->mac_index) {
1271 ql_write_common_reg(qdev,
1278 ql_write_common_reg(qdev,
1294 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1297 qdev->mem_map_registers;
1301 switch (qdev->mac_index) {
1312 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1314 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1319 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1323 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
1325 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1326 PHYAddr[qdev->mac_index]);
1329 static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1334 if (qdev->phyType == PHY_AGERE_ET1011C)
1335 ql_mii_write_reg(qdev, 0x13, 0x0000);
1338 if (qdev->mac_index == 0)
1340 qdev->nvram_data.macCfg_port0.portConfiguration;
1343 qdev->nvram_data.macCfg_port1.portConfiguration;
1351 ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1352 PHYAddr[qdev->mac_index]);
1362 ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1363 PHYAddr[qdev->mac_index]);
1366 ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1367 PHYAddr[qdev->mac_index]);
1392 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1393 PHYAddr[qdev->mac_index]);
1395 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1397 ql_mii_write_reg_ex(qdev, CONTROL_REG,
1399 PHYAddr[qdev->mac_index]);
1402 static void ql_phy_init_ex(struct ql3_adapter *qdev)
1404 ql_phy_reset_ex(qdev);
1405 PHY_Setup(qdev);
1406 ql_phy_start_neg_ex(qdev);
1412 static u32 ql_get_link_state(struct ql3_adapter *qdev)
1415 qdev->mem_map_registers;
1419 switch (qdev->mac_index) {
1428 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1437 static int ql_port_start(struct ql3_adapter *qdev)
1439 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1440 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1442 netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1446 if (ql_is_fiber(qdev)) {
1447 ql_petbi_init(qdev);
1450 ql_phy_init_ex(qdev);
1453 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1457 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1460 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1461 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1465 if (!ql_auto_neg_error(qdev)) {
1466 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1468 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1470 ql_mac_cfg_soft_reset(qdev, 1);
1471 ql_mac_cfg_gig(qdev,
1473 (qdev) ==
1475 ql_mac_cfg_full_dup(qdev,
1477 (qdev));
1478 ql_mac_cfg_pause(qdev,
1480 (qdev));
1481 ql_mac_cfg_soft_reset(qdev, 0);
1484 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1486 ql_mac_enable(qdev, 1);
1489 qdev->port_link_state = LS_UP;
1490 netif_start_queue(qdev->ndev);
1491 netif_carrier_on(qdev->ndev);
1492 netif_info(qdev, link, qdev->ndev,
1494 ql_get_link_speed(qdev),
1495 ql_is_link_full_dup(qdev) ? "full" : "half");
1499 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1500 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1506 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1507 if (ql_port_start(qdev)) /* Restart port */
1512 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1518 struct ql3_adapter *qdev =
1524 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1526 curr_link_state = ql_get_link_state(qdev);
1528 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1529 netif_info(qdev, link, qdev->ndev,
1532 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1535 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1540 switch (qdev->port_link_state) {
1542 if (test_bit(QL_LINK_MASTER, &qdev->flags))
1543 ql_port_start(qdev);
1544 qdev->port_link_state = LS_DOWN;
1549 netif_info(qdev, link, qdev->ndev, "Link is up\n");
1550 if (ql_is_auto_neg_complete(qdev))
1551 ql_finish_auto_neg(qdev);
1553 if (qdev->port_link_state == LS_UP)
1554 ql_link_down_detect_clear(qdev);
1556 qdev->port_link_state = LS_UP;
1566 netif_info(qdev, link, qdev->ndev, "Link is down\n");
1567 qdev->port_link_state = LS_DOWN;
1569 if (ql_link_down_detect(qdev))
1570 qdev->port_link_state = LS_DOWN;
1573 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1576 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1582 static void ql_get_phy_owner(struct ql3_adapter *qdev)
1584 if (ql_this_adapter_controls_port(qdev))
1585 set_bit(QL_LINK_MASTER, &qdev->flags);
1587 clear_bit(QL_LINK_MASTER, &qdev->flags);
1593 static void ql_init_scan_mode(struct ql3_adapter *qdev)
1595 ql_mii_enable_scan_mode(qdev);
1597 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1598 if (ql_this_adapter_controls_port(qdev))
1599 ql_petbi_init_ex(qdev);
1601 if (ql_this_adapter_controls_port(qdev))
1602 ql_phy_init_ex(qdev);
1612 static int ql_mii_setup(struct ql3_adapter *qdev)
1616 qdev->mem_map_registers;
1618 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1619 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1623 if (qdev->device_id == QL3032_DEVICE_ID)
1624 ql_write_page0_reg(qdev,
1630 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1633 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1649 static u32 ql_supported_modes(struct ql3_adapter *qdev)
1651 if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1657 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1661 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1662 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1664 (qdev->mac_index) * 2) << 7)) {
1665 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1668 status = ql_is_auto_cfg(qdev);
1669 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1670 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1674 static u32 ql_get_speed(struct ql3_adapter *qdev)
1678 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1679 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1681 (qdev->mac_index) * 2) << 7)) {
1682 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1685 status = ql_get_link_speed(qdev);
1686 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1687 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1691 static int ql_get_full_dup(struct ql3_adapter *qdev)
1695 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1696 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1698 (qdev->mac_index) * 2) << 7)) {
1699 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1702 status = ql_is_link_full_dup(qdev);
1703 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1711 struct ql3_adapter *qdev = netdev_priv(ndev);
1714 supported = ql_supported_modes(qdev);
1716 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1720 cmd->base.phy_address = qdev->PHYAddr;
1722 advertising = ql_supported_modes(qdev);
1723 cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
1724 cmd->base.speed = ql_get_speed(qdev);
1725 cmd->base.duplex = ql_get_full_dup(qdev);
1738 struct ql3_adapter *qdev = netdev_priv(ndev);
1742 strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
1748 struct ql3_adapter *qdev = netdev_priv(ndev);
1749 return qdev->msg_enable;
1754 struct ql3_adapter *qdev = netdev_priv(ndev);
1755 qdev->msg_enable = value;
1761 struct ql3_adapter *qdev = netdev_priv(ndev);
1763 qdev->mem_map_registers;
1766 if (qdev->mac_index == 0)
1767 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1769 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1771 pause->autoneg = ql_get_auto_cfg_status(qdev);
1785 static int ql_populate_free_queue(struct ql3_adapter *qdev)
1787 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1794 netdev_alloc_skb(qdev->ndev,
1795 qdev->lrg_buffer_len);
1797 netdev_printk(KERN_DEBUG, qdev->ndev,
1806 map = dma_map_single(&qdev->pdev->dev,
1808 qdev->lrg_buffer_len - QL_HEADER_SPACE,
1811 err = dma_mapping_error(&qdev->pdev->dev, map);
1813 netdev_err(qdev->ndev,
1828 qdev->lrg_buffer_len -
1830 --qdev->lrg_buf_skb_check;
1831 if (!qdev->lrg_buf_skb_check)
1843 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1846 qdev->mem_map_registers;
1848 if (qdev->small_buf_release_cnt >= 16) {
1849 while (qdev->small_buf_release_cnt >= 16) {
1850 qdev->small_buf_q_producer_index++;
1852 if (qdev->small_buf_q_producer_index ==
1854 qdev->small_buf_q_producer_index = 0;
1855 qdev->small_buf_release_cnt -= 8;
1858 writel_relaxed(qdev->small_buf_q_producer_index,
1866 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1872 qdev->mem_map_registers;
1874 if ((qdev->lrg_buf_free_count >= 8) &&
1875 (qdev->lrg_buf_release_cnt >= 16)) {
1877 if (qdev->lrg_buf_skb_check)
1878 if (!ql_populate_free_queue(qdev))
1881 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1883 while ((qdev->lrg_buf_release_cnt >= 16) &&
1884 (qdev->lrg_buf_free_count >= 8)) {
1888 ql_get_from_lrg_buf_free_list(qdev);
1895 qdev->lrg_buf_release_cnt--;
1898 qdev->lrg_buf_q_producer_index++;
1900 if (qdev->lrg_buf_q_producer_index ==
1901 qdev->num_lbufq_entries)
1902 qdev->lrg_buf_q_producer_index = 0;
1904 if (qdev->lrg_buf_q_producer_index ==
1905 (qdev->num_lbufq_entries - 1)) {
1906 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1910 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1911 writel(qdev->lrg_buf_q_producer_index,
1916 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1923 netdev_warn(qdev->ndev,
1927 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1931 netdev_err(qdev->ndev,
1934 qdev->ndev->stats.tx_errors++;
1939 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1942 qdev->ndev->stats.tx_errors++;
1946 dma_unmap_single(&qdev->pdev->dev,
1952 dma_unmap_page(&qdev->pdev->dev,
1958 qdev->ndev->stats.tx_packets++;
1959 qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1966 atomic_inc(&qdev->tx_count);
1969 static void ql_get_sbuf(struct ql3_adapter *qdev)
1971 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1972 qdev->small_buf_index = 0;
1973 qdev->small_buf_release_cnt++;
1976 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1979 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1980 qdev->lrg_buf_release_cnt++;
1981 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1982 qdev->lrg_buf_index = 0;
1998 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2009 ql_get_sbuf(qdev);
2011 if (qdev->device_id == QL3022_DEVICE_ID)
2012 lrg_buf_cb1 = ql_get_lbuf(qdev);
2015 lrg_buf_cb2 = ql_get_lbuf(qdev);
2018 qdev->ndev->stats.rx_packets++;
2019 qdev->ndev->stats.rx_bytes += length;
2022 dma_unmap_single(&qdev->pdev->dev,
2027 skb->protocol = eth_type_trans(skb, qdev->ndev);
2029 napi_gro_receive(&qdev->napi, skb);
2032 if (qdev->device_id == QL3022_DEVICE_ID)
2033 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2034 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2037 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2043 struct net_device *ndev = qdev->ndev;
2051 ql_get_sbuf(qdev);
2053 if (qdev->device_id == QL3022_DEVICE_ID) {
2055 lrg_buf_cb1 = ql_get_lbuf(qdev);
2063 lrg_buf_cb2 = ql_get_lbuf(qdev);
2067 dma_unmap_single(&qdev->pdev->dev,
2073 if (qdev->device_id == QL3022_DEVICE_ID) {
2096 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2098 napi_gro_receive(&qdev->napi, skb2);
2103 if (qdev->device_id == QL3022_DEVICE_ID)
2104 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2105 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2108 static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
2111 struct net_device *ndev = qdev->ndev;
2115 while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2116 qdev->rsp_consumer_index) && (work_done < budget)) {
2118 net_rsp = qdev->rsp_current;
2124 if (qdev->device_id == QL3032_DEVICE_ID)
2130 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2136 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2143 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2161 qdev->rsp_consumer_index++;
2163 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2164 qdev->rsp_consumer_index = 0;
2165 qdev->rsp_current = qdev->rsp_q_virt_addr;
2167 qdev->rsp_current++;
2177 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2179 qdev->mem_map_registers;
2182 work_done = ql_tx_rx_clean(qdev, budget);
2187 spin_lock_irqsave(&qdev->hw_lock, flags);
2188 ql_update_small_bufq_prod_index(qdev);
2189 ql_update_lrg_bufq_prod_index(qdev);
2190 writel(qdev->rsp_consumer_index,
2192 spin_unlock_irqrestore(&qdev->hw_lock, flags);
2194 ql_enable_interrupts(qdev);
2203 struct ql3_adapter *qdev = netdev_priv(ndev);
2205 qdev->mem_map_registers;
2210 value = ql_read_common_reg_l(qdev,
2214 spin_lock(&qdev->adapter_lock);
2215 netif_stop_queue(qdev->ndev);
2216 netif_carrier_off(qdev->ndev);
2217 ql_disable_interrupts(qdev);
2218 qdev->port_link_state = LS_DOWN;
2219 set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
2226 ql_read_page0_reg_l(qdev,
2231 set_bit(QL_RESET_START, &qdev->flags) ;
2236 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
2241 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2242 spin_unlock(&qdev->adapter_lock);
2244 ql_disable_interrupts(qdev);
2245 if (likely(napi_schedule_prep(&qdev->napi)))
2246 __napi_schedule(&qdev->napi);
2260 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2262 if (qdev->device_id == QL3022_DEVICE_ID)
2300 static int ql_send_map(struct ql3_adapter *qdev,
2318 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
2320 err = dma_mapping_error(&qdev->pdev->dev, map);
2322 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2356 map = dma_map_single(&qdev->pdev->dev, oal,
2360 err = dma_mapping_error(&qdev->pdev->dev, map);
2362 netdev_err(qdev->ndev,
2380 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2383 err = dma_mapping_error(&qdev->pdev->dev, map);
2385 netdev_err(qdev->ndev,
2422 dma_unmap_single(&qdev->pdev->dev,
2430 dma_unmap_page(&qdev->pdev->dev,
2436 dma_unmap_single(&qdev->pdev->dev,
2459 struct ql3_adapter *qdev = netdev_priv(ndev);
2461 qdev->mem_map_registers;
2466 if (unlikely(atomic_read(&qdev->tx_count) < 2))
2469 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2470 tx_cb->seg_count = ql_get_seg_count(qdev,
2480 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2482 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2483 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2486 if (qdev->device_id == QL3032_DEVICE_ID &&
2490 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2496 qdev->req_producer_index++;
2497 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2498 qdev->req_producer_index = 0;
2500 ql_write_common_reg_l(qdev,
2502 qdev->req_producer_index);
2504 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2506 qdev->req_producer_index, skb->len);
2508 atomic_dec(&qdev->tx_count);
2512 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2514 qdev->req_q_size =
2517 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2524 qdev->req_q_virt_addr =
2525 dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
2526 &qdev->req_q_phy_addr, GFP_KERNEL);
2528 if ((qdev->req_q_virt_addr == NULL) ||
2529 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2530 netdev_err(qdev->ndev, "reqQ failed\n");
2534 qdev->rsp_q_virt_addr =
2535 dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
2536 &qdev->rsp_q_phy_addr, GFP_KERNEL);
2538 if ((qdev->rsp_q_virt_addr == NULL) ||
2539 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2540 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2541 dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
2542 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2546 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2551 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2553 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2554 netdev_info(qdev->ndev, "Already done\n");
2558 dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
2559 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2561 qdev->req_q_virt_addr = NULL;
2563 dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
2564 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2566 qdev->rsp_q_virt_addr = NULL;
2568 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2571 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2574 qdev->lrg_buf_q_size =
2575 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2576 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2577 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2579 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2581 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
2584 if (qdev->lrg_buf == NULL)
2587 qdev->lrg_buf_q_alloc_virt_addr =
2588 dma_alloc_coherent(&qdev->pdev->dev,
2589 qdev->lrg_buf_q_alloc_size,
2590 &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
2592 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2593 netdev_err(qdev->ndev, "lBufQ failed\n");
2594 kfree(qdev->lrg_buf);
2597 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2598 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2601 qdev->small_buf_q_size =
2603 if (qdev->small_buf_q_size < PAGE_SIZE)
2604 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2606 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2608 qdev->small_buf_q_alloc_virt_addr =
2609 dma_alloc_coherent(&qdev->pdev->dev,
2610 qdev->small_buf_q_alloc_size,
2611 &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
2613 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2614 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2615 dma_free_coherent(&qdev->pdev->dev,
2616 qdev->lrg_buf_q_alloc_size,
2617 qdev->lrg_buf_q_alloc_virt_addr,
2618 qdev->lrg_buf_q_alloc_phy_addr);
2619 kfree(qdev->lrg_buf);
2623 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2624 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2625 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2629 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2631 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2632 netdev_info(qdev->ndev, "Already done\n");
2635 kfree(qdev->lrg_buf);
2636 dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
2637 qdev->lrg_buf_q_alloc_virt_addr,
2638 qdev->lrg_buf_q_alloc_phy_addr);
2640 qdev->lrg_buf_q_virt_addr = NULL;
2642 dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
2643 qdev->small_buf_q_alloc_virt_addr,
2644 qdev->small_buf_q_alloc_phy_addr);
2646 qdev->small_buf_q_virt_addr = NULL;
2648 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2651 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2657 qdev->small_buf_total_size =
2661 qdev->small_buf_virt_addr =
2662 dma_alloc_coherent(&qdev->pdev->dev,
2663 qdev->small_buf_total_size,
2664 &qdev->small_buf_phy_addr, GFP_KERNEL);
2666 if (qdev->small_buf_virt_addr == NULL) {
2667 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2671 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2672 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2674 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2679 cpu_to_le32(qdev->small_buf_phy_addr_high);
2681 cpu_to_le32(qdev->small_buf_phy_addr_low +
2685 qdev->small_buf_index = 0;
2686 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2690 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2692 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2693 netdev_info(qdev->ndev, "Already done\n");
2696 if (qdev->small_buf_virt_addr != NULL) {
2697 dma_free_coherent(&qdev->pdev->dev,
2698 qdev->small_buf_total_size,
2699 qdev->small_buf_virt_addr,
2700 qdev->small_buf_phy_addr);
2702 qdev->small_buf_virt_addr = NULL;
2706 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2711 for (i = 0; i < qdev->num_large_buffers; i++) {
2712 lrg_buf_cb = &qdev->lrg_buf[i];
2715 dma_unmap_single(&qdev->pdev->dev,
2726 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2730 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2732 for (i = 0; i < qdev->num_large_buffers; i++) {
2733 lrg_buf_cb = &qdev->lrg_buf[i];
2738 qdev->lrg_buf_index = 0;
2739 qdev->lrg_buf_skb_check = 0;
2742 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2750 for (i = 0; i < qdev->num_large_buffers; i++) {
2751 lrg_buf_cb = &qdev->lrg_buf[i];
2754 skb = netdev_alloc_skb(qdev->ndev,
2755 qdev->lrg_buffer_len);
2758 netdev_err(qdev->ndev,
2760 qdev->lrg_buffer_len * 2, i);
2761 ql_free_large_buffers(qdev);
2770 map = dma_map_single(&qdev->pdev->dev, skb->data,
2771 qdev->lrg_buffer_len - QL_HEADER_SPACE,
2774 err = dma_mapping_error(&qdev->pdev->dev, map);
2776 netdev_err(qdev->ndev,
2780 ql_free_large_buffers(qdev);
2787 qdev->lrg_buffer_len -
2798 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2803 tx_cb = &qdev->tx_buf[0];
2811 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2815 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2820 tx_cb = &qdev->tx_buf[i];
2831 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2833 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2834 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2835 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2836 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2840 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2841 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2843 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2844 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2847 qdev->num_large_buffers =
2848 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2849 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2850 qdev->max_frame_size =
2851 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2858 qdev->shadow_reg_virt_addr =
2859 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2860 &qdev->shadow_reg_phy_addr, GFP_KERNEL);
2862 if (qdev->shadow_reg_virt_addr != NULL) {
2863 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2864 qdev->req_consumer_index_phy_addr_high =
2865 MS_64BITS(qdev->shadow_reg_phy_addr);
2866 qdev->req_consumer_index_phy_addr_low =
2867 LS_64BITS(qdev->shadow_reg_phy_addr);
2869 qdev->prsp_producer_index =
2870 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2871 qdev->rsp_producer_index_phy_addr_high =
2872 qdev->req_consumer_index_phy_addr_high;
2873 qdev->rsp_producer_index_phy_addr_low =
2874 qdev->req_consumer_index_phy_addr_low + 8;
2876 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2880 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2881 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2885 if (ql_alloc_buffer_queues(qdev) != 0) {
2886 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2890 if (ql_alloc_small_buffers(qdev) != 0) {
2891 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2895 if (ql_alloc_large_buffers(qdev) != 0) {
2896 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2901 ql_init_large_buffers(qdev);
2902 if (ql_create_send_free_list(qdev))
2905 qdev->rsp_current = qdev->rsp_q_virt_addr;
2909 ql_free_send_free_list(qdev);
2911 ql_free_buffer_queues(qdev);
2913 ql_free_net_req_rsp_queues(qdev);
2915 dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
2916 qdev->shadow_reg_virt_addr,
2917 qdev->shadow_reg_phy_addr);
2922 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2924 ql_free_send_free_list(qdev);
2925 ql_free_large_buffers(qdev);
2926 ql_free_small_buffers(qdev);
2927 ql_free_buffer_queues(qdev);
2928 ql_free_net_req_rsp_queues(qdev);
2929 if (qdev->shadow_reg_virt_addr != NULL) {
2930 dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
2931 qdev->shadow_reg_virt_addr,
2932 qdev->shadow_reg_phy_addr);
2933 qdev->shadow_reg_virt_addr = NULL;
2937 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2940 (void __iomem *)qdev->mem_map_registers;
2942 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2943 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2947 ql_write_page2_reg(qdev,
2948 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2950 ql_write_page2_reg(qdev,
2952 qdev->nvram_data.bufletCount);
2954 ql_write_page2_reg(qdev,
2956 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2957 (qdev->nvram_data.tcpWindowThreshold0));
2959 ql_write_page2_reg(qdev,
2961 qdev->nvram_data.tcpWindowThreshold50);
2963 ql_write_page2_reg(qdev,
2965 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2966 qdev->nvram_data.ipHashTableBaseLo);
2967 ql_write_page2_reg(qdev,
2969 qdev->nvram_data.ipHashTableSize);
2970 ql_write_page2_reg(qdev,
2972 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2973 qdev->nvram_data.tcpHashTableBaseLo);
2974 ql_write_page2_reg(qdev,
2976 qdev->nvram_data.tcpHashTableSize);
2977 ql_write_page2_reg(qdev,
2979 (qdev->nvram_data.ncbTableBaseHi << 16) |
2980 qdev->nvram_data.ncbTableBaseLo);
2981 ql_write_page2_reg(qdev,
2983 qdev->nvram_data.ncbTableSize);
2984 ql_write_page2_reg(qdev,
2986 (qdev->nvram_data.drbTableBaseHi << 16) |
2987 qdev->nvram_data.drbTableBaseLo);
2988 ql_write_page2_reg(qdev,
2990 qdev->nvram_data.drbTableSize);
2991 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2995 static int ql_adapter_initialize(struct ql3_adapter *qdev)
2999 qdev->mem_map_registers;
3006 if (ql_mii_setup(qdev))
3010 ql_write_common_reg(qdev, spir,
3015 qdev->port_link_state = LS_DOWN;
3016 netif_carrier_off(qdev->ndev);
3019 ql_write_common_reg(qdev, spir,
3024 *((u32 *)(qdev->preq_consumer_index)) = 0;
3025 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3026 qdev->req_producer_index = 0;
3028 ql_write_page1_reg(qdev,
3030 qdev->req_consumer_index_phy_addr_high);
3031 ql_write_page1_reg(qdev,
3033 qdev->req_consumer_index_phy_addr_low);
3035 ql_write_page1_reg(qdev,
3037 MS_64BITS(qdev->req_q_phy_addr));
3038 ql_write_page1_reg(qdev,
3040 LS_64BITS(qdev->req_q_phy_addr));
3041 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3044 *((__le16 *) (qdev->prsp_producer_index)) = 0;
3045 qdev->rsp_consumer_index = 0;
3046 qdev->rsp_current = qdev->rsp_q_virt_addr;
3048 ql_write_page1_reg(qdev,
3050 qdev->rsp_producer_index_phy_addr_high);
3052 ql_write_page1_reg(qdev,
3054 qdev->rsp_producer_index_phy_addr_low);
3056 ql_write_page1_reg(qdev,
3058 MS_64BITS(qdev->rsp_q_phy_addr));
3060 ql_write_page1_reg(qdev,
3062 LS_64BITS(qdev->rsp_q_phy_addr));
3064 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3067 ql_write_page1_reg(qdev,
3069 MS_64BITS(qdev->lrg_buf_q_phy_addr));
3071 ql_write_page1_reg(qdev,
3073 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3075 ql_write_page1_reg(qdev,
3077 qdev->num_lbufq_entries);
3079 ql_write_page1_reg(qdev,
3081 qdev->lrg_buffer_len);
3084 ql_write_page1_reg(qdev,
3086 MS_64BITS(qdev->small_buf_q_phy_addr));
3088 ql_write_page1_reg(qdev,
3090 LS_64BITS(qdev->small_buf_q_phy_addr));
3092 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3093 ql_write_page1_reg(qdev,
3097 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3098 qdev->small_buf_release_cnt = 8;
3099 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3100 qdev->lrg_buf_release_cnt = 8;
3101 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3102 qdev->small_buf_index = 0;
3103 qdev->lrg_buf_index = 0;
3104 qdev->lrg_buf_free_count = 0;
3105 qdev->lrg_buf_free_head = NULL;
3106 qdev->lrg_buf_free_tail = NULL;
3108 ql_write_common_reg(qdev,
3111 qdev->small_buf_q_producer_index);
3112 ql_write_common_reg(qdev,
3115 qdev->lrg_buf_q_producer_index);
3121 clear_bit(QL_LINK_MASTER, &qdev->flags);
3122 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3126 if (ql_init_misc_registers(qdev)) {
3131 value = qdev->nvram_data.tcpMaxWindowSize;
3132 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3134 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3136 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3137 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3142 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3143 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3147 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3150 if (qdev->mac_index)
3151 ql_write_page0_reg(qdev,
3153 qdev->max_frame_size);
3155 ql_write_page0_reg(qdev,
3157 qdev->max_frame_size);
3159 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3160 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3166 PHY_Setup(qdev);
3167 ql_init_scan_mode(qdev);
3168 ql_get_phy_owner(qdev);
3173 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3175 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3176 ((qdev->ndev->dev_addr[2] << 24)
3177 | (qdev->ndev->dev_addr[3] << 16)
3178 | (qdev->ndev->dev_addr[4] << 8)
3179 | qdev->ndev->dev_addr[5]));
3182 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3184 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3185 ((qdev->ndev->dev_addr[0] << 8)
3186 | qdev->ndev->dev_addr[1]));
3189 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3194 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3196 (qdev->mac_index << 2)));
3197 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3199 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3201 ((qdev->mac_index << 2) + 1)));
3202 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3204 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3207 ql_write_page0_reg(qdev,
3212 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3215 spin_unlock_irq(&qdev->hw_lock);
3217 spin_lock_irq(&qdev->hw_lock);
3221 netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3227 if (qdev->device_id == QL3032_DEVICE_ID) {
3232 ql_write_page0_reg(qdev, &port_regs->functionControl,
3238 ql_write_page0_reg(qdev, &port_regs->portControl,
3250 static int ql_adapter_reset(struct ql3_adapter *qdev)
3253 qdev->mem_map_registers;
3258 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3259 clear_bit(QL_RESET_DONE, &qdev->flags);
3264 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3265 ql_write_common_reg(qdev,
3270 netdev_printk(KERN_DEBUG, qdev->ndev,
3277 ql_read_common_reg(qdev,
3290 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3292 netdev_printk(KERN_DEBUG, qdev->ndev,
3294 ql_write_common_reg(qdev,
3302 ql_write_common_reg(qdev,
3313 value = ql_read_common_reg(qdev,
3324 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3325 set_bit(QL_RESET_DONE, &qdev->flags);
3329 static void ql_set_mac_info(struct ql3_adapter *qdev)
3332 qdev->mem_map_registers;
3338 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3340 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3343 qdev->mac_index = 0;
3344 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3345 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3346 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3348 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3350 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3354 qdev->mac_index = 1;
3355 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3356 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3357 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3359 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3361 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3367 netdev_printk(KERN_DEBUG, qdev->ndev,
3372 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3377 struct ql3_adapter *qdev = netdev_priv(ndev);
3378 struct pci_dev *pdev = qdev->pdev;
3382 DRV_NAME, qdev->index, qdev->chip_rev_id,
3383 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3384 qdev->pci_slot);
3386 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3392 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3393 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3396 qdev->mem_map_registers);
3399 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3402 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3404 struct net_device *ndev = qdev->ndev;
3410 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3411 clear_bit(QL_LINK_MASTER, &qdev->flags);
3413 ql_disable_interrupts(qdev);
3415 free_irq(qdev->pdev->irq, ndev);
3417 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3418 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3419 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3420 pci_disable_msi(qdev->pdev);
3423 del_timer_sync(&qdev->adapter_timer);
3425 napi_disable(&qdev->napi);
3431 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3432 if (ql_wait_for_drvr_lock(qdev)) {
3433 soft_reset = ql_adapter_reset(qdev);
3436 qdev->index);
3445 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3447 ql_free_mem_resources(qdev);
3451 static int ql_adapter_up(struct ql3_adapter *qdev)
3453 struct net_device *ndev = qdev->ndev;
3458 if (ql_alloc_mem_resources(qdev)) {
3463 if (qdev->msi) {
3464 if (pci_enable_msi(qdev->pdev)) {
3467 qdev->msi = 0;
3470 set_bit(QL_MSI_ENABLED, &qdev->flags);
3475 err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3480 qdev->pdev->irq);
3484 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3486 if (!ql_wait_for_drvr_lock(qdev)) {
3492 err = ql_adapter_initialize(qdev);
3497 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3499 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3501 set_bit(QL_ADAPTER_UP, &qdev->flags);
3503 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3505 napi_enable(&qdev->napi);
3506 ql_enable_interrupts(qdev);
3510 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3512 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3513 free_irq(qdev->pdev->irq, ndev);
3515 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3517 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3518 pci_disable_msi(qdev->pdev);
3523 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3525 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3526 netdev_err(qdev->ndev,
3529 dev_close(qdev->ndev);
3538 struct ql3_adapter *qdev = netdev_priv(ndev);
3544 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3547 ql_adapter_down(qdev, QL_DO_RESET);
3553 struct ql3_adapter *qdev = netdev_priv(ndev);
3554 return ql_adapter_up(qdev);
3559 struct ql3_adapter *qdev = netdev_priv(ndev);
3561 qdev->mem_map_registers;
3573 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3575 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3577 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3583 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3585 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3587 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3594 struct ql3_adapter *qdev = netdev_priv(ndev);
3605 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3610 struct ql3_adapter *qdev =
3612 struct net_device *ndev = qdev->ndev;
3617 qdev->mem_map_registers;
3620 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
3621 test_bit(QL_RESET_START, &qdev->flags)) {
3622 clear_bit(QL_LINK_MASTER, &qdev->flags);
3629 tx_cb = &qdev->tx_buf[i];
3633 dma_unmap_single(&qdev->pdev->dev,
3638 dma_unmap_page(&qdev->pdev->dev,
3649 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3650 ql_write_common_reg(qdev,
3659 value = ql_read_common_reg(qdev,
3672 ql_write_common_reg(qdev,
3680 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3682 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3684 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3695 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3696 clear_bit(QL_RESET_START, &qdev->flags);
3697 ql_cycle_adapter(qdev, QL_DO_RESET);
3701 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3702 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3703 clear_bit(QL_RESET_START, &qdev->flags);
3704 ql_cycle_adapter(qdev, QL_NO_RESET);
3710 struct ql3_adapter *qdev =
3713 ql_cycle_adapter(qdev, QL_DO_RESET);
3716 static void ql_get_board_info(struct ql3_adapter *qdev)
3719 qdev->mem_map_registers;
3722 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3724 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3726 qdev->pci_width = 64;
3728 qdev->pci_width = 32;
3730 qdev->pci_x = 1;
3732 qdev->pci_x = 0;
3733 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3738 struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
3739 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3755 struct ql3_adapter *qdev = NULL;
3789 qdev = netdev_priv(ndev);
3790 qdev->index = cards_found;
3791 qdev->ndev = ndev;
3792 qdev->pdev = pdev;
3793 qdev->device_id = pci_entry->device;
3794 qdev->port_link_state = LS_DOWN;
3796 qdev->msi = 1;
3798 qdev->msg_enable = netif_msg_init(debug, default_msg);
3801 if (qdev->device_id == QL3032_DEVICE_ID)
3804 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3805 if (!qdev->mem_map_registers) {
3811 spin_lock_init(&qdev->adapter_lock);
3812 spin_lock_init(&qdev->hw_lock);
3819 netif_napi_add(ndev, &qdev->napi, ql_poll);
3824 if (ql_get_nvram_params(qdev)) {
3826 __func__, qdev->index);
3831 ql_set_mac_info(qdev);
3834 if (qdev->mac_index) {
3835 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
3836 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3838 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
3839 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3845 ql_get_board_info(qdev);
3851 if (qdev->pci_x)
3865 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3866 if (!qdev->workqueue) {
3872 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3873 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3874 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3876 timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
3877 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3890 iounmap(qdev->mem_map_registers);
3904 struct ql3_adapter *qdev = netdev_priv(ndev);
3908 ql_disable_interrupts(qdev);
3910 if (qdev->workqueue) {
3911 cancel_delayed_work(&qdev->reset_work);
3912 cancel_delayed_work(&qdev->tx_timeout_work);
3913 destroy_workqueue(qdev->workqueue);
3914 qdev->workqueue = NULL;
3917 iounmap(qdev->mem_map_registers);