Lines Matching refs:qdev

103 static int ql_sem_spinlock(struct ql3_adapter *qdev,
107 qdev->mem_map_registers;
122 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
125 qdev->mem_map_registers;
130 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
133 qdev->mem_map_registers;
144 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
149 if (ql_sem_lock(qdev,
151 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
153 netdev_printk(KERN_DEBUG, qdev->ndev,
160 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
164 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
167 qdev->mem_map_registers;
172 qdev->current_page = page;
175 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
180 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
182 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
187 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
192 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
197 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
199 if (qdev->current_page != 0)
200 ql_set_register_page(qdev, 0);
203 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
207 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
209 if (qdev->current_page != 0)
210 ql_set_register_page(qdev, 0);
214 static void ql_write_common_reg_l(struct ql3_adapter *qdev,
219 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
222 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
225 static void ql_write_common_reg(struct ql3_adapter *qdev,
232 static void ql_write_nvram_reg(struct ql3_adapter *qdev,
240 static void ql_write_page0_reg(struct ql3_adapter *qdev,
243 if (qdev->current_page != 0)
244 ql_set_register_page(qdev, 0);
252 static void ql_write_page1_reg(struct ql3_adapter *qdev,
255 if (qdev->current_page != 1)
256 ql_set_register_page(qdev, 1);
264 static void ql_write_page2_reg(struct ql3_adapter *qdev,
267 if (qdev->current_page != 2)
268 ql_set_register_page(qdev, 2);
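The helpers above (source lines 164-268) implement the chip's paged register window: the page-0/1/2 registers share one BAR, so each access first checks the cached qdev->current_page and rewrites the page-select register only on a miss, while the _l variants additionally take qdev->hw_lock. A minimal userspace sketch of just the page-caching idea, with hypothetical types and register offsets:

    /* Sketch only: models the page cache behind ql_set_register_page()
     * and ql_read_page0_reg(); the register layout here is hypothetical. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SELECT 0            /* hypothetical page-select offset */
    #define NREGS       16

    struct fake_adapter {
        uint32_t regs[NREGS];        /* stands in for the mapped BAR */
        uint32_t current_page;       /* mirrors qdev->current_page */
    };

    static void set_register_page(struct fake_adapter *a, uint32_t page)
    {
        a->regs[PAGE_SELECT] = page; /* the driver also writes a mask */
        a->current_page = page;      /* cache so later calls can skip this */
    }

    static uint32_t read_page0_reg(struct fake_adapter *a, unsigned int reg)
    {
        if (a->current_page != 0)    /* switch pages only when needed */
            set_register_page(a, 0);
        return a->regs[reg];
    }

    int main(void)
    {
        struct fake_adapter a = { .current_page = 2 };

        printf("%u\n", (unsigned)read_page0_reg(&a, 1)); /* forces a switch */
        return 0;
    }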
273 static void ql_disable_interrupts(struct ql3_adapter *qdev)
276 qdev->mem_map_registers;
278 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
283 static void ql_enable_interrupts(struct ql3_adapter *qdev)
286 qdev->mem_map_registers;
288 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
293 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
300 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
301 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
303 qdev->lrg_buf_free_tail->next = lrg_buf_cb;
304 qdev->lrg_buf_free_tail = lrg_buf_cb;
308 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
309 qdev->lrg_buffer_len);
311 qdev->lrg_buf_skb_check++;
318 map = dma_map_single(&qdev->pdev->dev,
320 qdev->lrg_buffer_len - QL_HEADER_SPACE,
322 err = dma_mapping_error(&qdev->pdev->dev, map);
324 netdev_err(qdev->ndev,
330 qdev->lrg_buf_skb_check++;
340 qdev->lrg_buffer_len -
345 qdev->lrg_buf_free_count++;
349 *qdev)  /* continuation of: static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list() */
351 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
354 qdev->lrg_buf_free_head = lrg_buf_cb->next;
355 if (qdev->lrg_buf_free_head == NULL)
356 qdev->lrg_buf_free_tail = NULL;
357 qdev->lrg_buf_free_count--;
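ql_release_to_lrg_buf_free_list() and ql_get_from_lrg_buf_free_list() (source lines 293-357) keep recycled receive buffers on a singly linked list with separate head and tail pointers: release appends at the tail (initializing both pointers when the list is empty), get pops from the head and clears the tail once the list drains. The list discipline in isolation, with stand-in types:

    #include <stddef.h>

    struct buf_cb {                 /* stand-in for struct ql_rcv_buf_cb */
        struct buf_cb *next;
    };

    struct freelist {               /* stand-in for the qdev list fields */
        struct buf_cb *head, *tail;
        int count;
    };

    static void release_to_free_list(struct freelist *fl, struct buf_cb *cb)
    {
        cb->next = NULL;
        if (fl->tail == NULL) {     /* list was empty */
            fl->head = fl->tail = cb;
        } else {
            fl->tail->next = cb;    /* append at the tail */
            fl->tail = cb;
        }
        fl->count++;
    }

    static struct buf_cb *get_from_free_list(struct freelist *fl)
    {
        struct buf_cb *cb = fl->head;

        if (cb) {
            fl->head = cb->next;    /* pop from the head */
            if (fl->head == NULL)   /* list drained: tail must follow */
                fl->tail = NULL;
            fl->count--;
        }
        return cb;
    }

    int main(void)
    {
        struct freelist fl = { 0 };
        struct buf_cb a = { 0 }, b = { 0 };

        release_to_free_list(&fl, &a);
        release_to_free_list(&fl, &b);
        while (get_from_free_list(&fl))
            ;                       /* drain: head and tail end up NULL */
        return 0;
    }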
366 static void fm93c56a_deselect(struct ql3_adapter *qdev);
367 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
373 static void fm93c56a_select(struct ql3_adapter *qdev)
376 qdev->mem_map_registers;
379 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
380 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
386 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
393 qdev->mem_map_registers;
397 ql_write_nvram_reg(qdev, spir,
398 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
400 ql_write_nvram_reg(qdev, spir,
401 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
403 ql_write_nvram_reg(qdev, spir,
404 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
416 ql_write_nvram_reg(qdev, spir,
418 qdev->eeprom_cmd_data | dataBit));
421 ql_write_nvram_reg(qdev, spir,
422 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
424 ql_write_nvram_reg(qdev, spir,
425 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
441 ql_write_nvram_reg(qdev, spir,
443 qdev->eeprom_cmd_data | dataBit));
446 ql_write_nvram_reg(qdev, spir,
447 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
449 ql_write_nvram_reg(qdev, spir,
450 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
459 static void fm93c56a_deselect(struct ql3_adapter *qdev)
462 qdev->mem_map_registers;
465 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
466 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
472 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
478 qdev->mem_map_registers;
484 ql_write_nvram_reg(qdev, spir,
485 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
487 ql_write_nvram_reg(qdev, spir,
488 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
490 dataBit = (ql_read_common_reg(qdev, spir) &
500 static void eeprom_readword(struct ql3_adapter *qdev,
503 fm93c56a_select(qdev);
504 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
505 fm93c56a_datain(qdev, value);
506 fm93c56a_deselect(qdev);
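fm93c56a_select/cmd/datain/deselect (source lines 373-506) bit-bang a Microwire-style FM93C56A serial EEPROM through the serial-port interface register: assert chip select, clock out a start bit, the READ opcode, and the word address one bit at a time, then clock in 16 data bits and deselect. A sketch of the bit framing, where write_bit()/read_bit() stand in for the ql_write_nvram_reg()/ql_read_common_reg() accesses that wiggle the clock and data lines (the 10b READ opcode is the standard 93C56-family encoding):

    #include <stdint.h>

    /* Hypothetical line-level callbacks; the driver toggles these bits
     * via ql_write_nvram_reg(). */
    typedef void (*write_bit_fn)(int bit);
    typedef int  (*read_bit_fn)(void);

    #define EEPROM_READ_OPCODE 0x2      /* 10b: READ, per the 93C56 family */

    static uint16_t microwire_read(write_bit_fn wr, read_bit_fn rd,
                                   unsigned int addr, unsigned int addr_bits)
    {
        uint16_t value = 0;
        int i;

        wr(1);                                   /* start bit */
        wr((EEPROM_READ_OPCODE >> 1) & 1);       /* opcode, MSB first */
        wr(EEPROM_READ_OPCODE & 1);

        for (i = (int)addr_bits - 1; i >= 0; i--)
            wr((addr >> i) & 1);                 /* address, MSB first */

        for (i = 0; i < 16; i++)                 /* 16 data bits */
            value = (uint16_t)((value << 1) | (rd() & 1));

        return value;
    }

    static void wr_stub(int bit) { (void)bit; }
    static int  rd_stub(void)   { return 1; }    /* always reads 1 */

    int main(void)
    {
        return microwire_read(wr_stub, rd_stub, 5, 8) == 0xffff ? 0 : 1;
    }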
517 static int ql_get_nvram_params(struct ql3_adapter *qdev)
524 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
526 pEEPROMData = (u16 *)&qdev->nvram_data;
527 qdev->eeprom_cmd_data = 0;
528 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
529 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
532 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
537 eeprom_readword(qdev, index, pEEPROMData);
541 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
544 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
546 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
550 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
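ql_get_nvram_params() (source lines 517-550) reads the whole NVRAM image word by word under the NVRAM hardware semaphore, then validates it by summing every 16-bit word; a correctly programmed image sums to zero, hence the "checksum should be zero" error. The validation step alone, runnable:

    #include <stdint.h>
    #include <stdio.h>

    /* Sum all 16-bit words of an NVRAM image; the final word is programmed
     * so the modulo-2^16 total comes out to zero on a good image. */
    static uint16_t nvram_checksum(const uint16_t *words, unsigned int count)
    {
        uint16_t sum = 0;
        unsigned int i;

        for (i = 0; i < count; i++)
            sum = (uint16_t)(sum + words[i]);
        return sum;            /* 0 means the image is intact */
    }

    int main(void)
    {
        uint16_t image[4] = { 0x1234, 0x00ff, 0x0001, 0 };

        image[3] = (uint16_t)(0 - (0x1234 + 0x00ff + 0x0001)); /* fix-up */
        printf("checksum=%#x (expect 0)\n", nvram_checksum(image, 4));
        return 0;
    }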
558 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
561 qdev->mem_map_registers;
566 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
575 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
578 qdev->mem_map_registers;
581 if (qdev->numPorts > 1) {
594 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
597 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
602 static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
606 qdev->mem_map_registers;
609 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
622 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
625 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
632 static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
636 qdev->mem_map_registers;
639 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
641 if (ql_wait_for_mii_ready(qdev)) {
642 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
646 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
649 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
652 if (ql_wait_for_mii_ready(qdev)) {
653 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
658 ql_mii_enable_scan_mode(qdev);
663 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
667 qdev->mem_map_registers;
671 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
673 if (ql_wait_for_mii_ready(qdev)) {
674 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
678 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
681 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
684 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
688 if (ql_wait_for_mii_ready(qdev)) {
689 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
693 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
697 ql_mii_enable_scan_mode(qdev);
702 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
705 qdev->mem_map_registers;
707 ql_mii_disable_scan_mode(qdev);
709 if (ql_wait_for_mii_ready(qdev)) {
710 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
714 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
715 qdev->PHYAddr | regAddr);
717 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
720 if (ql_wait_for_mii_ready(qdev)) {
721 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
725 ql_mii_enable_scan_mode(qdev);
730 static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
734 qdev->mem_map_registers;
736 ql_mii_disable_scan_mode(qdev);
738 if (ql_wait_for_mii_ready(qdev)) {
739 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
743 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
744 qdev->PHYAddr | regAddr);
746 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
749 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
753 if (ql_wait_for_mii_ready(qdev)) {
754 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
758 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
761 ql_mii_enable_scan_mode(qdev);
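ql_mii_write_reg()/ql_mii_read_reg() and the _ex variants (source lines 632-761) share one MDIO management sequence: pause the controller's automatic PHY scan, poll macMIIStatusReg until the interface is idle, program the address register, start the transfer, poll idle again, then move the data and re-enable scan mode. The polling skeleton, with a fake register file and a hypothetical register map so it runs standalone:

    #include <stdint.h>

    enum { MII_ADDR, MII_DATA, MII_CTRL, MII_STATUS }; /* hypothetical map */
    #define MII_BUSY     0x1
    #define MII_READ_CMD 0x2
    #define MII_TRIES    1000

    static uint32_t fake_regs[4];                    /* stands in for the BAR */
    static uint32_t rd32(unsigned int r)             { return fake_regs[r]; }
    static void     wr32(unsigned int r, uint32_t v) { fake_regs[r] = v; }

    static int mii_wait_ready(void)    /* analogue of ql_wait_for_mii_ready() */
    {
        int i;

        for (i = 0; i < MII_TRIES; i++)
            if (!(rd32(MII_STATUS) & MII_BUSY))
                return 0;
        return -1;                     /* the TIMED_OUT_MSG case */
    }

    static int mii_read(unsigned int phy, unsigned int reg, uint16_t *val)
    {
        if (mii_wait_ready())
            return -1;
        wr32(MII_ADDR, (phy << 8) | reg);  /* select PHY + register */
        wr32(MII_CTRL, MII_READ_CMD);      /* kick off the read cycle */
        if (mii_wait_ready())
            return -1;
        *val = (uint16_t)rd32(MII_DATA);
        return 0;
    }

    int main(void)
    {
        uint16_t v;

        return mii_read(1, 2, &v);         /* succeeds: never reads busy */
    }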
766 static void ql_petbi_reset(struct ql3_adapter *qdev)
768 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
771 static void ql_petbi_start_neg(struct ql3_adapter *qdev)
776 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
778 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
780 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
783 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
789 static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
791 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
792 PHYAddr[qdev->mac_index]);
795 static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
800 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
801 PHYAddr[qdev->mac_index]);
803 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
804 PHYAddr[qdev->mac_index]);
806 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
808 PHYAddr[qdev->mac_index]);
810 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
813 PHYAddr[qdev->mac_index]);
816 static void ql_petbi_init(struct ql3_adapter *qdev)
818 ql_petbi_reset(qdev);
819 ql_petbi_start_neg(qdev);
822 static void ql_petbi_init_ex(struct ql3_adapter *qdev)
824 ql_petbi_reset_ex(qdev);
825 ql_petbi_start_neg_ex(qdev);
828 static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
832 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
838 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
840 netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
842 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
844 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
846 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
848 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
850 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
852 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
854 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
856 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
858 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
860 ql_mii_write_reg_ex(qdev, 0x11,
861 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
867 ql_mii_write_reg(qdev, 0x12, 0x840a);
868 ql_mii_write_reg(qdev, 0x00, 0x1140);
869 ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
872 static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
895 netdev_info(qdev->ndev, "Phy: %s\n",
905 static int ql_phy_get_speed(struct ql3_adapter *qdev)
909 switch (qdev->phyType) {
911 if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
918 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
936 static int ql_is_full_dup(struct ql3_adapter *qdev)
940 switch (qdev->phyType) {
942 if (ql_mii_read_reg(qdev, 0x1A, &reg))
949 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
956 static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
960 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
966 static int PHY_Setup(struct ql3_adapter *qdev)
975 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
977 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
981 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
983 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
992 if (qdev->mac_index == 0)
997 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
999 netdev_err(qdev->ndev,
1004 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1006 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1016 qdev->phyType = getPhyType(qdev, reg1, reg2);
1018 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1020 phyAgereSpecificInit(qdev, miiAddr);
1021 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1022 netdev_err(qdev->ndev, "PHY is unknown\n");
1032 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1035 qdev->mem_map_registers;
1043 if (qdev->mac_index)
1044 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1046 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1052 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1055 qdev->mem_map_registers;
1063 if (qdev->mac_index)
1064 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1066 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1072 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1075 qdev->mem_map_registers;
1083 if (qdev->mac_index)
1084 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1086 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1092 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1095 qdev->mem_map_registers;
1103 if (qdev->mac_index)
1104 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1106 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1112 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1115 qdev->mem_map_registers;
1125 if (qdev->mac_index)
1126 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1128 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
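ql_mac_enable() through ql_mac_cfg_pause() (source lines 1032-1128) all write mac0ConfigReg or mac1ConfigReg, chosen by qdev->mac_index, with a value whose upper 16 bits appear to act as a write-enable mask for the corresponding low bits; that convention lets one write set or clear a single bit without a read-modify-write. Assuming that masked-write layout, the value construction reduces to:

    #include <stdint.h>

    #define CFG_ENABLE 0x0001u    /* hypothetical MAC-enable config bit */

    /* Masked write: the top half tells the chip which low bits to change,
     * mirroring the (bit | (bit << 16)) vs (bit << 16) pattern above. */
    static uint32_t masked_cfg(uint32_t bit, int set)
    {
        return set ? ((bit << 16) | bit)  /* touch the bit and set it */
                   : (bit << 16);         /* touch the bit and clear it */
    }

    int main(void)
    {
        return masked_cfg(CFG_ENABLE, 1) == 0x00010001u ? 0 : 1;
    }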
1134 static int ql_is_fiber(struct ql3_adapter *qdev)
1137 qdev->mem_map_registers;
1141 switch (qdev->mac_index) {
1150 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1154 static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1157 ql_mii_read_reg(qdev, 0x00, &reg);
1164 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1167 qdev->mem_map_registers;
1171 switch (qdev->mac_index) {
1180 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1182 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1185 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1192 static int ql_is_neg_pause(struct ql3_adapter *qdev)
1194 if (ql_is_fiber(qdev))
1195 return ql_is_petbi_neg_pause(qdev);
1197 return ql_is_phy_neg_pause(qdev);
1200 static int ql_auto_neg_error(struct ql3_adapter *qdev)
1203 qdev->mem_map_registers;
1207 switch (qdev->mac_index) {
1215 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1219 static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1221 if (ql_is_fiber(qdev))
1224 return ql_phy_get_speed(qdev);
1227 static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1229 if (ql_is_fiber(qdev))
1232 return ql_is_full_dup(qdev);
1238 static int ql_link_down_detect(struct ql3_adapter *qdev)
1241 qdev->mem_map_registers;
1245 switch (qdev->mac_index) {
1255 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1262 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1265 qdev->mem_map_registers;
1267 switch (qdev->mac_index) {
1269 ql_write_common_reg(qdev,
1276 ql_write_common_reg(qdev,
1292 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1295 qdev->mem_map_registers;
1299 switch (qdev->mac_index) {
1310 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1312 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1317 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1321 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
1323 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1324 PHYAddr[qdev->mac_index]);
1327 static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1332 if (qdev->phyType == PHY_AGERE_ET1011C)
1333 ql_mii_write_reg(qdev, 0x13, 0x0000);
1336 if (qdev->mac_index == 0)
1338 qdev->nvram_data.macCfg_port0.portConfiguration;
1341 qdev->nvram_data.macCfg_port1.portConfiguration;
1349 ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1350 PHYAddr[qdev->mac_index]);
1360 ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1361 PHYAddr[qdev->mac_index]);
1364 ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1365 PHYAddr[qdev->mac_index]);
1390 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1391 PHYAddr[qdev->mac_index]);
1393 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1395 ql_mii_write_reg_ex(qdev, CONTROL_REG,
1397 PHYAddr[qdev->mac_index]);
1400 static void ql_phy_init_ex(struct ql3_adapter *qdev)
1402 ql_phy_reset_ex(qdev);
1403 PHY_Setup(qdev);
1404 ql_phy_start_neg_ex(qdev);
1410 static u32 ql_get_link_state(struct ql3_adapter *qdev)
1413 qdev->mem_map_registers;
1417 switch (qdev->mac_index) {
1426 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1435 static int ql_port_start(struct ql3_adapter *qdev)
1437 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1438 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1440 netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1444 if (ql_is_fiber(qdev)) {
1445 ql_petbi_init(qdev);
1448 ql_phy_init_ex(qdev);
1451 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1455 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1458 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1459 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1463 if (!ql_auto_neg_error(qdev)) {
1464 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1466 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1468 ql_mac_cfg_soft_reset(qdev, 1);
1469 ql_mac_cfg_gig(qdev,
1471 (qdev) ==
1473 ql_mac_cfg_full_dup(qdev,
1475 (qdev));
1476 ql_mac_cfg_pause(qdev,
1478 (qdev));
1479 ql_mac_cfg_soft_reset(qdev, 0);
1482 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1484 ql_mac_enable(qdev, 1);
1487 qdev->port_link_state = LS_UP;
1488 netif_start_queue(qdev->ndev);
1489 netif_carrier_on(qdev->ndev);
1490 netif_info(qdev, link, qdev->ndev,
1492 ql_get_link_speed(qdev),
1493 ql_is_link_full_dup(qdev) ? "full" : "half");
1497 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1498 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1504 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1505 if (ql_port_start(qdev)) /* Restart port */
1510 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1516 struct ql3_adapter *qdev =
1522 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1524 curr_link_state = ql_get_link_state(qdev);
1526 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1527 netif_info(qdev, link, qdev->ndev,
1530 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1533 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1538 switch (qdev->port_link_state) {
1540 if (test_bit(QL_LINK_MASTER, &qdev->flags))
1541 ql_port_start(qdev);
1542 qdev->port_link_state = LS_DOWN;
1547 netif_info(qdev, link, qdev->ndev, "Link is up\n");
1548 if (ql_is_auto_neg_complete(qdev))
1549 ql_finish_auto_neg(qdev);
1551 if (qdev->port_link_state == LS_UP)
1552 ql_link_down_detect_clear(qdev);
1554 qdev->port_link_state = LS_UP;
1564 netif_info(qdev, link, qdev->ndev, "Link is down\n");
1565 qdev->port_link_state = LS_DOWN;
1567 if (ql_link_down_detect(qdev))
1568 qdev->port_link_state = LS_DOWN;
1571 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1574 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
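ql_link_state_machine_work() (source lines 1516-1574) runs from the one-second adapter timer: it samples the link under hw_lock, switches on qdev->port_link_state (an unknown state starts the port and drops to down, down promotes to up once auto-negotiation completes, up falls back to down when link loss is detected), then re-arms the timer. Side effects aside, the transitions reduce to a small FSM, sketched here with names mirroring the driver's LS_* states:

    #include <stdio.h>

    enum link_state { LS_UNKNOWN, LS_DOWN, LS_UP };

    /* One timer tick of the link FSM; link_up is the sampled PHY status.
     * Side effects (port start, autoneg finish, carrier on/off) omitted. */
    static enum link_state link_step(enum link_state cur, int link_up)
    {
        switch (cur) {
        case LS_UNKNOWN:               /* first tick: start the port */
            return LS_DOWN;
        case LS_DOWN:
            return link_up ? LS_UP : LS_DOWN;
        case LS_UP:
        default:
            return link_up ? LS_UP : LS_DOWN;
        }
    }

    int main(void)
    {
        int samples[] = { 0, 1, 1, 0 };
        enum link_state s = LS_UNKNOWN;
        unsigned int i;

        for (i = 0; i < 4; i++) {
            s = link_step(s, samples[i]);
            printf("tick %u -> state %d\n", i, s);
        }
        return 0;
    }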
1580 static void ql_get_phy_owner(struct ql3_adapter *qdev)
1582 if (ql_this_adapter_controls_port(qdev))
1583 set_bit(QL_LINK_MASTER, &qdev->flags);
1585 clear_bit(QL_LINK_MASTER, &qdev->flags);
1591 static void ql_init_scan_mode(struct ql3_adapter *qdev)
1593 ql_mii_enable_scan_mode(qdev);
1595 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1596 if (ql_this_adapter_controls_port(qdev))
1597 ql_petbi_init_ex(qdev);
1599 if (ql_this_adapter_controls_port(qdev))
1600 ql_phy_init_ex(qdev);
1610 static int ql_mii_setup(struct ql3_adapter *qdev)
1614 qdev->mem_map_registers;
1616 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1617 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1621 if (qdev->device_id == QL3032_DEVICE_ID)
1622 ql_write_page0_reg(qdev,
1628 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1631 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1647 static u32 ql_supported_modes(struct ql3_adapter *qdev)
1649 if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1655 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1659 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1660 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1662 (qdev->mac_index) * 2) << 7)) {
1663 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1666 status = ql_is_auto_cfg(qdev);
1667 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1668 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1672 static u32 ql_get_speed(struct ql3_adapter *qdev)
1676 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1677 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1679 (qdev->mac_index) * 2) << 7)) {
1680 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1683 status = ql_get_link_speed(qdev);
1684 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1685 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1689 static int ql_get_full_dup(struct ql3_adapter *qdev)
1693 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1694 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1696 (qdev->mac_index) * 2) << 7)) {
1697 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1700 status = ql_is_link_full_dup(qdev);
1701 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1702 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1709 struct ql3_adapter *qdev = netdev_priv(ndev);
1712 supported = ql_supported_modes(qdev);
1714 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1718 cmd->base.phy_address = qdev->PHYAddr;
1720 advertising = ql_supported_modes(qdev);
1721 cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
1722 cmd->base.speed = ql_get_speed(qdev);
1723 cmd->base.duplex = ql_get_full_dup(qdev);
1736 struct ql3_adapter *qdev = netdev_priv(ndev);
1740 strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
1746 struct ql3_adapter *qdev = netdev_priv(ndev);
1747 return qdev->msg_enable;
1752 struct ql3_adapter *qdev = netdev_priv(ndev);
1753 qdev->msg_enable = value;
1759 struct ql3_adapter *qdev = netdev_priv(ndev);
1761 qdev->mem_map_registers;
1764 if (qdev->mac_index == 0)
1765 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1767 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1769 pause->autoneg = ql_get_auto_cfg_status(qdev);
1783 static int ql_populate_free_queue(struct ql3_adapter *qdev)
1785 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1792 netdev_alloc_skb(qdev->ndev,
1793 qdev->lrg_buffer_len);
1795 netdev_printk(KERN_DEBUG, qdev->ndev,
1804 map = dma_map_single(&qdev->pdev->dev,
1806 qdev->lrg_buffer_len - QL_HEADER_SPACE,
1809 err = dma_mapping_error(&qdev->pdev->dev, map);
1811 netdev_err(qdev->ndev,
1826 qdev->lrg_buffer_len -
1828 --qdev->lrg_buf_skb_check;
1829 if (!qdev->lrg_buf_skb_check)
1841 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1844 qdev->mem_map_registers;
1846 if (qdev->small_buf_release_cnt >= 16) {
1847 while (qdev->small_buf_release_cnt >= 16) {
1848 qdev->small_buf_q_producer_index++;
1850 if (qdev->small_buf_q_producer_index ==
1852 qdev->small_buf_q_producer_index = 0;
1853 qdev->small_buf_release_cnt -= 8;
1856 writel_relaxed(qdev->small_buf_q_producer_index,
1864 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1870 qdev->mem_map_registers;
1872 if ((qdev->lrg_buf_free_count >= 8) &&
1873 (qdev->lrg_buf_release_cnt >= 16)) {
1875 if (qdev->lrg_buf_skb_check)
1876 if (!ql_populate_free_queue(qdev))
1879 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1881 while ((qdev->lrg_buf_release_cnt >= 16) &&
1882 (qdev->lrg_buf_free_count >= 8)) {
1886 ql_get_from_lrg_buf_free_list(qdev);
1893 qdev->lrg_buf_release_cnt--;
1896 qdev->lrg_buf_q_producer_index++;
1898 if (qdev->lrg_buf_q_producer_index ==
1899 qdev->num_lbufq_entries)
1900 qdev->lrg_buf_q_producer_index = 0;
1902 if (qdev->lrg_buf_q_producer_index ==
1903 (qdev->num_lbufq_entries - 1)) {
1904 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1908 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1909 writel(qdev->lrg_buf_q_producer_index,
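ql_update_small_bufq_prod_index() and ql_update_lrg_bufq_prod_index() (source lines 1841-1913) refill the receive buffer queues in batches: each advances a software producer index, wraps it at the ring size, and only publishes the final index to the hardware with a single writel() doorbell. The wrap-and-publish arithmetic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    struct bufq {
        uint32_t producer;   /* e.g. qdev->lrg_buf_q_producer_index */
        uint32_t entries;    /* ring size, e.g. qdev->num_lbufq_entries */
    };

    static void publish_producer(uint32_t index)  /* stands in for writel() */
    {
        printf("doorbell <- %u\n", (unsigned)index);
    }

    static void bufq_advance(struct bufq *q, unsigned int n)
    {
        while (n--) {
            q->producer++;
            if (q->producer == q->entries)   /* wrap at end of the ring */
                q->producer = 0;
        }
        publish_producer(q->producer);       /* one doorbell per batch */
    }

    int main(void)
    {
        struct bufq q = { .producer = 30, .entries = 32 };

        bufq_advance(&q, 4);                 /* wraps: 30 -> 2 */
        return 0;
    }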
1914 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1921 netdev_warn(qdev->ndev,
1925 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1929 netdev_err(qdev->ndev,
1932 qdev->ndev->stats.tx_errors++;
1937 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1940 qdev->ndev->stats.tx_errors++;
1944 dma_unmap_single(&qdev->pdev->dev,
1950 dma_unmap_page(&qdev->pdev->dev,
1956 qdev->ndev->stats.tx_packets++;
1957 qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1964 atomic_inc(&qdev->tx_count);
1967 static void ql_get_sbuf(struct ql3_adapter *qdev)
1969 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1970 qdev->small_buf_index = 0;
1971 qdev->small_buf_release_cnt++;
1974 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1977 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1978 qdev->lrg_buf_release_cnt++;
1979 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1980 qdev->lrg_buf_index = 0;
1996 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2007 ql_get_sbuf(qdev);
2009 if (qdev->device_id == QL3022_DEVICE_ID)
2010 lrg_buf_cb1 = ql_get_lbuf(qdev);
2013 lrg_buf_cb2 = ql_get_lbuf(qdev);
2016 qdev->ndev->stats.rx_packets++;
2017 qdev->ndev->stats.rx_bytes += length;
2020 dma_unmap_single(&qdev->pdev->dev,
2025 skb->protocol = eth_type_trans(skb, qdev->ndev);
2027 napi_gro_receive(&qdev->napi, skb);
2030 if (qdev->device_id == QL3022_DEVICE_ID)
2031 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2032 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2035 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2041 struct net_device *ndev = qdev->ndev;
2049 ql_get_sbuf(qdev);
2051 if (qdev->device_id == QL3022_DEVICE_ID) {
2053 lrg_buf_cb1 = ql_get_lbuf(qdev);
2061 lrg_buf_cb2 = ql_get_lbuf(qdev);
2065 dma_unmap_single(&qdev->pdev->dev,
2071 if (qdev->device_id == QL3022_DEVICE_ID) {
2094 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2096 napi_gro_receive(&qdev->napi, skb2);
2101 if (qdev->device_id == QL3022_DEVICE_ID)
2102 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2103 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2106 static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
2109 struct net_device *ndev = qdev->ndev;
2113 while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2114 qdev->rsp_consumer_index) && (work_done < budget)) {
2116 net_rsp = qdev->rsp_current;
2122 if (qdev->device_id == QL3032_DEVICE_ID)
2128 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2134 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2141 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2159 qdev->rsp_consumer_index++;
2161 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2162 qdev->rsp_consumer_index = 0;
2163 qdev->rsp_current = qdev->rsp_q_virt_addr;
2165 qdev->rsp_current++;
2175 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2177 qdev->mem_map_registers;
2180 work_done = ql_tx_rx_clean(qdev, budget);
2185 spin_lock_irqsave(&qdev->hw_lock, flags);
2186 ql_update_small_bufq_prod_index(qdev);
2187 ql_update_lrg_bufq_prod_index(qdev);
2188 writel(qdev->rsp_consumer_index,
2190 spin_unlock_irqrestore(&qdev->hw_lock, flags);
2192 ql_enable_interrupts(qdev);
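ql_poll() (source lines 2175-2192) is a conventional NAPI handler: it cleans at most budget completions via ql_tx_rx_clean(), and only when the ring is drained (work_done < budget) does it complete NAPI, push the buffer-queue producer indices and the response consumer index under hw_lock, and re-enable interrupts. A stripped-down sketch of that contract in kernel C; my_adapter/my_clean/my_reenable_irqs are hypothetical stand-ins, while the NAPI calls themselves are the real API:

    #include <linux/netdevice.h>

    struct my_adapter {
        struct napi_struct napi;
        /* hardware state elided */
    };

    static int my_clean(struct my_adapter *ad, int budget)
    {
        return 0;   /* would process up to budget completions */
    }

    static void my_reenable_irqs(struct my_adapter *ad) { }

    static int my_poll(struct napi_struct *napi, int budget)
    {
        struct my_adapter *ad = container_of(napi, struct my_adapter, napi);
        int work_done = my_clean(ad, budget);

        /* Re-arm interrupts only when work is finished and the core agrees
         * polling may stop; otherwise it will invoke us again. */
        if (work_done < budget && napi_complete_done(napi, work_done))
            my_reenable_irqs(ad);

        return work_done;
    }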
2201 struct ql3_adapter *qdev = netdev_priv(ndev);
2203 qdev->mem_map_registers;
2208 value = ql_read_common_reg_l(qdev,
2212 spin_lock(&qdev->adapter_lock);
2213 netif_stop_queue(qdev->ndev);
2214 netif_carrier_off(qdev->ndev);
2215 ql_disable_interrupts(qdev);
2216 qdev->port_link_state = LS_DOWN;
2217 set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
2224 ql_read_page0_reg_l(qdev,
2229 set_bit(QL_RESET_START, &qdev->flags) ;
2234 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
2239 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2240 spin_unlock(&qdev->adapter_lock);
2242 ql_disable_interrupts(qdev);
2243 if (likely(napi_schedule_prep(&qdev->napi)))
2244 __napi_schedule(&qdev->napi);
2258 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2260 if (qdev->device_id == QL3022_DEVICE_ID)
2298 static int ql_send_map(struct ql3_adapter *qdev,
2316 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
2318 err = dma_mapping_error(&qdev->pdev->dev, map);
2320 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2354 map = dma_map_single(&qdev->pdev->dev, oal,
2358 err = dma_mapping_error(&qdev->pdev->dev, map);
2360 netdev_err(qdev->ndev,
2378 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2381 err = dma_mapping_error(&qdev->pdev->dev, map);
2383 netdev_err(qdev->ndev,
2420 dma_unmap_single(&qdev->pdev->dev,
2428 dma_unmap_page(&qdev->pdev->dev,
2434 dma_unmap_single(&qdev->pdev->dev,
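ql_send_map() (source lines 2298-2434) maps the skb head with dma_map_single(), each fragment with skb_frag_dma_map(), checks every mapping with dma_mapping_error(), and on failure walks back and unmaps everything done so far. The map-then-check idiom it repeats, as a kernel-C sketch with placeholder parameters:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* One mapping step of the ql_send_map() pattern: map, verify, hand the
     * bus address back; the caller is responsible for unwinding earlier
     * mappings when this fails mid-scatterlist. */
    static int map_one(struct device *dev, void *buf, size_t len,
                       dma_addr_t *out)
    {
        dma_addr_t map = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, map))
            return -ENOMEM;
        *out = map;
        return 0;
    }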
2457 struct ql3_adapter *qdev = netdev_priv(ndev);
2459 qdev->mem_map_registers;
2464 if (unlikely(atomic_read(&qdev->tx_count) < 2))
2467 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2468 tx_cb->seg_count = ql_get_seg_count(qdev,
2478 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2480 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2481 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2484 if (qdev->device_id == QL3032_DEVICE_ID &&
2488 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2494 qdev->req_producer_index++;
2495 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2496 qdev->req_producer_index = 0;
2498 ql_write_common_reg_l(qdev,
2500 qdev->req_producer_index);
2502 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2504 qdev->req_producer_index, skb->len);
2506 atomic_dec(&qdev->tx_count);
2510 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2512 qdev->req_q_size =
2515 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2522 qdev->req_q_virt_addr =
2523 dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
2524 &qdev->req_q_phy_addr, GFP_KERNEL);
2526 if ((qdev->req_q_virt_addr == NULL) ||
2527 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2528 netdev_err(qdev->ndev, "reqQ failed\n");
2532 qdev->rsp_q_virt_addr =
2533 dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
2534 &qdev->rsp_q_phy_addr, GFP_KERNEL);
2536 if ((qdev->rsp_q_virt_addr == NULL) ||
2537 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2538 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2539 dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
2540 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2544 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2549 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2551 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2552 netdev_info(qdev->ndev, "Already done\n");
2556 dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
2557 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2559 qdev->req_q_virt_addr = NULL;
2561 dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
2562 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2564 qdev->rsp_q_virt_addr = NULL;
2566 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
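ql_alloc_net_req_rsp_queues() (source lines 2510-2544) allocates the request and response rings with dma_alloc_coherent() and rejects any ring whose bus address is not naturally aligned to the ring size, freeing the request ring again when the response ring fails. For a power-of-two ring size, the LS_64BITS(phy_addr) & (size - 1) alignment test is just a mask, runnable as:

    #include <stdint.h>
    #include <stdio.h>

    /* A power-of-two-sized ring is "naturally aligned" when its bus address
     * has no bits set below the ring size; this mirrors the check in
     * ql_alloc_net_req_rsp_queues(). */
    static int ring_misaligned(uint64_t bus_addr, uint64_t size)
    {
        return (bus_addr & (size - 1)) != 0;
    }

    int main(void)
    {
        printf("%d\n", ring_misaligned(0x10000, 0x1000));  /* 0: aligned */
        printf("%d\n", ring_misaligned(0x10200, 0x1000));  /* 1: not     */
        return 0;
    }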
2569 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2572 qdev->lrg_buf_q_size =
2573 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2574 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2575 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2577 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2579 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
2582 if (qdev->lrg_buf == NULL)
2585 qdev->lrg_buf_q_alloc_virt_addr =
2586 dma_alloc_coherent(&qdev->pdev->dev,
2587 qdev->lrg_buf_q_alloc_size,
2588 &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
2590 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2591 netdev_err(qdev->ndev, "lBufQ failed\n");
2592 kfree(qdev->lrg_buf);
2595 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2596 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2599 qdev->small_buf_q_size =
2601 if (qdev->small_buf_q_size < PAGE_SIZE)
2602 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2604 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2606 qdev->small_buf_q_alloc_virt_addr =
2607 dma_alloc_coherent(&qdev->pdev->dev,
2608 qdev->small_buf_q_alloc_size,
2609 &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
2611 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2612 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2613 dma_free_coherent(&qdev->pdev->dev,
2614 qdev->lrg_buf_q_alloc_size,
2615 qdev->lrg_buf_q_alloc_virt_addr,
2616 qdev->lrg_buf_q_alloc_phy_addr);
2617 kfree(qdev->lrg_buf);
2621 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2622 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2623 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2627 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2629 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2630 netdev_info(qdev->ndev, "Already done\n");
2633 kfree(qdev->lrg_buf);
2634 dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
2635 qdev->lrg_buf_q_alloc_virt_addr,
2636 qdev->lrg_buf_q_alloc_phy_addr);
2638 qdev->lrg_buf_q_virt_addr = NULL;
2640 dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
2641 qdev->small_buf_q_alloc_virt_addr,
2642 qdev->small_buf_q_alloc_phy_addr);
2644 qdev->small_buf_q_virt_addr = NULL;
2646 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2649 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2655 qdev->small_buf_total_size =
2659 qdev->small_buf_virt_addr =
2660 dma_alloc_coherent(&qdev->pdev->dev,
2661 qdev->small_buf_total_size,
2662 &qdev->small_buf_phy_addr, GFP_KERNEL);
2664 if (qdev->small_buf_virt_addr == NULL) {
2665 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2669 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2670 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2672 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2677 cpu_to_le32(qdev->small_buf_phy_addr_high);
2679 cpu_to_le32(qdev->small_buf_phy_addr_low +
2683 qdev->small_buf_index = 0;
2684 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2688 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2690 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2691 netdev_info(qdev->ndev, "Already done\n");
2694 if (qdev->small_buf_virt_addr != NULL) {
2695 dma_free_coherent(&qdev->pdev->dev,
2696 qdev->small_buf_total_size,
2697 qdev->small_buf_virt_addr,
2698 qdev->small_buf_phy_addr);
2700 qdev->small_buf_virt_addr = NULL;
2704 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2709 for (i = 0; i < qdev->num_large_buffers; i++) {
2710 lrg_buf_cb = &qdev->lrg_buf[i];
2713 dma_unmap_single(&qdev->pdev->dev,
2724 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2728 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2730 for (i = 0; i < qdev->num_large_buffers; i++) {
2731 lrg_buf_cb = &qdev->lrg_buf[i];
2736 qdev->lrg_buf_index = 0;
2737 qdev->lrg_buf_skb_check = 0;
2740 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2748 for (i = 0; i < qdev->num_large_buffers; i++) {
2749 lrg_buf_cb = &qdev->lrg_buf[i];
2752 skb = netdev_alloc_skb(qdev->ndev,
2753 qdev->lrg_buffer_len);
2756 netdev_err(qdev->ndev,
2758 qdev->lrg_buffer_len * 2, i);
2759 ql_free_large_buffers(qdev);
2768 map = dma_map_single(&qdev->pdev->dev, skb->data,
2769 qdev->lrg_buffer_len - QL_HEADER_SPACE,
2772 err = dma_mapping_error(&qdev->pdev->dev, map);
2774 netdev_err(qdev->ndev,
2778 ql_free_large_buffers(qdev);
2785 qdev->lrg_buffer_len -
2796 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2801 tx_cb = &qdev->tx_buf[0];
2809 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2813 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2818 tx_cb = &qdev->tx_buf[i];
2829 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2831 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2832 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2833 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2834 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2838 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2839 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2841 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2842 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2845 qdev->num_large_buffers =
2846 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2847 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2848 qdev->max_frame_size =
2849 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2856 qdev->shadow_reg_virt_addr =
2857 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2858 &qdev->shadow_reg_phy_addr, GFP_KERNEL);
2860 if (qdev->shadow_reg_virt_addr != NULL) {
2861 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2862 qdev->req_consumer_index_phy_addr_high =
2863 MS_64BITS(qdev->shadow_reg_phy_addr);
2864 qdev->req_consumer_index_phy_addr_low =
2865 LS_64BITS(qdev->shadow_reg_phy_addr);
2867 qdev->prsp_producer_index =
2868 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2869 qdev->rsp_producer_index_phy_addr_high =
2870 qdev->req_consumer_index_phy_addr_high;
2871 qdev->rsp_producer_index_phy_addr_low =
2872 qdev->req_consumer_index_phy_addr_low + 8;
2874 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2878 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2879 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2883 if (ql_alloc_buffer_queues(qdev) != 0) {
2884 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2888 if (ql_alloc_small_buffers(qdev) != 0) {
2889 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2893 if (ql_alloc_large_buffers(qdev) != 0) {
2894 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2899 ql_init_large_buffers(qdev);
2900 if (ql_create_send_free_list(qdev))
2903 qdev->rsp_current = qdev->rsp_q_virt_addr;
2907 ql_free_send_free_list(qdev);
2909 ql_free_buffer_queues(qdev);
2911 ql_free_net_req_rsp_queues(qdev);
2913 dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
2914 qdev->shadow_reg_virt_addr,
2915 qdev->shadow_reg_phy_addr);
2920 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2922 ql_free_send_free_list(qdev);
2923 ql_free_large_buffers(qdev);
2924 ql_free_small_buffers(qdev);
2925 ql_free_buffer_queues(qdev);
2926 ql_free_net_req_rsp_queues(qdev);
2927 if (qdev->shadow_reg_virt_addr != NULL) {
2928 dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
2929 qdev->shadow_reg_virt_addr,
2930 qdev->shadow_reg_phy_addr);
2931 qdev->shadow_reg_virt_addr = NULL;
2935 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2938 (void __iomem *)qdev->mem_map_registers;
2940 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2941 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2945 ql_write_page2_reg(qdev,
2946 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2948 ql_write_page2_reg(qdev,
2950 qdev->nvram_data.bufletCount);
2952 ql_write_page2_reg(qdev,
2954 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2955 (qdev->nvram_data.tcpWindowThreshold0));
2957 ql_write_page2_reg(qdev,
2959 qdev->nvram_data.tcpWindowThreshold50);
2961 ql_write_page2_reg(qdev,
2963 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2964 qdev->nvram_data.ipHashTableBaseLo);
2965 ql_write_page2_reg(qdev,
2967 qdev->nvram_data.ipHashTableSize);
2968 ql_write_page2_reg(qdev,
2970 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2971 qdev->nvram_data.tcpHashTableBaseLo);
2972 ql_write_page2_reg(qdev,
2974 qdev->nvram_data.tcpHashTableSize);
2975 ql_write_page2_reg(qdev,
2977 (qdev->nvram_data.ncbTableBaseHi << 16) |
2978 qdev->nvram_data.ncbTableBaseLo);
2979 ql_write_page2_reg(qdev,
2981 qdev->nvram_data.ncbTableSize);
2982 ql_write_page2_reg(qdev,
2984 (qdev->nvram_data.drbTableBaseHi << 16) |
2985 qdev->nvram_data.drbTableBaseLo);
2986 ql_write_page2_reg(qdev,
2988 qdev->nvram_data.drbTableSize);
2989 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2993 static int ql_adapter_initialize(struct ql3_adapter *qdev)
2997 qdev->mem_map_registers;
3004 if (ql_mii_setup(qdev))
3008 ql_write_common_reg(qdev, spir,
3013 qdev->port_link_state = LS_DOWN;
3014 netif_carrier_off(qdev->ndev);
3017 ql_write_common_reg(qdev, spir,
3022 *((u32 *)(qdev->preq_consumer_index)) = 0;
3023 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3024 qdev->req_producer_index = 0;
3026 ql_write_page1_reg(qdev,
3028 qdev->req_consumer_index_phy_addr_high);
3029 ql_write_page1_reg(qdev,
3031 qdev->req_consumer_index_phy_addr_low);
3033 ql_write_page1_reg(qdev,
3035 MS_64BITS(qdev->req_q_phy_addr));
3036 ql_write_page1_reg(qdev,
3038 LS_64BITS(qdev->req_q_phy_addr));
3039 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3042 *((__le16 *) (qdev->prsp_producer_index)) = 0;
3043 qdev->rsp_consumer_index = 0;
3044 qdev->rsp_current = qdev->rsp_q_virt_addr;
3046 ql_write_page1_reg(qdev,
3048 qdev->rsp_producer_index_phy_addr_high);
3050 ql_write_page1_reg(qdev,
3052 qdev->rsp_producer_index_phy_addr_low);
3054 ql_write_page1_reg(qdev,
3056 MS_64BITS(qdev->rsp_q_phy_addr));
3058 ql_write_page1_reg(qdev,
3060 LS_64BITS(qdev->rsp_q_phy_addr));
3062 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3065 ql_write_page1_reg(qdev,
3067 MS_64BITS(qdev->lrg_buf_q_phy_addr));
3069 ql_write_page1_reg(qdev,
3071 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3073 ql_write_page1_reg(qdev,
3075 qdev->num_lbufq_entries);
3077 ql_write_page1_reg(qdev,
3079 qdev->lrg_buffer_len);
3082 ql_write_page1_reg(qdev,
3084 MS_64BITS(qdev->small_buf_q_phy_addr));
3086 ql_write_page1_reg(qdev,
3088 LS_64BITS(qdev->small_buf_q_phy_addr));
3090 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3091 ql_write_page1_reg(qdev,
3095 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3096 qdev->small_buf_release_cnt = 8;
3097 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3098 qdev->lrg_buf_release_cnt = 8;
3099 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3100 qdev->small_buf_index = 0;
3101 qdev->lrg_buf_index = 0;
3102 qdev->lrg_buf_free_count = 0;
3103 qdev->lrg_buf_free_head = NULL;
3104 qdev->lrg_buf_free_tail = NULL;
3106 ql_write_common_reg(qdev,
3109 qdev->small_buf_q_producer_index);
3110 ql_write_common_reg(qdev,
3113 qdev->lrg_buf_q_producer_index);
3119 clear_bit(QL_LINK_MASTER, &qdev->flags);
3120 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3124 if (ql_init_misc_registers(qdev)) {
3129 value = qdev->nvram_data.tcpMaxWindowSize;
3130 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3132 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3134 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3135 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3140 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3141 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3145 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3148 if (qdev->mac_index)
3149 ql_write_page0_reg(qdev,
3151 qdev->max_frame_size);
3153 ql_write_page0_reg(qdev,
3155 qdev->max_frame_size);
3157 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3158 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3164 PHY_Setup(qdev);
3165 ql_init_scan_mode(qdev);
3166 ql_get_phy_owner(qdev);
3171 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3173 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3174 ((qdev->ndev->dev_addr[2] << 24)
3175 | (qdev->ndev->dev_addr[3] << 16)
3176 | (qdev->ndev->dev_addr[4] << 8)
3177 | qdev->ndev->dev_addr[5]));
3180 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3182 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3183 ((qdev->ndev->dev_addr[0] << 8)
3184 | qdev->ndev->dev_addr[1]));
3187 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3192 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3194 (qdev->mac_index << 2)));
3195 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3197 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3199 ((qdev->mac_index << 2) + 1)));
3200 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3202 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3205 ql_write_page0_reg(qdev,
3210 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3213 spin_unlock_irq(&qdev->hw_lock);
3215 spin_lock_irq(&qdev->hw_lock);
3219 netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3225 if (qdev->device_id == QL3032_DEVICE_ID) {
3230 ql_write_page0_reg(qdev, &port_regs->functionControl,
3236 ql_write_page0_reg(qdev, &port_regs->portControl,
3248 static int ql_adapter_reset(struct ql3_adapter *qdev)
3251 qdev->mem_map_registers;
3256 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3257 clear_bit(QL_RESET_DONE, &qdev->flags);
3262 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3263 ql_write_common_reg(qdev,
3268 netdev_printk(KERN_DEBUG, qdev->ndev,
3275 ql_read_common_reg(qdev,
3288 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3290 netdev_printk(KERN_DEBUG, qdev->ndev,
3292 ql_write_common_reg(qdev,
3300 ql_write_common_reg(qdev,
3311 value = ql_read_common_reg(qdev,
3322 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3323 set_bit(QL_RESET_DONE, &qdev->flags);
3327 static void ql_set_mac_info(struct ql3_adapter *qdev)
3330 qdev->mem_map_registers;
3336 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3338 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3341 qdev->mac_index = 0;
3342 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3343 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3344 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3346 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3348 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3352 qdev->mac_index = 1;
3353 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3354 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3355 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3357 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3359 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3365 netdev_printk(KERN_DEBUG, qdev->ndev,
3370 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3375 struct ql3_adapter *qdev = netdev_priv(ndev);
3376 struct pci_dev *pdev = qdev->pdev;
3380 DRV_NAME, qdev->index, qdev->chip_rev_id,
3381 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3382 qdev->pci_slot);
3384 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3390 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3391 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3394 qdev->mem_map_registers);
3397 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3400 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3402 struct net_device *ndev = qdev->ndev;
3408 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3409 clear_bit(QL_LINK_MASTER, &qdev->flags);
3411 ql_disable_interrupts(qdev);
3413 free_irq(qdev->pdev->irq, ndev);
3415 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3416 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3417 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3418 pci_disable_msi(qdev->pdev);
3421 del_timer_sync(&qdev->adapter_timer);
3423 napi_disable(&qdev->napi);
3429 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3430 if (ql_wait_for_drvr_lock(qdev)) {
3431 soft_reset = ql_adapter_reset(qdev);
3434 qdev->index);
3443 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3445 ql_free_mem_resources(qdev);
3449 static int ql_adapter_up(struct ql3_adapter *qdev)
3451 struct net_device *ndev = qdev->ndev;
3456 if (ql_alloc_mem_resources(qdev)) {
3461 if (qdev->msi) {
3462 if (pci_enable_msi(qdev->pdev)) {
3465 qdev->msi = 0;
3468 set_bit(QL_MSI_ENABLED, &qdev->flags);
3473 err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3478 qdev->pdev->irq);
3482 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3484 if (!ql_wait_for_drvr_lock(qdev)) {
3490 err = ql_adapter_initialize(qdev);
3495 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3497 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3499 set_bit(QL_ADAPTER_UP, &qdev->flags);
3501 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3503 napi_enable(&qdev->napi);
3504 ql_enable_interrupts(qdev);
3508 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3510 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3511 free_irq(qdev->pdev->irq, ndev);
3513 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3515 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3516 pci_disable_msi(qdev->pdev);
3521 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3523 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3524 netdev_err(qdev->ndev,
3527 dev_close(qdev->ndev);
3536 struct ql3_adapter *qdev = netdev_priv(ndev);
3542 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3545 ql_adapter_down(qdev, QL_DO_RESET);
3551 struct ql3_adapter *qdev = netdev_priv(ndev);
3552 return ql_adapter_up(qdev);
3557 struct ql3_adapter *qdev = netdev_priv(ndev);
3559 qdev->mem_map_registers;
3571 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3573 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3575 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3581 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3583 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3585 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3592 struct ql3_adapter *qdev = netdev_priv(ndev);
3603 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3608 struct ql3_adapter *qdev =
3610 struct net_device *ndev = qdev->ndev;
3615 qdev->mem_map_registers;
3618 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
3619 test_bit(QL_RESET_START, &qdev->flags)) {
3620 clear_bit(QL_LINK_MASTER, &qdev->flags);
3627 tx_cb = &qdev->tx_buf[i];
3631 dma_unmap_single(&qdev->pdev->dev,
3636 dma_unmap_page(&qdev->pdev->dev,
3647 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3648 ql_write_common_reg(qdev,
3657 value = ql_read_common_reg(qdev,
3670 ql_write_common_reg(qdev,
3678 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3680 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3682 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3693 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3694 clear_bit(QL_RESET_START, &qdev->flags);
3695 ql_cycle_adapter(qdev, QL_DO_RESET);
3699 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3700 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3701 clear_bit(QL_RESET_START, &qdev->flags);
3702 ql_cycle_adapter(qdev, QL_NO_RESET);
3708 struct ql3_adapter *qdev =
3711 ql_cycle_adapter(qdev, QL_DO_RESET);
3714 static void ql_get_board_info(struct ql3_adapter *qdev)
3717 qdev->mem_map_registers;
3720 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3722 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3724 qdev->pci_width = 64;
3726 qdev->pci_width = 32;
3728 qdev->pci_x = 1;
3730 qdev->pci_x = 0;
3731 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3736 struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
3737 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
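ql3xxx_timer() (source lines 3736-3737) shows the timer_setup()/from_timer() idiom: the callback recovers its ql3_adapter from the embedded timer_list and, since timer callbacks run in softirq context and must not sleep, defers the real link-state work to the adapter's workqueue; timer_setup() at line 3879 registers it and mod_timer() re-arms it. The bare pattern, with hypothetical field names:

    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_adapter {
        struct timer_list adapter_timer;
        struct workqueue_struct *wq;
        struct delayed_work link_work;
    };

    static void my_timer(struct timer_list *t)
    {
        /* Recover the container, then punt to process context. */
        struct my_adapter *ad = from_timer(ad, t, adapter_timer);

        queue_delayed_work(ad->wq, &ad->link_work, 0);
    }

    /* At probe time:
     *   timer_setup(&ad->adapter_timer, my_timer, 0);
     *   mod_timer(&ad->adapter_timer, jiffies + HZ);
     */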
3753 struct ql3_adapter *qdev = NULL;
3791 qdev = netdev_priv(ndev);
3792 qdev->index = cards_found;
3793 qdev->ndev = ndev;
3794 qdev->pdev = pdev;
3795 qdev->device_id = pci_entry->device;
3796 qdev->port_link_state = LS_DOWN;
3798 qdev->msi = 1;
3800 qdev->msg_enable = netif_msg_init(debug, default_msg);
3804 if (qdev->device_id == QL3032_DEVICE_ID)
3807 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3808 if (!qdev->mem_map_registers) {
3814 spin_lock_init(&qdev->adapter_lock);
3815 spin_lock_init(&qdev->hw_lock);
3822 netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3827 if (ql_get_nvram_params(qdev)) {
3829 __func__, qdev->index);
3834 ql_set_mac_info(qdev);
3837 if (qdev->mac_index) {
3838 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
3839 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3841 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
3842 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3848 ql_get_board_info(qdev);
3854 if (qdev->pci_x)
3868 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3869 if (!qdev->workqueue) {
3875 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3876 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3877 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3879 timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
3880 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3893 iounmap(qdev->mem_map_registers);
3907 struct ql3_adapter *qdev = netdev_priv(ndev);
3911 ql_disable_interrupts(qdev);
3913 if (qdev->workqueue) {
3914 cancel_delayed_work(&qdev->reset_work);
3915 cancel_delayed_work(&qdev->tx_timeout_work);
3916 destroy_workqueue(qdev->workqueue);
3917 qdev->workqueue = NULL;
3920 iounmap(qdev->mem_map_registers);