Lines Matching defs:vk

49 {VK_IMAGE_TYPE_BOOT1, {"vk_a0-boot1.bin", "vk-boot1.bin"}},
50 {VK_IMAGE_TYPE_BOOT2, {"vk_a0-boot2.bin", "vk-boot2.bin"}}
53 {VK_IMAGE_TYPE_BOOT1, {"vk_b0-boot1.bin", "vk-boot1.bin"}},
54 {VK_IMAGE_TYPE_BOOT2, {"vk_b0-boot2.bin", "vk-boot2.bin"}}
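
These table rows (lines 49-54) pair each boot stage with a revision-specific firmware name and a generic fallback; get_load_fw_name() at line 834 presumably tries the pair in order and settles for the fallback when the revision-specific blob is absent. A minimal user-space sketch of that two-name lookup; the struct layout and the fw_exists() probe are invented for illustration:

#include <stdio.h>
#include <string.h>

#define MAX_FW_NAMES 2

struct load_image_entry {
	unsigned int type;
	const char *image_name[MAX_FW_NAMES];
};

static const struct load_image_entry table[] = {
	{1 /* BOOT1 */, {"vk_a0-boot1.bin", "vk-boot1.bin"}},
	{2 /* BOOT2 */, {"vk_a0-boot2.bin", "vk-boot2.bin"}},
};

/* pretend the revision-specific blobs are missing, to force the fallback */
static int fw_exists(const char *name)
{
	return strncmp(name, "vk_a0", 5) != 0;
}

static const char *pick_fw_name(const struct load_image_entry *entry)
{
	int i;

	for (i = 0; i < MAX_FW_NAMES; i++)
		if (fw_exists(entry->image_name[i]))
			return entry->image_name[i];
	return NULL;
}

int main(void)
{
	printf("chosen: %s\n", pick_fw_name(&table[0]));
	return 0;
}

Run as-is it prints "chosen: vk-boot1.bin", because the a0-specific name is made to look missing.
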
140 struct bcm_vk *vk = dev_id;
142 if (!bcm_vk_drv_access_ok(vk)) {
143 dev_err(&vk->pdev->dev,
149 if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
150 queue_work(vk->wq_thread, &vk->wq_work);
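
The interrupt handler at lines 140-150 is a minimal top half: it checks that the device is still accessible, then marks a notification pending and defers all BAR reads to a workqueue. The test_and_set_bit() guard keeps a burst of interrupts from queuing the same work item twice. A sketch of the pattern, assuming a hypothetical my_dev layout rather than the driver's struct bcm_vk:

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define MY_WQ_NOTF_PEND 0

struct my_dev {
	unsigned long wq_offload[1];		/* pending-work bitmap */
	struct workqueue_struct *wq_thread;
	struct work_struct wq_work;
};

static irqreturn_t my_notf_irqhandler(int irq, void *dev_id)
{
	struct my_dev *md = dev_id;

	/* queue at most once: a set bit means work is already pending */
	if (test_and_set_bit(MY_WQ_NOTF_PEND, md->wq_offload) == 0)
		queue_work(md->wq_thread, &md->wq_work);

	return IRQ_HANDLED;
}
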
156 static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
158 struct device *dev = &vk->pdev->dev;
164 reg = vkread32(vk, BAR_0, BAR_INTF_VER);
179 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL);
189 static void bcm_vk_log_notf(struct bcm_vk *vk,
199 struct device *dev = &vk->pdev->dev;
211 reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
215 if ((uecc_mem_err != vk->alert_cnts.uecc) &&
219 DRV_MODULE_NAME, vk->devid,
221 vk->alert_cnts.uecc = uecc_mem_err;
223 reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
227 if ((ecc_mem_err != vk->alert_cnts.ecc) &&
230 DRV_MODULE_NAME, vk->devid,
232 vk->alert_cnts.ecc = ecc_mem_err;
236 DRV_MODULE_NAME, vk->devid, entry->str,
242 static void bcm_vk_dump_peer_log(struct bcm_vk *vk)
245 struct bcm_vk_peer_log *log_info = &vk->peerlog_info;
248 struct device *dev = &vk->pdev->dev;
251 memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log));
275 data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log);
278 loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx);
290 vkwrite32(vk, log.rd_idx, BAR_2,
291 vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx));
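
bcm_vk_dump_peer_log() (lines 242-291) drains the card's log as a ring buffer in BAR_2: bytes are read at rd_idx, the index wraps via a power-of-two mask, and the updated rd_idx is written back so the card can reuse the space. The sanity checks later at lines 418-426 enforce exactly the invariants this depends on (mask == buf_size - 1, both indices within the mask). A runnable user-space sketch of the masked consumption, with invented names and data:

#include <stdio.h>

struct peer_log {
	unsigned int buf_size;
	unsigned int mask;	/* must equal buf_size - 1 */
	unsigned int rd_idx;
	unsigned int wr_idx;
};

static void drain(struct peer_log *log, const char *buf)
{
	while (log->rd_idx != log->wr_idx) {
		putchar(buf[log->rd_idx]);
		/* the mask keeps the index in-bounds as it wraps */
		log->rd_idx = (log->rd_idx + 1) & log->mask;
	}
	/* the real driver now writes rd_idx back through BAR_2 */
}

int main(void)
{
	char buf[8] = "ABCDEFGH";
	struct peer_log log = {.buf_size = 8, .mask = 7,
			       .rd_idx = 6, .wr_idx = 2};

	drain(&log, buf);	/* prints GHAB: the read wraps past the end */
	putchar('\n');
	return 0;
}
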
294 void bcm_vk_handle_notf(struct bcm_vk *vk)
302 reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG);
305 vk->peer_alert.notfs = reg;
306 bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err,
308 vk->peer_alert.flags = vk->peer_alert.notfs;
311 bcm_vk_blk_drv_access(vk);
315 spin_lock_irqsave(&vk->host_alert_lock, flags);
317 vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;
319 alert = vk->host_alert;
320 vk->host_alert.flags = vk->host_alert.notfs;
321 spin_unlock_irqrestore(&vk->host_alert_lock, flags);
324 bcm_vk_log_notf(vk, &alert, bcm_vk_host_err,
332 ((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) ||
333 (vk->peer_alert.flags & ERR_LOG_SYS_FAULT)))
334 bcm_vk_dump_peer_log(vk);
337 static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
341 struct device *dev = &vk->pdev->dev;
350 rd_val = vkread32(vk, bar, offset);
355 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
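
bcm_vk_wait() (lines 337-355) polls a BAR register until it reaches a wanted value, re-reading BAR_BOOT_STATUS each pass so a boot error can abort the wait early. The poll-until-match core can be expressed with the kernel's readl_poll_timeout() helper; this sketch does that with an invented timeout policy and omits the driver's extra boot-status check:

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll until (reg & mask) == expected; sleep ~1 ms between reads and
 * give up after 100 ms.  readl_poll_timeout() returns 0 on success or
 * -ETIMEDOUT.  The mask and timeout values are placeholders.
 */
static int wait_for_status(void __iomem *reg, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout(reg, val, (val & mask) == expected,
				  1000, 100 * 1000);
}
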
374 static void bcm_vk_get_card_info(struct bcm_vk *vk)
376 struct device *dev = &vk->pdev->dev;
380 struct bcm_vk_card_info *info = &vk->card_info;
383 offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO);
384 offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1);
389 *dst++ = vkread8(vk, BAR_2, offset++);
409 vk->peerlog_off = offset;
410 memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off,
411 sizeof(vk->peerlog_info));
418 if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) ||
419 (vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) ||
420 (vk->peerlog_info.rd_idx > vk->peerlog_info.mask) ||
421 (vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) {
423 vk->peerlog_info.buf_size,
424 vk->peerlog_info.mask,
425 vk->peerlog_info.rd_idx,
426 vk->peerlog_info.wr_idx);
427 memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
430 vk->peerlog_info.buf_size,
431 vk->peerlog_info.mask,
432 vk->peerlog_info.rd_idx,
433 vk->peerlog_info.wr_idx);
437 static void bcm_vk_get_proc_mon_info(struct bcm_vk *vk)
439 struct device *dev = &vk->pdev->dev;
440 struct bcm_vk_proc_mon_info *mon = &vk->proc_mon_info;
445 buf_size = vkread32(vk, BAR_2,
446 vk->peerlog_off
448 offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log)
452 num = vkread32(vk, BAR_2, offset);
453 entry_size = vkread32(vk, BAR_2, offset + sizeof(num));
464 vk->proc_mon_off = offset;
469 memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size);
472 static int bcm_vk_sync_card_info(struct bcm_vk *vk)
474 u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
477 if (!bcm_vk_msgq_marker_valid(vk))
485 if (vk->tdma_addr) {
486 vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1,
488 vkwrite32(vk, (u32)vk->tdma_addr, BAR_1,
490 vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
495 bcm_vk_get_card_info(vk);
498 bcm_vk_get_proc_mon_info(vk);
503 void bcm_vk_blk_drv_access(struct bcm_vk *vk)
512 spin_lock(&vk->ctx_lock);
515 atomic_set(&vk->msgq_inited, 0);
520 list_for_each_entry(ctx, &vk->pid_ht[i].head, node) {
521 if (ctx->pid != vk->reset_pid) {
522 dev_dbg(&vk->pdev->dev,
529 bcm_vk_tty_terminate_tty_user(vk);
530 spin_unlock(&vk->ctx_lock);
533 static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
537 vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1,
539 vkwrite32(vk, (u32)host_buf_addr, BAR_1,
541 vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ);
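
bcm_vk_buf_notify() (lines 533-541) hands a host DMA buffer to the card through three 32-bit BAR_1 registers: address high word, then low word, then size. A sketch of the same split using the kernel's upper/lower_32_bits() helpers; the register offsets are placeholders, and treating the size write as the "go" signal is an assumption:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define REG_DMA_BUF_ADDR_HI	0x480	/* placeholder offsets */
#define REG_DMA_BUF_ADDR_LO	0x484
#define REG_DMA_BUF_SZ		0x488

static void notify_buf(void __iomem *bar, dma_addr_t addr, u32 size)
{
	/* hi, then lo, then size, matching the driver's write order;
	 * the assumption is that size acts as the final "go" field */
	writel(upper_32_bits(addr), bar + REG_DMA_BUF_ADDR_HI);
	writel(lower_32_bits(addr), bar + REG_DMA_BUF_ADDR_LO);
	writel(size, bar + REG_DMA_BUF_SZ);
}
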
544 static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type,
547 struct device *dev = &vk->pdev->dev;
563 value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT);
565 vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT);
571 vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush);
574 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN,
595 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN,
613 bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf);
630 memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1,
635 vkwrite32(vk, codepush, BAR_0, offset_codepush);
641 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
646 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
657 reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS);
681 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
701 ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
721 vkwrite32(vk, codepush, BAR_0, offset_codepush);
732 ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS,
741 is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) &
744 ret = bcm_vk_intf_ver_chk(vk);
754 ret = bcm_vk_sync_msgq(vk, true);
762 ret = bcm_vk_sync_card_info(vk);
780 static u32 bcm_vk_next_boot_image(struct bcm_vk *vk)
786 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
787 fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
795 dev_info(&vk->pdev->dev,
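
bcm_vk_next_boot_image() (lines 780-795) inspects the boot and firmware status registers to decide whether BOOT1 or BOOT2 should be loaded next, which is what lets auto-load (line 874) and the ioctl loader (line 965) refuse out-of-order images. A simplified sketch of that decision; the status bit values and exact rules here are invented, not the driver's:

#include <stdio.h>

#define SRAM_OPEN	(1u << 0)	/* ROM awaiting BOOT1 (assumed) */
#define DDR_OPEN	(1u << 1)	/* BOOT1 awaiting BOOT2 (assumed) */

enum image_type { IMG_NONE, IMG_BOOT1, IMG_BOOT2 };

static enum image_type next_boot_image(unsigned int boot_status)
{
	if (boot_status & SRAM_OPEN)
		return IMG_BOOT1;
	if (boot_status & DDR_OPEN)
		return IMG_BOOT2;
	return IMG_NONE;	/* already booted, or a bad state */
}

int main(void)
{
	printf("%d\n", next_boot_image(DDR_OPEN));	/* 2 -> IMG_BOOT2 */
	return 0;
}
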
802 static enum soc_idx get_soc_idx(struct bcm_vk *vk)
804 struct pci_dev *pdev = vk->pdev;
812 rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID));
834 static const char *get_load_fw_name(struct bcm_vk *vk,
838 struct device *dev = &vk->pdev->dev;
857 int bcm_vk_auto_load_all_images(struct bcm_vk *vk)
861 struct device *dev = &vk->pdev->dev;
865 idx = get_soc_idx(vk);
870 dev_dbg(dev, "Load All for device %d\n", vk->devid);
874 if (bcm_vk_next_boot_image(vk) == curr_type) {
875 curr_name = get_load_fw_name(vk, &image_tab[idx][i]);
882 ret = bcm_vk_load_image_by_type(vk, curr_type,
899 static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
901 if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0)
904 set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
905 queue_work(vk->wq_thread, &vk->wq_work);
915 struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
916 struct device *dev = &vk->pdev->dev;
920 if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
922 clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
923 bcm_vk_handle_notf(vk);
925 if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
926 bcm_vk_auto_load_all_images(vk);
932 clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
933 clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
937 ret = bcm_to_h_msg_dequeue(vk);
942 bcm_vk_blk_drv_access(vk);
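
The work handler (lines 915-942) is the single bottom half for both paths: it services pending notifications first, then a pending auto-download, and clears DWNLD_AUTO before DWNLD_PEND since the latter bit also gates the ioctl loader (line 977). A sketch continuing the my_dev layout from the interrupt-handler example above; the two helpers are hypothetical:

#include <linux/workqueue.h>
#include <linux/bitops.h>

#define MY_WQ_DWNLD_PEND 1
#define MY_WQ_DWNLD_AUTO 2

static void my_wq_handler(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, wq_work);

	if (test_bit(MY_WQ_NOTF_PEND, md->wq_offload)) {
		/* clear before handling so a fresh IRQ can re-queue us */
		clear_bit(MY_WQ_NOTF_PEND, md->wq_offload);
		my_handle_notf(md);			/* hypothetical */
	}
	if (test_bit(MY_WQ_DWNLD_AUTO, md->wq_offload)) {
		my_auto_load_all(md);			/* hypothetical */
		/* AUTO first: PEND also locks out the ioctl loader and
		 * must stay set until the download is fully done */
		clear_bit(MY_WQ_DWNLD_AUTO, md->wq_offload);
		clear_bit(MY_WQ_DWNLD_PEND, md->wq_offload);
	}
}
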
945 static long bcm_vk_load_image(struct bcm_vk *vk,
948 struct device *dev = &vk->pdev->dev;
965 next_loadable = bcm_vk_next_boot_image(vk);
977 if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
985 idx = get_soc_idx(vk);
991 image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]);
1002 ret = bcm_vk_load_image_by_type(vk, image.type, image_name);
1005 clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
1010 static int bcm_vk_reset_successful(struct bcm_vk *vk)
1012 struct device *dev = &vk->pdev->dev;
1027 fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
1053 static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val)
1055 vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE);
1058 static int bcm_vk_trigger_reset(struct bcm_vk *vk)
1070 bcm_vk_drain_msg_on_reset(vk);
1071 vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
1073 vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG);
1076 vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i));
1077 vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i));
1080 vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i));
1082 memset(&vk->card_info, 0, sizeof(vk->card_info));
1083 memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
1084 memset(&vk->proc_mon_info, 0, sizeof(vk->proc_mon_info));
1085 memset(&vk->alert_cnts, 0, sizeof(vk->alert_cnts));
1095 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
1097 dev_info(&vk->pdev->dev,
1102 value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL);
1105 vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL);
1110 if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) {
1116 vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS);
1119 dev_info(&vk->pdev->dev, "Hard reset on Standalone mode");
1120 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
1125 vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS);
1126 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT);
1130 vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]);
1131 memset(&vk->host_alert, 0, sizeof(vk->host_alert));
1132 memset(&vk->peer_alert, 0, sizeof(vk->peer_alert));
1134 bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE);
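
bcm_vk_trigger_reset() (lines 1058-1134) first scrubs the BAR_1 handshake registers and the driver's cached card state, then picks how to hit the card: ramdump mode keeps the firmware-status reboot reason intact and takes a special path, standalone boot gets the hard doorbell, everything else the soft doorbell. A compact sketch of just that selection; the enum and the simplified conditions are illustrative:

#include <linux/types.h>

enum reset_kind { RESET_SOFT, RESET_HARD, RESET_RAMDUMP };

static enum reset_kind choose_reset(bool ramdump, bool standalone)
{
	if (ramdump)
		return RESET_RAMDUMP;	/* keep the FWSTS reboot reason */
	return standalone ? RESET_HARD : RESET_SOFT;
}
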
1139 static long bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg)
1141 struct device *dev = &vk->pdev->dev;
1151 if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
1156 ramdump_reset = vk->peer_alert.flags & ERR_LOG_RAMDUMP;
1167 bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0);
1169 spin_lock(&vk->ctx_lock);
1170 if (!vk->reset_pid) {
1171 vk->reset_pid = task_pid_nr(current);
1174 vk->reset_pid);
1177 spin_unlock(&vk->ctx_lock);
1181 bcm_vk_blk_drv_access(vk);
1182 special_reset = bcm_vk_trigger_reset(vk);
1196 ret = bcm_vk_reset_successful(vk);
1200 clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
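
bcm_vk_reset() (lines 1139-1200) lets only one process drive a reset at a time: the DWNLD_PEND bit excludes a concurrent download, and reset_pid is claimed under ctx_lock (lines 1169-1177) so a second resetter is turned away. A sketch of that claim step; the type name and the -EBUSY choice follow the pattern rather than the driver verbatim:

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/errno.h>

struct my_rst {
	spinlock_t ctx_lock;
	pid_t reset_pid;	/* 0 while no reset is in flight */
};

static int claim_reset(struct my_rst *md)
{
	int ret = 0;

	spin_lock(&md->ctx_lock);
	if (!md->reset_pid)
		md->reset_pid = task_pid_nr(current);
	else
		ret = -EBUSY;	/* another process owns the reset */
	spin_unlock(&md->ctx_lock);

	return ret;
}
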
1207 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
1213 pg_size = ((pci_resource_len(vk->pdev, VK_MMAPABLE_BAR) - 1)
1218 vma->vm_pgoff += (pci_resource_start(vk->pdev, VK_MMAPABLE_BAR)
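
bcm_vk_mmap() (lines 1207-1218) exposes one PCI BAR to user space: it bounds the request against the BAR length in pages, then shifts vm_pgoff to the BAR's physical frame before remapping. A sketch of the same steps; MY_BAR, the my_mdev wrapper, and the noncached protection are assumptions:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pci.h>

#define MY_BAR 2	/* placeholder BAR index */

struct my_mdev { struct pci_dev *pdev; };

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_mdev *md = file->private_data;
	unsigned long pg_size;

	/* BAR length in pages, rounded up */
	pg_size = ((pci_resource_len(md->pdev, MY_BAR) - 1)
		   >> PAGE_SHIFT) + 1;
	if (vma->vm_pgoff + vma_pages(vma) > pg_size)
		return -EINVAL;

	/* shift the file offset into the BAR's physical page frames */
	vma->vm_pgoff += pci_resource_start(md->pdev, MY_BAR) >> PAGE_SHIFT;

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  pgprot_noncached(vma->vm_page_prot));
}
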
1231 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
1234 dev_dbg(&vk->pdev->dev,
1238 mutex_lock(&vk->mutex);
1242 ret = bcm_vk_load_image(vk, argp);
1246 ret = bcm_vk_reset(vk, argp);
1253 mutex_unlock(&vk->mutex);
1272 struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb);
1274 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
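
bcm_vk_on_panic (lines 1272-1274, registered at lines 1448-1450) rings the hard-reset doorbell from the panic notifier chain, presumably so the card stops DMAing into a host that is going down. A sketch of the hookup, assuming my_dev carries a struct notifier_block panic_nb; the doorbell helper is hypothetical:

#include <linux/notifier.h>
#include <linux/panic_notifier.h>

static int my_on_panic(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct my_dev *md = container_of(nb, struct my_dev, panic_nb);

	my_ring_doorbell(md, MY_RESET_DB_HARD);	/* hypothetical helper */
	return NOTIFY_DONE;
}

static void my_register_panic_nb(struct my_dev *md)
{
	md->panic_nb.notifier_call = my_on_panic;
	atomic_notifier_chain_register(&panic_notifier_list, &md->panic_nb);
}
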
1286 struct bcm_vk *vk;
1291 /* allocate vk structure which is tied to kref for freeing */
1292 vk = kzalloc(sizeof(*vk), GFP_KERNEL);
1293 if (!vk)
1296 kref_init(&vk->kref);
1302 vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE;
1303 mutex_init(&vk->mutex);
1310 vk->pdev = pci_dev_get(pdev);
1328 vk->tdma_vaddr = dma_alloc_coherent
1331 &vk->tdma_addr, GFP_KERNEL);
1332 if (!vk->tdma_vaddr) {
1339 pci_set_drvdata(pdev, vk);
1359 vk->bar[i] = pci_ioremap_bar(pdev, i * 2);
1360 if (!vk->bar[i]) {
1367 for (vk->num_irqs = 0;
1368 vk->num_irqs < VK_MSIX_MSGQ_MAX;
1369 vk->num_irqs++) {
1370 err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
1372 IRQF_SHARED, DRV_MODULE_NAME, vk);
1375 pdev->irq + vk->num_irqs, vk->num_irqs + 1);
1380 err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
1382 IRQF_SHARED, DRV_MODULE_NAME, vk);
1385 pdev->irq + vk->num_irqs, vk->num_irqs + 1);
1388 vk->num_irqs++;
1391 (i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq);
1392 i++, vk->num_irqs++) {
1393 err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
1395 IRQF_SHARED, DRV_MODULE_NAME, vk);
1398 pdev->irq + vk->num_irqs, vk->num_irqs + 1);
1401 bcm_vk_tty_set_irq_enabled(vk, i);
1411 vk->devid = id;
1413 misc_device = &vk->miscdev;
1428 INIT_WORK(&vk->wq_work, bcm_vk_wq_handler);
1431 vk->wq_thread = create_singlethread_workqueue(name);
1432 if (!vk->wq_thread) {
1438 err = bcm_vk_msg_init(vk);
1445 bcm_vk_sync_card_info(vk);
1448 vk->panic_nb.notifier_call = bcm_vk_on_panic;
1450 &vk->panic_nb);
1457 err = bcm_vk_tty_init(vk, name);
1465 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
1468 err = bcm_vk_trigger_autoload(vk);
1479 bcm_vk_hb_init(vk);
1486 bcm_vk_tty_exit(vk);
1490 &vk->panic_nb);
1493 destroy_workqueue(vk->wq_thread);
1506 for (i = 0; i < vk->num_irqs; i++)
1507 devm_free_irq(dev, pci_irq_vector(pdev, i), vk);
1514 if (vk->bar[i])
1515 pci_iounmap(pdev, vk->bar[i]);
1520 if (vk->tdma_vaddr)
1522 vk->tdma_vaddr, vk->tdma_addr);
1529 kfree(vk);
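
Within probe, the driver requests one MSI-X vector per message queue and then per-TTY vectors, looping over pci_irq_vector() (lines 1367-1401); num_irqs counts successes so the error and remove paths (lines 1506-1507, 1578-1579) know how many to free explicitly even though they are devm-managed. A sketch of one such loop; the handler and naming are placeholders:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_msgq_irqhandler(int irq, void *dev_id);

static int request_msgq_irqs(struct pci_dev *pdev, void *drvdata, int nvec)
{
	int i, err;

	for (i = 0; i < nvec; i++) {
		err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       my_msgq_irqhandler, IRQF_SHARED,
				       "my_drv", drvdata);
		if (err)
			return err;	/* earlier vectors stay devm-managed */
	}
	return 0;
}
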
1536 struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref);
1537 struct pci_dev *pdev = vk->pdev;
1539 dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk);
1541 kfree(vk);
1547 struct bcm_vk *vk = pci_get_drvdata(pdev);
1548 struct miscdevice *misc_device = &vk->miscdev;
1550 bcm_vk_hb_deinit(vk);
1558 bcm_vk_trigger_reset(vk);
1563 &vk->panic_nb);
1565 bcm_vk_msg_remove(vk);
1566 bcm_vk_tty_exit(vk);
1568 if (vk->tdma_vaddr)
1570 vk->tdma_vaddr, vk->tdma_addr);
1576 ida_free(&bcm_vk_ida, vk->devid);
1578 for (i = 0; i < vk->num_irqs; i++)
1579 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
1584 cancel_work_sync(&vk->wq_work);
1585 destroy_workqueue(vk->wq_thread);
1586 bcm_vk_tty_wq_exit(vk);
1589 if (vk->bar[i])
1590 pci_iounmap(pdev, vk->bar[i]);
1593 dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid);
1599 kref_put(&vk->kref, bcm_vk_release_data);
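
The device structure's lifetime is kref-managed: kref_init at probe (line 1296), kfree either on a probe error path (line 1529) or from bcm_vk_release_data (lines 1536-1541), and a final kref_put at the end of remove (line 1599), presumably so an open file descriptor can keep the structure alive past PCI removal. A sketch of that idiom:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_kdev {
	struct kref kref;
	/* ... hardware state ... */
};

static void my_release(struct kref *kref)
{
	struct my_kdev *md = container_of(kref, struct my_kdev, kref);

	kfree(md);	/* last reference gone: free the structure */
}

/* probe:           kref_init(&md->kref);
 * open():          kref_get(&md->kref);
 * close()/remove:  kref_put(&md->kref, my_release);
 */
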
1604 struct bcm_vk *vk = pci_get_drvdata(pdev);
1607 reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
1612 bcm_vk_trigger_reset(vk);
1629 vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS);