Lines matching references to stdev — the struct switchtec_dev per-device context used throughout the Switchtec PCI switch management driver. Each entry below is prefixed with its line number in the source file.
51 struct switchtec_dev *stdev;
69 static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
77 get_device(&stdev->dev);
78 stuser->stdev = stdev;
82 stuser->event_cnt = atomic_read(&stdev->event_cnt);
84 dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
95 dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
97 put_device(&stuser->stdev->dev);
120 dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
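The matches at lines 51–120 trace the per-open user context: stuser_create() pins the parent device with get_device() and snapshots the event counter, and the release path drops that reference with put_device(). A condensed sketch of the pattern follows; the reduced types (st_parent, st_user) are hypothetical stand-ins, and the kref-based teardown is a simplified assumption about the driver's actual release path.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical reduced types, for illustration only. */
struct st_parent {
	struct device dev;
};

struct st_user {
	struct st_parent *parent;
	struct kref kref;
};

static struct st_user *st_user_create(struct st_parent *p)
{
	struct st_user *u = kzalloc(sizeof(*u), GFP_KERNEL);

	if (!u)
		return ERR_PTR(-ENOMEM);

	get_device(&p->dev);		/* pin parent for the user's lifetime */
	u->parent = p;
	kref_init(&u->kref);
	return u;
}

static void st_user_release(struct kref *kref)
{
	struct st_user *u = container_of(kref, struct st_user, kref);

	put_device(&u->parent->dev);	/* drop the create-time reference */
	kfree(u);
}

static void st_user_put(struct st_user *u)
{
	kref_put(&u->kref, st_user_release);
}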
124 static void mrpc_complete_cmd(struct switchtec_dev *stdev);
126 static void flush_wc_buf(struct switchtec_dev *stdev)
134 mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
139 static void mrpc_cmd_submit(struct switchtec_dev *stdev)
145 if (stdev->mrpc_busy)
148 if (list_empty(&stdev->mrpc_queue))
151 stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
154 if (stdev->dma_mrpc) {
155 stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
156 memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
160 stdev->mrpc_busy = 1;
161 memcpy_toio(&stdev->mmio_mrpc->input_data,
163 flush_wc_buf(stdev);
164 iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
166 schedule_delayed_work(&stdev->mrpc_timeout,
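Lines 139–166 walk the submit path: at most one MRPC command is in flight (mrpc_busy), the head of mrpc_queue is copied into the input window with memcpy_toio(), the write-combining buffer is flushed before the command doorbell is written, and a delayed work is armed to poll for completion. A condensed sketch, assuming a kernel build context, the driver-internal switchtec_user fields shown in the matches, and a 500 ms poll interval:

static void sketch_cmd_submit(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	if (stdev->mrpc_busy || list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_first_entry(&stdev->mrpc_queue,
				  struct switchtec_user, list);

	if (stdev->dma_mrpc) {
		/* Pre-mark the shadow buffer so completion is detectable. */
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF,
		       SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);	/* order WC writes ahead of the doorbell */
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}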
174 struct switchtec_dev *stdev = stuser->stdev;
180 list_add_tail(&stuser->list, &stdev->mrpc_queue);
182 mrpc_cmd_submit(stdev);
187 static void mrpc_complete_cmd(struct switchtec_dev *stdev)
192 if (list_empty(&stdev->mrpc_queue))
195 stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
198 if (stdev->dma_mrpc)
199 stuser->status = stdev->dma_mrpc->status;
201 stuser->status = ioread32(&stdev->mmio_mrpc->status);
212 if (stdev->dma_mrpc)
213 stuser->return_code = stdev->dma_mrpc->rtn_code;
215 stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
219 if (stdev->dma_mrpc)
220 memcpy(stuser->data, &stdev->dma_mrpc->data,
223 memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
230 stdev->mrpc_busy = 0;
232 mrpc_cmd_submit(stdev);
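Lines 187–232 show the completion read-back and its two paths: when a DMA shadow buffer is present, status, return code, and output data come from coherent memory; otherwise they are read through MMIO. A condensed sketch (field names follow the matches; the early return while the command is still in progress is elided with a comment):

static void sketch_complete(struct switchtec_dev *stdev,
			    struct switchtec_user *stuser)
{
	/* The real path bails out first if status is still INPROGRESS. */
	if (stdev->dma_mrpc) {
		stuser->status = stdev->dma_mrpc->status;
		stuser->return_code = stdev->dma_mrpc->rtn_code;
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	} else {
		stuser->status = ioread32(&stdev->mmio_mrpc->status);
		stuser->return_code =
			ioread32(&stdev->mmio_mrpc->ret_value);
		memcpy_fromio(stuser->data,
			      &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
	}

	stdev->mrpc_busy = 0;
	mrpc_cmd_submit(stdev);		/* kick the next queued command */
}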
237 struct switchtec_dev *stdev;
239 stdev = container_of(work, struct switchtec_dev, mrpc_work);
241 dev_dbg(&stdev->dev, "%s\n", __func__);
243 mutex_lock(&stdev->mrpc_mutex);
244 cancel_delayed_work(&stdev->mrpc_timeout);
245 mrpc_complete_cmd(stdev);
246 mutex_unlock(&stdev->mrpc_mutex);
251 struct switchtec_dev *stdev;
254 stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
256 dev_dbg(&stdev->dev, "%s\n", __func__);
258 mutex_lock(&stdev->mrpc_mutex);
260 if (stdev->dma_mrpc)
261 status = stdev->dma_mrpc->status;
263 status = ioread32(&stdev->mmio_mrpc->status);
265 schedule_delayed_work(&stdev->mrpc_timeout,
270 mrpc_complete_cmd(stdev);
272 mutex_unlock(&stdev->mrpc_mutex);
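Lines 251–272 are the poll-or-finish timeout: if the firmware still reports INPROGRESS, the delayed work re-arms itself; otherwise the command completes under the same mutex the submit path takes. A sketch of that shape, with the interval again assumed to be 500 ms:

static void sketch_timeout(struct work_struct *work)
{
	struct switchtec_dev *stdev =
		container_of(work, struct switchtec_dev, mrpc_timeout.work);
	u32 status;

	mutex_lock(&stdev->mrpc_mutex);
	status = stdev->dma_mrpc ? stdev->dma_mrpc->status :
		 ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
	else
		mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}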
278 struct switchtec_dev *stdev = to_stdev(dev);
281 ver = ioread32(&stdev->mmio_sys_info->device_version);
290 struct switchtec_dev *stdev = to_stdev(dev);
293 ver = ioread32(&stdev->mmio_sys_info->firmware_version);
321 struct switchtec_dev *stdev = to_stdev(dev); \
322 struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
323 if (stdev->gen == SWITCHTEC_GEN3) \
326 else if (stdev->gen == SWITCHTEC_GEN4) \
342 struct switchtec_dev *stdev = to_stdev(dev);
343 struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
346 if (stdev->gen != SWITCHTEC_GEN3)
357 struct switchtec_dev *stdev = to_stdev(dev);
358 int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
361 if (stdev->gen != SWITCHTEC_GEN3)
371 struct switchtec_dev *stdev = to_stdev(dev);
372 int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
375 if (stdev->gen != SWITCHTEC_GEN3)
385 struct switchtec_dev *stdev = to_stdev(dev);
387 return sprintf(buf, "%d\n", stdev->partition);
394 struct switchtec_dev *stdev = to_stdev(dev);
396 return sprintf(buf, "%d\n", stdev->partition_count);
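Lines 278–396 cover the sysfs attributes: each show() callback recovers the device with to_stdev() and reads a register through MMIO, and the string attributes branch on stdev->gen because the Gen3 and Gen4 sys_info layouts differ. A representative sketch of one read-only attribute (the "%x" format is an assumption; exact formats vary per attribute):

static ssize_t device_version_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);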
418 struct switchtec_dev *stdev;
421 stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
423 stuser = stuser_create(stdev);
430 dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
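Lines 418–430 show the open() pattern: the driver object is recovered from the embedded cdev via container_of(), and a per-open user context lands in filp->private_data. A minimal sketch (nonseekable_open() stands in for whatever stream semantics the real driver chooses):

static int sketch_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev =
		container_of(inode->i_cdev, struct switchtec_dev, cdev);
	struct switchtec_user *stuser = stuser_create(stdev);

	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	return nonseekable_open(inode, filp);
}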
444 static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
446 if (mutex_lock_interruptible(&stdev->mrpc_mutex))
449 if (!stdev->alive) {
450 mutex_unlock(&stdev->mrpc_mutex);
461 struct switchtec_dev *stdev = stuser->stdev;
470 rc = lock_mutex_and_test_alive(stdev);
501 mutex_unlock(&stdev->mrpc_mutex);
513 struct switchtec_dev *stdev = stuser->stdev;
520 rc = lock_mutex_and_test_alive(stdev);
525 mutex_unlock(&stdev->mrpc_mutex);
531 mutex_unlock(&stdev->mrpc_mutex);
543 rc = lock_mutex_and_test_alive(stdev);
548 mutex_unlock(&stdev->mrpc_mutex);
570 mutex_unlock(&stdev->mrpc_mutex);
583 struct switchtec_dev *stdev = stuser->stdev;
587 poll_wait(filp, &stdev->event_wq, wait);
589 if (lock_mutex_and_test_alive(stdev))
592 mutex_unlock(&stdev->mrpc_mutex);
597 if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
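Lines 583–597 are the poll path: register on the event wait queue, report an all-bits error mask if the device has been killed, and otherwise signal readiness when the per-user event-count snapshot lags the device-wide atomic counter. A sketch under those assumptions (the exact mask bits are illustrative):

static __poll_t sketch_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI;	/* new events since last summary read */

	return ret;
}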
603 static int ioctl_flash_info(struct switchtec_dev *stdev,
607 struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
609 if (stdev->gen == SWITCHTEC_GEN3) {
612 } else if (stdev->gen == SWITCHTEC_GEN4) {
632 static int flash_part_info_gen3(struct switchtec_dev *stdev,
636 &stdev->mmio_flash_info->gen3;
637 struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
702 static int flash_part_info_gen4(struct switchtec_dev *stdev,
705 struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
706 struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
806 static int ioctl_flash_part_info(struct switchtec_dev *stdev,
815 if (stdev->gen == SWITCHTEC_GEN3) {
816 ret = flash_part_info_gen3(stdev, &info);
819 } else if (stdev->gen == SWITCHTEC_GEN4) {
820 ret = flash_part_info_gen4(stdev, &info);
833 static int ioctl_event_summary(struct switchtec_dev *stdev,
847 s->global = ioread32(&stdev->mmio_sw_event->global_summary);
848 s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
849 s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
851 for (i = 0; i < stdev->partition_count; i++) {
852 reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
856 for (i = 0; i < stdev->pff_csr_count; i++) {
857 reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
866 stuser->event_cnt = atomic_read(&stdev->event_cnt);
873 static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
876 return (void __iomem *)stdev->mmio_sw_event + offset;
879 static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
882 return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
885 static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
888 return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
897 u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
937 static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
949 index = stdev->partition;
950 else if (index < 0 || index >= stdev->partition_count)
953 if (index < 0 || index >= stdev->pff_csr_count)
957 return event_regs[event_id].map_reg(stdev, off, index);
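Lines 873–957 show how event headers are located: each event ID maps to a table entry holding a register offset plus a map_reg callback that resolves it against the global, partition, or PFF register space, and event_hdr_addr() validates the index before calling through. A condensed sketch of the indirection (the offsets and table contents here are hypothetical):

struct sketch_event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
};

static u32 __iomem *sketch_global_ev_reg(struct switchtec_dev *stdev,
					 size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *sketch_part_ev_reg(struct switchtec_dev *stdev,
				       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

/* Hypothetical table fragment; the real table is indexed by event ID. */
static const struct sketch_event_reg sketch_event_regs[] = {
	{ 0x10, sketch_global_ev_reg },
	{ 0x20, sketch_part_ev_reg },
};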
960 static int event_ctl(struct switchtec_dev *stdev,
967 reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
1013 static int ioctl_event_ctl(struct switchtec_dev *stdev,
1034 nr_idxs = stdev->partition_count;
1036 nr_idxs = stdev->pff_csr_count;
1043 ret = event_ctl(stdev, &ctl);
1048 ret = event_ctl(stdev, &ctl);
1059 static int ioctl_pff_to_port(struct switchtec_dev *stdev,
1071 for (part = 0; part < stdev->partition_count; part++) {
1072 pcfg = &stdev->mmio_part_cfg_all[part];
1106 static int ioctl_port_to_pff(struct switchtec_dev *stdev,
1116 pcfg = stdev->mmio_part_cfg;
1117 else if (p.partition < stdev->partition_count)
1118 pcfg = &stdev->mmio_part_cfg_all[p.partition];
1148 struct switchtec_dev *stdev = stuser->stdev;
1152 rc = lock_mutex_and_test_alive(stdev);
1158 rc = ioctl_flash_info(stdev, argp);
1161 rc = ioctl_flash_part_info(stdev, argp);
1164 rc = ioctl_event_summary(stdev, stuser, argp,
1168 rc = ioctl_event_ctl(stdev, argp);
1171 rc = ioctl_pff_to_port(stdev, argp);
1174 rc = ioctl_port_to_pff(stdev, argp);
1177 rc = ioctl_event_summary(stdev, stuser, argp,
1185 mutex_unlock(&stdev->mrpc_mutex);
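Lines 1148–1185 are the ioctl dispatcher: take the MRPC mutex, verify the device is still alive, route on the command number, then unlock. A minimal sketch of that shape, showing only one case (SWITCHTEC_IOCTL_FLASH_INFO is from the driver's uapi header; the rest of the switch is elided):

static long sketch_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	void __user *argp = (void __user *)arg;
	int rc;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);
	return rc;
}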
1202 struct switchtec_dev *stdev;
1204 stdev = container_of(work, struct switchtec_dev, link_event_work);
1206 if (stdev->link_notifier)
1207 stdev->link_notifier(stdev);
1210 static void check_link_state_events(struct switchtec_dev *stdev)
1217 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1218 reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
1219 dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
1222 if (count != stdev->link_event_count[idx]) {
1224 stdev->link_event_count[idx] = count;
1229 schedule_work(&stdev->link_event_work);
1232 static void enable_link_state_events(struct switchtec_dev *stdev)
1236 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1239 &stdev->mmio_pff_csr[idx].link_state_hdr);
1243 static void enable_dma_mrpc(struct switchtec_dev *stdev)
1245 writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
1246 flush_wc_buf(stdev);
1247 iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
1252 struct switchtec_dev *stdev = to_stdev(dev);
1254 kfree(stdev);
1257 static void stdev_kill(struct switchtec_dev *stdev)
1261 pci_clear_master(stdev->pdev);
1263 cancel_delayed_work_sync(&stdev->mrpc_timeout);
1266 mutex_lock(&stdev->mrpc_mutex);
1267 stdev->alive = false;
1270 list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
1277 mutex_unlock(&stdev->mrpc_mutex);
1280 wake_up_interruptible(&stdev->event_wq);
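Lines 1257–1280 give the shutdown ordering: stop bus mastering, cancel the timeout work, mark the device dead under the MRPC mutex so no new commands start, fail out every queued user, then wake any pollers. A sketch of the sequence (the per-user completion signalling is simplified to a list removal):

static void sketch_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmp;

	pci_clear_master(stdev->pdev);
	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* The real path also completes each user with an error status. */
	list_for_each_entry_safe(stuser, tmp, &stdev->mrpc_queue, list)
		list_del_init(&stuser->list);
	mutex_unlock(&stdev->mrpc_mutex);

	wake_up_interruptible(&stdev->event_wq);
}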
1285 struct switchtec_dev *stdev;
1291 stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
1293 if (!stdev)
1296 stdev->alive = true;
1297 stdev->pdev = pci_dev_get(pdev);
1298 INIT_LIST_HEAD(&stdev->mrpc_queue);
1299 mutex_init(&stdev->mrpc_mutex);
1300 stdev->mrpc_busy = 0;
1301 INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
1302 INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
1303 INIT_WORK(&stdev->link_event_work, link_event_work);
1304 init_waitqueue_head(&stdev->event_wq);
1305 atomic_set(&stdev->event_cnt, 0);
1307 dev = &stdev->dev;
1324 cdev = &stdev->cdev;
1328 return stdev;
1331 pci_dev_put(stdev->pdev);
1332 put_device(&stdev->dev);
1336 static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
1342 hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
1348 dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
1355 static int mask_all_events(struct switchtec_dev *stdev, int eid)
1361 for (idx = 0; idx < stdev->partition_count; idx++)
1362 count += mask_event(stdev, eid, idx);
1364 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1365 if (!stdev->pff_local[idx])
1368 count += mask_event(stdev, eid, idx);
1371 count += mask_event(stdev, eid, 0);
1379 struct switchtec_dev *stdev = dev;
1384 reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
1386 dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
1388 schedule_work(&stdev->mrpc_work);
1389 iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
1392 check_link_state_events(stdev);
1399 event_count += mask_all_events(stdev, eid);
1403 atomic_inc(&stdev->event_cnt);
1404 wake_up_interruptible(&stdev->event_wq);
1405 dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
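Lines 1379–1405 trace the event ISR: acknowledge the MRPC completion header and punt to the work queue, check link-state changes, mask whatever event sources fired, and wake pollers if anything was counted. A condensed sketch (per-event masking is elided; SWITCHTEC_EVENT_OCCURRED comes from linux/switchtec.h):

static irqreturn_t sketch_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	irqreturn_t ret = IRQ_NONE;
	int event_count = 0;
	u32 reg;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
		ret = IRQ_HANDLED;
	}

	check_link_state_events(stdev);

	/* ... mask_all_events() over each event ID, counting hits ... */

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		ret = IRQ_HANDLED;
	}

	return ret;
}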
1416 struct switchtec_dev *stdev = dev;
1421 &stdev->mmio_part_cfg->mrpc_comp_hdr);
1422 schedule_work(&stdev->mrpc_work);
1428 static int switchtec_init_isr(struct switchtec_dev *stdev)
1438 nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
1444 event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
1448 event_irq = pci_irq_vector(stdev->pdev, event_irq);
1452 rc = devm_request_irq(&stdev->pdev->dev, event_irq,
1454 KBUILD_MODNAME, stdev);
1459 if (!stdev->dma_mrpc)
1462 dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
1466 dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
1470 rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
1472 KBUILD_MODNAME, stdev);
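Lines 1428–1472 show interrupt setup: allocate MSI/MSI-X vectors, read which vector the hardware routes events to, translate it to a Linux IRQ number with pci_irq_vector(), and request it with devm so teardown is automatic. A sketch covering only the event vector (the vector count of 4 is an assumption; the DMA-MRPC vector follows the same pattern):

static int sketch_init_isr(struct switchtec_dev *stdev)
{
	int event_irq;

	if (pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				  PCI_IRQ_MSIX | PCI_IRQ_MSI) < 0)
		return -ENOSPC;

	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	return devm_request_irq(&stdev->pdev->dev, event_irq,
				sketch_event_isr, 0,
				KBUILD_MODNAME, stdev);
}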
1477 static void init_pff(struct switchtec_dev *stdev)
1481 struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;
1484 reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
1489 stdev->pff_csr_count = i;
1492 if (reg < stdev->pff_csr_count)
1493 stdev->pff_local[reg] = 1;
1496 if (reg < stdev->pff_csr_count)
1497 stdev->pff_local[reg] = 1;
1501 if (reg < stdev->pff_csr_count)
1502 stdev->pff_local[reg] = 1;
1506 static int switchtec_init_pci(struct switchtec_dev *stdev,
1531 stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
1533 if (!stdev->mmio_mrpc)
1542 stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
1543 stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
1544 stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
1545 stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
1546 stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
1548 if (stdev->gen == SWITCHTEC_GEN3)
1549 part_id = &stdev->mmio_sys_info->gen3.partition_id;
1550 else if (stdev->gen == SWITCHTEC_GEN4)
1551 part_id = &stdev->mmio_sys_info->gen4.partition_id;
1555 stdev->partition = ioread8(part_id);
1556 stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
1557 stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
1558 stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
1559 stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
1561 if (stdev->partition_count < 1)
1562 stdev->partition_count = 1;
1564 init_pff(stdev);
1566 pci_set_drvdata(pdev, stdev);
1571 if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
1574 stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
1575 sizeof(*stdev->dma_mrpc),
1576 &stdev->dma_mrpc_dma_addr,
1578 if (stdev->dma_mrpc == NULL)
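Lines 1506–1578 end PCI init with the optional DMA shadow setup: if the hardware advertises a DMA-capable MRPC interface (dma_ver nonzero), allocate a coherent buffer the firmware can write status and output into, avoiding MMIO reads on the completion path. A sketch of that tail:

static int sketch_init_dma_mrpc(struct switchtec_dev *stdev)
{
	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;	/* not supported; fall back to MMIO reads */

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	return stdev->dma_mrpc ? 0 : -ENOMEM;
}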
1584 static void switchtec_exit_pci(struct switchtec_dev *stdev)
1586 if (stdev->dma_mrpc) {
1587 iowrite32(0, &stdev->mmio_mrpc->dma_en);
1588 flush_wc_buf(stdev);
1589 writeq(0, &stdev->mmio_mrpc->dma_addr);
1590 dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
1591 stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
1592 stdev->dma_mrpc = NULL;
1599 struct switchtec_dev *stdev;
1605 stdev = stdev_create(pdev);
1606 if (IS_ERR(stdev))
1607 return PTR_ERR(stdev);
1609 stdev->gen = id->driver_data;
1611 rc = switchtec_init_pci(stdev, pdev);
1615 rc = switchtec_init_isr(stdev);
1617 dev_err(&stdev->dev, "failed to init isr.\n");
1623 &stdev->mmio_part_cfg->mrpc_comp_hdr);
1624 enable_link_state_events(stdev);
1626 if (stdev->dma_mrpc)
1627 enable_dma_mrpc(stdev);
1629 rc = cdev_device_add(&stdev->cdev, &stdev->dev);
1633 dev_info(&stdev->dev, "Management device registered.\n");
1638 stdev_kill(stdev);
1640 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
1641 put_device(&stdev->dev);
1647 struct switchtec_dev *stdev = pci_get_drvdata(pdev);
1651 cdev_device_del(&stdev->cdev, &stdev->dev);
1652 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
1653 dev_info(&stdev->dev, "unregistered.\n");
1654 stdev_kill(stdev);
1655 switchtec_exit_pci(stdev);
1656 pci_dev_put(stdev->pdev);
1657 stdev->pdev = NULL;
1658 put_device(&stdev->dev);