Lines Matching defs:viodev (occurrences of viodev in the powerpc VIO bus driver, vio.c; the leading number on each entry is the line number in that source file)

70 	struct vio_dev *viodev;
134 * @viodev: VIO device requesting IO memory
145 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
155 if (viodev->cmo.entitled > viodev->cmo.allocated)
156 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
167 viodev->cmo.allocated += size;
179 * @viodev: VIO device freeing IO memory
189 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
202 if (viodev->cmo.allocated > viodev->cmo.entitled) {
203 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
204 viodev->cmo.entitled));
209 viodev->cmo.allocated -= (reserve_freed + excess_freed);
235 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
238 viodev->cmo.entitled -= tmp;
280 struct vio_dev *viodev;
319 viodev = dev_ent->viodev;
320 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
321 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
322 avail += viodev->cmo.entitled -
323 max_t(size_t, viodev->cmo.allocated,
344 viodev = dev_ent->viodev;
346 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
347 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
348 tmp = viodev->cmo.entitled -
349 max_t(size_t, viodev->cmo.allocated,
351 viodev->cmo.entitled -= min(tmp, delta);
389 struct vio_dev *viodev;
412 viodev = dev_ent->viodev;
414 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
415 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
416 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
428 viodev = dev_ent->viodev;
430 if (viodev->cmo.desired <= level) {
441 chunk = min(chunk, (viodev->cmo.desired -
442 viodev->cmo.entitled));
443 viodev->cmo.entitled += chunk;
450 need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
451 max(viodev->cmo.allocated, level);
466 viodev = dev_ent->viodev;
468 if (viodev->cmo.entitled)
469 cmo->reserve.size += (viodev->cmo.entitled -
472 if (viodev->cmo.allocated > viodev->cmo.entitled)
473 need += viodev->cmo.allocated - viodev->cmo.entitled;
486 struct vio_dev *viodev = to_vio_dev(dev);
489 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
490 atomic_inc(&viodev->cmo.allocs_failed);
498 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
499 atomic_inc(&viodev->cmo.allocs_failed);
509 struct vio_dev *viodev = to_vio_dev(dev);
512 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
520 struct vio_dev *viodev = to_vio_dev(dev);
524 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
533 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
535 atomic_inc(&viodev->cmo.allocs_failed);
544 struct vio_dev *viodev = to_vio_dev(dev);
548 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
555 struct vio_dev *viodev = to_vio_dev(dev);
564 ret = vio_cmo_alloc(viodev, alloc_size);
575 vio_cmo_dealloc(viodev, alloc_size);
579 vio_cmo_dealloc(viodev, alloc_size);
581 atomic_inc(&viodev->cmo.allocs_failed);
590 struct vio_dev *viodev = to_vio_dev(dev);
600 vio_cmo_dealloc(viodev, alloc_size);
621 * @viodev: struct vio_dev for device to alter
628 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
647 if (viodev == dev_ent->viodev) {
657 if (desired >= viodev->cmo.desired) {
659 vio_cmo.desired += desired - viodev->cmo.desired;
660 viodev->cmo.desired = desired;
663 vio_cmo.desired -= viodev->cmo.desired - desired;
664 viodev->cmo.desired = desired;
669 if (viodev->cmo.entitled > desired) {
670 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
671 vio_cmo.excess.size += viodev->cmo.entitled - desired;
677 if (viodev->cmo.allocated < viodev->cmo.entitled)
678 vio_cmo.excess.free += viodev->cmo.entitled -
679 max(viodev->cmo.allocated, desired);
680 viodev->cmo.entitled = desired;
690 * @viodev - Pointer to struct vio_dev for device
701 static int vio_cmo_bus_probe(struct vio_dev *viodev)
704 struct device *dev = &viodev->dev;
714 switch (viodev->family) {
716 if (of_get_property(viodev->dev.of_node,
724 dev_warn(dev, "unknown device family: %d\n", viodev->family);
738 viodev->cmo.desired =
739 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
740 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
741 viodev->cmo.desired = VIO_CMO_MIN_ENT;
749 dev_ent->viodev = viodev;
753 viodev->cmo.desired = 0;
768 vio_cmo.desired += (viodev->cmo.desired -
794 vio_cmo.desired += viodev->cmo.desired;
803 * @viodev - Pointer to struct vio_dev for device
810 static void vio_cmo_bus_remove(struct vio_dev *viodev)
817 if (viodev->cmo.allocated) {
818 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
820 __func__, viodev->cmo.allocated);
829 if (viodev == dev_ent->viodev) {
840 if (viodev->cmo.entitled) {
846 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
853 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
856 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
857 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
860 viodev->cmo.entitled -= tmp;
864 vio_cmo.excess.size += viodev->cmo.entitled;
865 vio_cmo.excess.free += viodev->cmo.entitled;
866 vio_cmo.reserve.size -= viodev->cmo.entitled;
873 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
874 viodev->cmo.desired = VIO_CMO_MIN_ENT;
875 atomic_set(&viodev->cmo.allocs_failed, 0);
881 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
883 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
949 struct vio_dev *viodev = to_vio_dev(dev);
950 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
956 struct vio_dev *viodev = to_vio_dev(dev);
957 atomic_set(&viodev->cmo.allocs_failed, 0);
964 struct vio_dev *viodev = to_vio_dev(dev);
972 vio_cmo_set_dev_desired(viodev, new_desired);
1072 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1073 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1074 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1075 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1237 struct vio_dev *viodev = to_vio_dev(dev);
1245 id = vio_match_device(viodrv->id_table, viodev);
1247 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1249 error = vio_cmo_bus_probe(viodev);
1253 error = viodrv->probe(viodev, id);
1255 vio_cmo_bus_remove(viodev);
1264 struct vio_dev *viodev = to_vio_dev(dev);
1275 viodrv->remove(viodev);
1278 vio_cmo_bus_remove(viodev);
1285 struct vio_dev *viodev = to_vio_dev(dev);
1291 viodrv->shutdown(viodev);
1353 struct vio_dev *viodev;
1392 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1393 if (viodev == NULL) {
1399 viodev->family = family;
1400 if (viodev->family == VDEVICE) {
1403 viodev->type = of_node_get_device_type(of_node);
1404 if (!viodev->type) {
1417 dev_set_name(&viodev->dev, "%x", unit_address);
1418 viodev->irq = irq_of_parse_and_map(of_node, 0);
1419 viodev->unit_address = unit_address;
1426 viodev->resource_id = of_read_number(prop, 1);
1428 dev_set_name(&viodev->dev, "%pOFn", of_node);
1429 viodev->type = dev_name(&viodev->dev);
1430 viodev->irq = 0;
1433 viodev->name = of_node->name;
1434 viodev->dev.of_node = of_node_get(of_node);
1436 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1439 viodev->dev.parent = &vio_bus_device.dev;
1440 viodev->dev.bus = &vio_bus_type;
1441 viodev->dev.release = vio_dev_release;
1443 if (of_property_present(viodev->dev.of_node, "ibm,my-dma-window")) {
1445 vio_cmo_set_dma_ops(viodev);
1447 set_dma_ops(&viodev->dev, &dma_iommu_ops);
1449 set_iommu_table_base(&viodev->dev,
1450 vio_build_iommu_table(viodev));
1454 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1455 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
1459 if (device_register(&viodev->dev)) {
1461 __func__, dev_name(&viodev->dev));
1462 put_device(&viodev->dev);
1466 return viodev;
1469 kfree(viodev);
1595 void vio_unregister_device(struct vio_dev *viodev)
1597 device_unregister(&viodev->dev);
1598 if (viodev->family == VDEVICE)
1599 irq_dispose_mapping(viodev->irq);
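
Note: the matches above revolve around the per-device CMO (Cooperative Memory Overcommit) accounting carried in viodev->cmo: an entitled byte count, the bytes currently allocated against it, the driver's desired entitlement, and an allocs_failed counter bumped when a DMA mapping cannot be charged (lines 490, 499, 535, 581). The standalone userspace sketch below only illustrates that charge/refund pattern as it appears around lines 145-209 of the listing; the names cmo_account, cmo_pool, cmo_alloc and cmo_dealloc are hypothetical, the shared pool is simplified to a single excess counter, and the real code additionally maintains a reserve pool, a spare entitlement and the vio_cmo.lock around every update.

/* Standalone sketch (plain C) of the viodev->cmo charge/refund pattern
 * visible in the listing; field names mirror the matches above, the
 * rest is illustrative only and not the kernel's definitions. */
#include <stddef.h>
#include <stdio.h>

struct cmo_account {            /* stands in for viodev->cmo */
	size_t entitled;        /* bytes this device may consume */
	size_t allocated;       /* bytes currently charged */
	size_t desired;         /* entitlement the driver asked for */
	unsigned allocs_failed; /* the kernel uses atomic_t here */
};

struct cmo_pool {               /* stands in for the global excess pool */
	size_t excess_free;
};

/* Charge size bytes: spend the device's unused entitlement first, then
 * fall back to the shared excess pool (compare listing lines 145-167). */
static int cmo_alloc(struct cmo_account *cmo, struct cmo_pool *pool, size_t size)
{
	size_t reserve_free = 0;

	if (cmo->entitled > cmo->allocated)
		reserve_free = cmo->entitled - cmo->allocated;

	if (reserve_free + pool->excess_free < size)
		return -1;                      /* caller bumps allocs_failed */

	if (size > reserve_free)
		pool->excess_free -= size - reserve_free;
	cmo->allocated += size;
	return 0;
}

/* Refund size bytes: anything that had been charged beyond the device's
 * entitlement goes back to the excess pool (compare lines 189-209). */
static void cmo_dealloc(struct cmo_account *cmo, struct cmo_pool *pool, size_t size)
{
	size_t excess_freed = 0;

	if (cmo->allocated > cmo->entitled) {
		size_t over = cmo->allocated - cmo->entitled;
		excess_freed = size < over ? size : over;
	}
	pool->excess_free += excess_freed;
	cmo->allocated -= size;
}

int main(void)
{
	struct cmo_account dev = { .entitled = 4096, .desired = 8192 };
	struct cmo_pool pool = { .excess_free = 4096 };

	/* 4096 bytes come out of the entitlement, 2048 out of the excess pool */
	if (cmo_alloc(&dev, &pool, 6144))
		dev.allocs_failed++;
	printf("allocated=%zu excess_free=%zu\n", dev.allocated, pool.excess_free);

	cmo_dealloc(&dev, &pool, 6144);
	printf("allocated=%zu excess_free=%zu\n", dev.allocated, pool.excess_free);
	return 0;
}

The same charge/refund pair is what the dma_ops wrappers in lines 486-600 invoke around every coherent allocation and page/sg mapping, rounding the request up to the IOMMU page size before charging it.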