Lines Matching defs:viodev

68 	struct vio_dev *viodev;
132 * @viodev: VIO device requesting IO memory
143 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
153 if (viodev->cmo.entitled > viodev->cmo.allocated)
154 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
165 viodev->cmo.allocated += size;
177 * @viodev: VIO device freeing IO memory
187 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
200 if (viodev->cmo.allocated > viodev->cmo.entitled) {
201 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
202 viodev->cmo.entitled));
207 viodev->cmo.allocated -= (reserve_freed + excess_freed);
233 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
236 viodev->cmo.entitled -= tmp;
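
The hits above are in vio_cmo_alloc() (132-165) and vio_cmo_dealloc() (177-236): an allocation is charged against the device's own unused entitlement first and against the shared excess pool for the remainder, and a free walks the same accounting backwards. A minimal standalone model of that two-pool bookkeeping (simplified names, no locking, and none of the kernel's spare-pool or reserve redistribution, so a sketch rather than the real code):

/*
 * Standalone model of the two-pool accounting in vio_cmo_alloc() and
 * vio_cmo_dealloc().  Field names are simplified for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct cmo_dev {
	size_t entitled;	/* bytes reserved for this device */
	size_t allocated;	/* bytes currently charged */
};

struct cmo_pool {
	size_t excess_free;	/* shared pool beyond per-device reserves */
};

static int cmo_alloc(struct cmo_pool *pool, struct cmo_dev *dev, size_t size)
{
	size_t reserve_free = 0;

	/* Unused part of the device's own entitlement (lines 153-154). */
	if (dev->entitled > dev->allocated)
		reserve_free = dev->entitled - dev->allocated;

	/* Whatever does not fit in the reserve must come from excess. */
	size_t excess_needed = size > reserve_free ? size - reserve_free : 0;

	if (excess_needed > pool->excess_free)
		return -1;	/* not enough entitlement anywhere */

	pool->excess_free -= excess_needed;
	dev->allocated += size;	/* line 165 */
	return 0;
}

static void cmo_dealloc(struct cmo_pool *pool, struct cmo_dev *dev, size_t size)
{
	/* Bytes charged beyond the entitlement go back to excess first. */
	size_t excess_freed = 0;

	if (dev->allocated > dev->entitled)
		excess_freed = dev->allocated - dev->entitled;
	if (excess_freed > size)
		excess_freed = size;

	pool->excess_free += excess_freed;
	dev->allocated -= size;
}

int main(void)
{
	struct cmo_pool pool = { .excess_free = 4096 };
	struct cmo_dev dev = { .entitled = 8192 };
	int ret = cmo_alloc(&pool, &dev, 10240);

	printf("alloc 10K: %d, excess left %zu\n", ret, pool.excess_free);
	cmo_dealloc(&pool, &dev, 10240);
	printf("after free: excess %zu, allocated %zu\n",
	       pool.excess_free, dev.allocated);
	return 0;
}
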
278 struct vio_dev *viodev;
317 viodev = dev_ent->viodev;
318 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
319 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
320 avail += viodev->cmo.entitled -
321 max_t(size_t, viodev->cmo.allocated,
342 viodev = dev_ent->viodev;
344 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
345 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
346 tmp = viodev->cmo.entitled -
347 max_t(size_t, viodev->cmo.allocated,
349 viodev->cmo.entitled -= min(tmp, delta);
387 struct vio_dev *viodev;
410 viodev = dev_ent->viodev;
412 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
413 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
414 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
426 viodev = dev_ent->viodev;
428 if (viodev->cmo.desired <= level) {
439 chunk = min(chunk, (viodev->cmo.desired -
440 viodev->cmo.entitled));
441 viodev->cmo.entitled += chunk;
448 need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
449 max(viodev->cmo.allocated, level);
464 viodev = dev_ent->viodev;
466 if (viodev->cmo.entitled)
467 cmo->reserve.size += (viodev->cmo.entitled -
470 if (viodev->cmo.allocated > viodev->cmo.entitled)
471 need += viodev->cmo.allocated - viodev->cmo.entitled;
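
Lines 278-349 are vio_cmo_entitlement_update(), which absorbs a new total from the hypervisor by growing the excess pool or reclaiming entitlement that devices hold but have not allocated, and lines 387-471 are vio_cmo_balance(), which pins every device at VIO_CMO_MIN_ENT and then fans the remaining entitlement out toward each device's desired value, raising a common level in chunks so no device races ahead of the others. A rough standalone model of that distribution loop (fixed chunk size, no allocated or excess handling, invented names):

/* Rough model of the fair-growth loop in vio_cmo_balance(). */
#include <stddef.h>
#include <stdio.h>

#define MIN_ENT		1024u	/* stand-in for VIO_CMO_MIN_ENT */
#define CHUNK		1024u	/* stand-in for the balance chunk size */

struct dev { size_t desired, entitled; };

/* Assumes total covers n * MIN_ENT; the kernel guarantees that. */
static void balance(struct dev *devs, int n, size_t total)
{
	size_t avail = total;
	size_t level = MIN_ENT;
	int i;

	/* Every device is guaranteed the minimum entitlement (line 412). */
	for (i = 0; i < n; i++) {
		devs[i].entitled = MIN_ENT;
		avail -= MIN_ENT;
	}

	/* Raise a common level until the pool or the demand runs out. */
	while (avail) {
		int fully_grown = 0;

		for (i = 0; i < n && avail; i++) {
			if (devs[i].desired <= level) {	/* line 428 */
				fully_grown++;
				continue;
			}
			size_t chunk = CHUNK;

			if (chunk > avail)
				chunk = avail;
			if (chunk > devs[i].desired - devs[i].entitled)
				chunk = devs[i].desired - devs[i].entitled;
			devs[i].entitled += chunk;	/* line 441 */
			avail -= chunk;
		}
		if (fully_grown == n)
			break;	/* leftovers would feed the excess pool */
		level += CHUNK;
	}
}

int main(void)
{
	struct dev devs[] = { { .desired = 4096 }, { .desired = 16384 } };

	balance(devs, 2, 12288);
	printf("dev0 entitled %zu, dev1 entitled %zu\n",
	       devs[0].entitled, devs[1].entitled);
	return 0;
}
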
484 struct vio_dev *viodev = to_vio_dev(dev);
487 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
488 atomic_inc(&viodev->cmo.allocs_failed);
496 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
497 atomic_inc(&viodev->cmo.allocs_failed);
507 struct vio_dev *viodev = to_vio_dev(dev);
510 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
518 struct vio_dev *viodev = to_vio_dev(dev);
522 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
531 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
533 atomic_inc(&viodev->cmo.allocs_failed);
542 struct vio_dev *viodev = to_vio_dev(dev);
546 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
553 struct vio_dev *viodev = to_vio_dev(dev);
562 if (vio_cmo_alloc(viodev, alloc_size))
572 vio_cmo_dealloc(viodev, alloc_size);
576 vio_cmo_dealloc(viodev, alloc_size);
578 atomic_inc(&viodev->cmo.allocs_failed);
587 struct vio_dev *viodev = to_vio_dev(dev);
597 vio_cmo_dealloc(viodev, alloc_size);
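
Lines 484-597 are the vio_dma_iommu_* callbacks, which wrap the ordinary IOMMU DMA operations with CMO accounting: charge the rounded-up size before mapping, delegate to the underlying implementation, and on failure refund the charge and bump cmo.allocs_failed so the pressure shows up in sysfs; the unmap paths simply refund. Every wrapper has the same shape; a minimal standalone illustration of the pattern (backend_map() and the sizes are invented stand-ins):

/* Standalone illustration of the charge / map / refund-on-failure pattern. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t entitlement_free = 8192;	/* invented starting budget */
static unsigned long allocs_failed;

static int charge(size_t size)		/* plays the role of vio_cmo_alloc() */
{
	if (size > entitlement_free)
		return -1;
	entitlement_free -= size;
	return 0;
}

static void refund(size_t size)		/* plays the role of vio_cmo_dealloc() */
{
	entitlement_free += size;
}

static bool backend_map(size_t size)	/* the real IOMMU mapping goes here */
{
	return size < 16384;		/* pretend very large mappings fail */
}

static int wrapped_map(size_t size)
{
	if (charge(size)) {		/* not enough entitlement (line 487) */
		allocs_failed++;	/* line 488 */
		return -1;
	}
	if (!backend_map(size)) {	/* mapping itself failed */
		refund(size);		/* give the charge back (line 496) */
		allocs_failed++;
		return -1;
	}
	return 0;
}

static void wrapped_unmap(size_t size)
{
	/* the backend unmap would run first, then the refund (line 510) */
	refund(size);
}

int main(void)
{
	int ret = wrapped_map(4096);

	printf("map 4K: %d, free %zu\n", ret, entitlement_free);
	wrapped_unmap(4096);
	printf("after unmap: free %zu, failures %lu\n",
	       entitlement_free, allocs_failed);
	return 0;
}
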
618 * @viodev: struct vio_dev for device to alter
625 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
644 if (viodev == dev_ent->viodev) {
654 if (desired >= viodev->cmo.desired) {
656 vio_cmo.desired += desired - viodev->cmo.desired;
657 viodev->cmo.desired = desired;
660 vio_cmo.desired -= viodev->cmo.desired - desired;
661 viodev->cmo.desired = desired;
666 if (viodev->cmo.entitled > desired) {
667 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
668 vio_cmo.excess.size += viodev->cmo.entitled - desired;
674 if (viodev->cmo.allocated < viodev->cmo.entitled)
675 vio_cmo.excess.free += viodev->cmo.entitled -
676 max(viodev->cmo.allocated, desired);
677 viodev->cmo.entitled = desired;
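
vio_cmo_set_dev_desired() (618-677) is the exported hook a driver calls when its entitlement needs change at run time; the bus clamps the request and shifts the difference between the reserve and excess pools. A hedged kernel-side sketch of a caller, with an invented helper name and sizing:

#include <linux/device.h>
#include <asm/vio.h>

/*
 * Hypothetical helper: tell the bus that this device's IO entitlement
 * needs have changed, e.g. after an MTU or queue-depth update.  A real
 * driver would round each buffer up to the IOMMU page size first.
 */
static void example_update_entitlement(struct vio_dev *vdev,
				       size_t buffers, size_t buf_size)
{
	size_t desired = buffers * buf_size;

	/* The bus clamps the value and rebalances the pools internally. */
	vio_cmo_set_dev_desired(vdev, desired);
	dev_dbg(&vdev->dev, "requested %zu bytes of IO entitlement\n", desired);
}
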
687 * @viodev - Pointer to struct vio_dev for device
698 static int vio_cmo_bus_probe(struct vio_dev *viodev)
701 struct device *dev = &viodev->dev;
711 switch (viodev->family) {
713 if (of_get_property(viodev->dev.of_node,
721 dev_warn(dev, "unknown device family: %d\n", viodev->family);
735 viodev->cmo.desired =
736 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
737 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
738 viodev->cmo.desired = VIO_CMO_MIN_ENT;
746 dev_ent->viodev = viodev;
750 viodev->cmo.desired = 0;
765 vio_cmo.desired += (viodev->cmo.desired -
791 vio_cmo.desired += viodev->cmo.desired;
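
During vio_cmo_bus_probe() (687-791) the bus asks the driver how much entitlement it wants through the get_desired_dma() callback, aligns the answer to the IOMMU page size and raises it to at least VIO_CMO_MIN_ENT (735-738). A sketch of such a callback in a hypothetical driver (the constants are invented); it is wired up through the .get_desired_dma member of struct vio_driver, shown in the skeleton further down:

#include <asm/vio.h>

#define EXAMPLE_RX_BUFFERS	64	/* invented sizing knobs */
#define EXAMPLE_BUF_BYTES	4096

/*
 * Report how many bytes of IO entitlement this device would like; the
 * bus aligns the result to the IOMMU page size and raises it to at
 * least VIO_CMO_MIN_ENT (lines 735-738).
 */
static unsigned long example_get_desired_dma(struct vio_dev *vdev)
{
	return EXAMPLE_RX_BUFFERS * EXAMPLE_BUF_BYTES;
}
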
800 * @viodev - Pointer to struct vio_dev for device
807 static void vio_cmo_bus_remove(struct vio_dev *viodev)
814 if (viodev->cmo.allocated) {
815 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
817 __func__, viodev->cmo.allocated);
826 if (viodev == dev_ent->viodev) {
837 if (viodev->cmo.entitled) {
843 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
850 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
853 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
854 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
857 viodev->cmo.entitled -= tmp;
861 vio_cmo.excess.size += viodev->cmo.entitled;
862 vio_cmo.excess.free += viodev->cmo.entitled;
863 vio_cmo.reserve.size -= viodev->cmo.entitled;
870 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
871 viodev->cmo.desired = VIO_CMO_MIN_ENT;
872 atomic_set(&viodev->cmo.allocs_failed, 0);
878 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
880 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
946 struct vio_dev *viodev = to_vio_dev(dev);
947 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
953 struct vio_dev *viodev = to_vio_dev(dev);
954 atomic_set(&viodev->cmo.allocs_failed, 0);
961 struct vio_dev *viodev = to_vio_dev(dev);
969 vio_cmo_set_dev_desired(viodev, new_desired);
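
Lines 946-969 back the per-device sysfs attributes: cmo_allocs_failed can be read and reset, and a write to cmo_desired lands in vio_cmo_set_dev_desired(). A small userspace sketch that reads and clears the failure counter (the unit address in the path is a placeholder; real devices appear under /sys/bus/vio/devices/):

#include <stdio.h>

/* Placeholder unit address; pick a real one from /sys/bus/vio/devices/. */
#define DEV_DIR "/sys/bus/vio/devices/30000002"

int main(void)
{
	char path[256];
	long failed;
	FILE *f;

	snprintf(path, sizeof(path), "%s/cmo_allocs_failed", DEV_DIR);

	/* Read how many CMO allocations have failed so far (line 947). */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &failed) == 1)
		printf("cmo_allocs_failed = %ld\n", failed);
	fclose(f);

	/* Any write resets the counter to zero (line 954). */
	f = fopen(path, "w");
	if (f) {
		fputs("0\n", f);
		fclose(f);
	}
	return 0;
}
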
1069 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1070 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1071 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1072 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1234 struct vio_dev *viodev = to_vio_dev(dev);
1242 id = vio_match_device(viodrv->id_table, viodev);
1244 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1246 error = vio_cmo_bus_probe(viodev);
1250 error = viodrv->probe(viodev, id);
1252 vio_cmo_bus_remove(viodev);
1261 struct vio_dev *viodev = to_vio_dev(dev);
1273 ret = viodrv->remove(viodev);
1276 vio_cmo_bus_remove(viodev);
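
Lines 1234-1276 are the bus-level dispatch: vio_bus_probe() matches the device against the driver's id_table, zeroes the CMO state, reserves the minimum entitlement via vio_cmo_bus_probe(), and only then calls the driver's probe, undoing the CMO setup if that fails; vio_bus_remove() runs the driver's remove before vio_cmo_bus_remove() hands the entitlement back. A hedged skeleton of a driver that plugs into this path (names and ID strings are invented; the int-returning remove matches the kernel vintage of this listing and differs in newer trees):

#include <linux/module.h>
#include <asm/vio.h>

/* Hypothetical device: the type/compat strings below are placeholders. */
static const struct vio_device_id example_ids[] = {
	{ "example", "IBM,example" },
	{ "", "" },
};
MODULE_DEVICE_TABLE(vio, example_ids);

static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	dev_info(&vdev->dev, "bound to unit address %x\n", vdev->unit_address);
	return 0;	/* a failure here unwinds vio_cmo_bus_probe() (line 1252) */
}

static int example_remove(struct vio_dev *vdev)
{
	/* The bus calls vio_cmo_bus_remove() after this returns (line 1276). */
	return 0;
}

/* Required under CMO when the driver does DMA (see lines 735-738). */
static unsigned long example_get_desired_dma(struct vio_dev *vdev)
{
	return 64 * 4096;	/* invented figure; see the earlier sketch */
}

static struct vio_driver example_driver = {
	.name			= "example_vio",
	.id_table		= example_ids,
	.probe			= example_probe,
	.remove			= example_remove,
	.get_desired_dma	= example_get_desired_dma,
};

static int __init example_init(void)
{
	return vio_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	vio_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
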
1338 struct vio_dev *viodev;
1377 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1378 if (viodev == NULL) {
1384 viodev->family = family;
1385 if (viodev->family == VDEVICE) {
1388 viodev->type = of_node_get_device_type(of_node);
1389 if (!viodev->type) {
1402 dev_set_name(&viodev->dev, "%x", unit_address);
1403 viodev->irq = irq_of_parse_and_map(of_node, 0);
1404 viodev->unit_address = unit_address;
1411 viodev->resource_id = of_read_number(prop, 1);
1413 dev_set_name(&viodev->dev, "%pOFn", of_node);
1414 viodev->type = dev_name(&viodev->dev);
1415 viodev->irq = 0;
1418 viodev->name = of_node->name;
1419 viodev->dev.of_node = of_node_get(of_node);
1421 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1424 viodev->dev.parent = &vio_bus_device.dev;
1425 viodev->dev.bus = &vio_bus_type;
1426 viodev->dev.release = vio_dev_release;
1428 if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
1430 vio_cmo_set_dma_ops(viodev);
1432 set_dma_ops(&viodev->dev, &dma_iommu_ops);
1434 set_iommu_table_base(&viodev->dev,
1435 vio_build_iommu_table(viodev));
1439 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1440 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
1444 if (device_register(&viodev->dev)) {
1446 __func__, dev_name(&viodev->dev));
1447 put_device(&viodev->dev);
1451 return viodev;
1454 kfree(viodev);
1580 void vio_unregister_device(struct vio_dev *viodev)
1582 device_unregister(&viodev->dev);
1583 if (viodev->family == VDEVICE)
1584 irq_dispose_mapping(viodev->irq);
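
The remaining hits are vio_register_device_node() (1338-1454), which turns a device-tree node into a registered struct vio_dev and picks CMO-aware or plain IOMMU DMA ops depending on the presence of ibm,my-dma-window, and vio_unregister_device() (1580-1584), which unregisters it and disposes the IRQ mapping for VDEVICE-family devices. At boot the bus feeds every child of the /vdevice (and ibm,platform-facilities) subtree to vio_register_device_node(); a hedged sketch of that scan, which the in-kernel helper performs with more error handling:

#include <linux/of.h>
#include <asm/vio.h>

/*
 * Register every child of the named device-tree root as a VIO device.
 * Simplified sketch of the scan the bus performs at initialization;
 * teardown would pair each device with vio_unregister_device().
 */
static void example_scan(const char *root_name)
{
	struct device_node *root, *child;

	root = of_find_node_by_name(NULL, root_name);
	if (!root)
		return;

	for_each_child_of_node(root, child)
		vio_register_device_node(child);

	of_node_put(root);
}
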