Lines Matching defs:cmo

153     if (viodev->cmo.entitled > viodev->cmo.allocated)
154         reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
165         viodev->cmo.allocated += size;
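
The hits at 153-165 are the allocation path of what appears to be the powerpc VIO bus CMO (Cooperative Memory Overcommit) accounting: a request is served from the device's unused entitlement (its reserve) first, and only the overflow is charged against the shared excess pool. A minimal userspace sketch of that arithmetic; the struct, cmo_alloc(), and all numbers below are illustrative stand-ins, not the kernel code:

#include <stddef.h>
#include <stdio.h>

struct dev_cmo { size_t entitled, allocated; };

/* Satisfy 'size' from the device's unused entitlement (reserve) first,
 * then from a shared excess pool; fail rather than overcommit. */
static int cmo_alloc(struct dev_cmo *d, size_t *excess_free, size_t size)
{
    size_t reserve_free = 0;

    if (d->entitled > d->allocated)
        reserve_free = d->entitled - d->allocated;
    if (reserve_free + *excess_free < size)
        return -1;
    d->allocated += size;
    size -= (reserve_free < size) ? reserve_free : size;
    *excess_free -= size;               /* only the overflow hits excess */
    return 0;
}

int main(void)
{
    struct dev_cmo d = { .entitled = 4096, .allocated = 1024 };
    size_t excess_free = 8192;

    cmo_alloc(&d, &excess_free, 4096);  /* 3072 from reserve, 1024 from excess */
    printf("allocated=%zu excess_free=%zu\n", d.allocated, excess_free);
    return 0;                           /* allocated=5120 excess_free=7168 */
}
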
200     if (viodev->cmo.allocated > viodev->cmo.entitled) {
201         excess_freed = min(reserve_freed, (viodev->cmo.allocated -
202                 viodev->cmo.entitled));
207     viodev->cmo.allocated -= (reserve_freed + excess_freed);
233         tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
236         viodev->cmo.entitled -= tmp;
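
Lines 200-236 are the matching dealloc path: bytes allocated above the device's entitlement were borrowed from excess and return straight there, the rest frees reserve, and freed reserve first tops up the bus spare pool, with the min3() cap keeping the device at or above its minimum entitlement. A sketch of that slice under the same illustrative assumptions (spare_target and min_ent stand in for VIO_CMO_MIN_ENT):

#include <stddef.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MIN3(a, b, c) MIN(MIN(a, b), (c))

struct dev_cmo { size_t entitled, allocated; };

/* Split a free into an excess part (allocation above entitlement) and
 * a reserve part; refill 'spare' from the reserve part if it is low.
 * Assumes entitled >= min_ent, as the kernel guarantees. */
static void cmo_dealloc(struct dev_cmo *d, size_t size,
                        size_t *spare, size_t spare_target, size_t min_ent)
{
    size_t reserve_freed = size, excess_freed = 0, tmp;

    if (d->allocated > d->entitled) {
        excess_freed = MIN(reserve_freed, d->allocated - d->entitled);
        reserve_freed -= excess_freed;
    }
    d->allocated -= reserve_freed + excess_freed;

    if (*spare < spare_target && reserve_freed) {
        tmp = MIN3(spare_target - *spare, reserve_freed,
                   d->entitled - min_ent);
        *spare += tmp;
        d->entitled -= tmp;
    }
}

int main(void)
{
    struct dev_cmo d = { .entitled = 4096, .allocated = 6144 };
    size_t spare = 0;

    cmo_dealloc(&d, 3072, &spare, 1024, 1024);
    printf("allocated=%zu entitled=%zu spare=%zu\n",
           d.allocated, d.entitled, spare);
    return 0;   /* allocated=3072 entitled=3072 spare=1024 */
}
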
318         if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
319                 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
320             avail += viodev->cmo.entitled -
321                     max_t(size_t, viodev->cmo.allocated,
344             if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
345                     (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
346                 tmp = viodev->cmo.entitled -
347                         max_t(size_t, viodev->cmo.allocated,
349             viodev->cmo.entitled -= min(tmp, delta);
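
The pairs at 318-321 and 344-349 look like the entitlement-shrink path: a device's surrenderable surplus is its entitlement above both its current allocation and VIO_CMO_MIN_ENT, and each device gives up at most the remaining delta. A worked sketch; the constant's value and the device numbers are invented:

#include <stddef.h>
#include <stdio.h>

#define VIO_CMO_MIN_ENT 1024    /* assumed value, for illustration only */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct dev_cmo { size_t entitled, allocated; };

/* Entitlement a device can surrender: whatever it holds above both
 * its live allocation and the guaranteed minimum. */
static size_t surplus(const struct dev_cmo *d)
{
    if (d->entitled > d->allocated && d->entitled > VIO_CMO_MIN_ENT)
        return d->entitled - MAX(d->allocated, VIO_CMO_MIN_ENT);
    return 0;
}

int main(void)
{
    struct dev_cmo devs[] = {
        { .entitled = 8192, .allocated = 2048 },
        { .entitled = 4096, .allocated = 4096 },    /* fully used: exempt */
    };
    size_t delta = 5000;        /* entitlement being reclaimed */

    for (int i = 0; i < 2; i++) {
        size_t give = MIN(surplus(&devs[i]), delta);

        devs[i].entitled -= give;
        delta -= give;
        printf("dev%d entitled=%zu\n", i, devs[i].entitled);
    }
    return 0;   /* dev0 entitled=3192, dev1 entitled=4096 */
}
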
386     struct vio_cmo *cmo;
393     cmo = container_of(work, struct vio_cmo, balance_q.work);
398     cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
399     BUG_ON(cmo->min > cmo->entitled);
400     cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
401     cmo->min += cmo->spare;
402     cmo->desired = cmo->min;
408     avail = cmo->entitled - cmo->spare;
412         viodev->cmo.entitled = VIO_CMO_MIN_ENT;
413         cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
414         avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
428             if (viodev->cmo.desired <= level) {
439             chunk = min(chunk, (viodev->cmo.desired -
440                     viodev->cmo.entitled));
441             viodev->cmo.entitled += chunk;
448             need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
449                     max(viodev->cmo.allocated, level);
459     cmo->reserve.size = cmo->min;
460     cmo->excess.free = 0;
461     cmo->excess.size = 0;
466         if (viodev->cmo.entitled)
467             cmo->reserve.size += (viodev->cmo.entitled -
470         if (viodev->cmo.allocated > viodev->cmo.entitled)
471             need += viodev->cmo.allocated - viodev->cmo.entitled;
473     cmo->excess.size = cmo->entitled - cmo->reserve.size;
474     cmo->excess.free = cmo->excess.size - need;
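
Lines 386-474 are the balance worker: it recomputes the bus minimum and spare (398-402), resets every device to VIO_CMO_MIN_ENT (412), then portions the leftover entitlement out in VIO_CMO_BALANCE_CHUNK steps, raising a common water line until each device reaches its desired level or the pool runs dry (428-449), and finally rebuilds the reserve/excess pool sizes (459-474). A simplified sketch of the leveling loop; the constants and device table are invented, and it deliberately omits the already-allocated 'need' correction at 448-449:

#include <stddef.h>
#include <stdio.h>

#define MIN_ENT 1024            /* stand-ins for VIO_CMO_MIN_ENT ... */
#define CHUNK   512             /* ... and VIO_CMO_BALANCE_CHUNK */
#define NDEV    3
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct dev_cmo { size_t entitled, desired; };

int main(void)
{
    struct dev_cmo devs[NDEV] = {
        { 0, 1024 }, { 0, 4096 }, { 0, 2048 },
    };
    size_t avail = 4096;        /* pool left after minimums and spare */
    size_t level = MIN_ENT;
    int fulfilled;

    /* Every device starts at the guaranteed minimum. */
    for (int i = 0; i < NDEV; i++)
        devs[i].entitled = MIN_ENT;

    /* Raise a water line in CHUNK steps; devices still below their
     * desired level absorb entitlement until the pool runs dry. */
    while (avail) {
        fulfilled = 0;
        for (int i = 0; i < NDEV && avail; i++) {
            size_t chunk;

            if (devs[i].desired <= level) {
                fulfilled++;
                continue;
            }
            chunk = MIN(avail, CHUNK);
            chunk = MIN(chunk, devs[i].desired - devs[i].entitled);
            devs[i].entitled += chunk;
            avail -= chunk;
        }
        if (fulfilled == NDEV)
            break;
        level += CHUNK;
    }
    for (int i = 0; i < NDEV; i++)
        printf("dev%d entitled=%zu\n", i, devs[i].entitled);
    return 0;   /* dev0=1024, dev1=4096, dev2=2048 */
}
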
488         atomic_inc(&viodev->cmo.allocs_failed);
497         atomic_inc(&viodev->cmo.allocs_failed);
533         atomic_inc(&viodev->cmo.allocs_failed);
578         atomic_inc(&viodev->cmo.allocs_failed);
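
The four atomic_inc() hits at 488-578 sit on the DMA mapping failure paths: whenever a mapping request exceeds the device's entitlement, a per-device counter is bumped; the sysfs matches at 940-954 read and reset it. The same pattern with C11 atomics; try_map() and its budget check are purely illustrative:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Per-device failure counter in the spirit of cmo.allocs_failed:
 * bumped on every refused mapping, read and reset elsewhere. */
static atomic_int allocs_failed;

static void *try_map(size_t size, size_t budget)
{
    if (size > budget) {
        atomic_fetch_add(&allocs_failed, 1);
        return NULL;            /* caller sees the mapping failure */
    }
    return (void *)1;           /* stand-in for a real mapping */
}

int main(void)
{
    try_map(8192, 4096);        /* over budget: counted */
    printf("allocs_failed=%d\n", atomic_load(&allocs_failed));
    return 0;                   /* prints: allocs_failed=1 */
}
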
654     if (desired >= viodev->cmo.desired) {
656         vio_cmo.desired += desired - viodev->cmo.desired;
657         viodev->cmo.desired = desired;
660         vio_cmo.desired -= viodev->cmo.desired - desired;
661         viodev->cmo.desired = desired;
666         if (viodev->cmo.entitled > desired) {
667             vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
668             vio_cmo.excess.size += viodev->cmo.entitled - desired;
674             if (viodev->cmo.allocated < viodev->cmo.entitled)
675                 vio_cmo.excess.free += viodev->cmo.entitled -
676                         max(viodev->cmo.allocated, desired);
677             viodev->cmo.entitled = desired;
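
Lines 654-677 handle a driver lowering its desired entitlement: the bus-wide desired total is adjusted, and entitlement the device holds above the new target migrates from the reserve pool to the excess pool, with the slice not actually allocated credited as free excess. A sketch of the shrinking branch; the pool struct and the numbers are assumptions:

#include <stddef.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct dev_cmo { size_t entitled, allocated, desired; };
struct pool { size_t size, free; };

/* Lower a device's desired entitlement: anything it holds above the
 * new target moves from reserve to excess, and the unused part of
 * that slice becomes immediately free excess. */
static void set_desired_lower(struct dev_cmo *d, size_t desired,
                              struct pool *reserve, struct pool *excess)
{
    d->desired = desired;
    if (d->entitled > desired) {
        reserve->size -= d->entitled - desired;
        excess->size += d->entitled - desired;
        if (d->allocated < d->entitled)
            excess->free += d->entitled - MAX(d->allocated, desired);
        d->entitled = desired;
    }
}

int main(void)
{
    struct dev_cmo d = { .entitled = 8192, .allocated = 3072, .desired = 8192 };
    struct pool reserve = { .size = 16384, .free = 0 }, excess = { 0, 0 };

    set_desired_lower(&d, 2048, &reserve, &excess);
    printf("reserve=%zu excess.size=%zu excess.free=%zu entitled=%zu\n",
           reserve.size, excess.size, excess.free, d.entitled);
    return 0;   /* reserve=10240 excess.size=6144 excess.free=5120 entitled=2048 */
}
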
735         viodev->cmo.desired =
737         if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
738             viodev->cmo.desired = VIO_CMO_MIN_ENT;
750         viodev->cmo.desired = 0;
765             vio_cmo.desired += (viodev->cmo.desired -
791         vio_cmo.desired += viodev->cmo.desired;
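
At probe time (735-791), a CMO-aware driver reports its desired DMA entitlement (the kernel page-aligns the driver's get_desired_dma() result) and it is clamped to at least VIO_CMO_MIN_ENT before being added to the bus total; driverless devices get a desired of 0. The clamp in isolation, with an assumed value for the constant:

#include <stddef.h>
#include <stdio.h>

#define VIO_CMO_MIN_ENT 1024    /* assumed value, for illustration only */

/* Clamp a driver's requested DMA entitlement to the guaranteed floor. */
static size_t clamp_desired(size_t requested)
{
    return requested < VIO_CMO_MIN_ENT ? VIO_CMO_MIN_ENT : requested;
}

int main(void)
{
    printf("%zu %zu\n", clamp_desired(256), clamp_desired(65536));
    return 0;                   /* prints: 1024 65536 */
}
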
802  * Remove the device from the cmo device list. The minimum entitlement
814     if (viodev->cmo.allocated) {
817                 __func__, viodev->cmo.allocated);
837     if (viodev->cmo.entitled) {
843         vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
850         viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
853         if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
854             tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
857             viodev->cmo.entitled -= tmp;
861         vio_cmo.excess.size += viodev->cmo.entitled;
862         vio_cmo.excess.free += viodev->cmo.entitled;
863         vio_cmo.reserve.size -= viodev->cmo.entitled;
870         viodev->cmo.entitled = VIO_CMO_MIN_ENT;
871         viodev->cmo.desired = VIO_CMO_MIN_ENT;
872         atomic_set(&viodev->cmo.allocs_failed, 0);
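
The removal path at 802-872 unwinds probe: after asserting nothing is still allocated (814-817), the device parks VIO_CMO_MIN_ENT so a later module reload cannot fail (870-871), the spare pool is topped up from what the device hands back (853-857), and the remainder moves from reserve to excess (861-863). A sketch of that hand-back; pools and numbers are illustrative:

#include <stddef.h>
#include <stdio.h>

#define MIN_ENT 1024            /* stand-in for VIO_CMO_MIN_ENT */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct dev_cmo { size_t entitled, desired; };
struct pool { size_t size, free; };

/* On remove: keep MIN_ENT parked with the device, refill spare from
 * the returned entitlement, push whatever is left into excess. */
static void bus_remove(struct dev_cmo *d, size_t *spare,
                       struct pool *reserve, struct pool *excess)
{
    size_t tmp;

    d->entitled -= MIN_ENT;
    if (d->entitled && *spare < MIN_ENT) {
        tmp = MIN(d->entitled, MIN_ENT - *spare);
        *spare += tmp;
        d->entitled -= tmp;
    }
    excess->size += d->entitled;
    excess->free += d->entitled;
    reserve->size -= d->entitled;

    d->entitled = MIN_ENT;      /* guarantees a reload can succeed */
    d->desired = MIN_ENT;
}

int main(void)
{
    struct dev_cmo d = { .entitled = 4096, .desired = 4096 };
    size_t spare = 512;
    struct pool reserve = { .size = 8192, .free = 0 }, excess = { 0, 0 };

    bus_remove(&d, &spare, &reserve, &excess);
    printf("spare=%zu excess=%zu reserve=%zu entitled=%zu\n",
           spare, excess.size, reserve.size, d.entitled);
    return 0;   /* spare=1024 excess=2560 reserve=5632 entitled=1024 */
}
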
940     return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
947     return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
954     atomic_set(&viodev->cmo.allocs_failed, 0);
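
Lines 940-954 are the sysfs plumbing. The trailing backslash on 940 shows it lives inside a macro that token-pastes a cmo field name into a generated show routine, one per field; allocs_failed instead gets hand-written show/store helpers so a write can reset the atomic counter. The token-pasting trick reduced to userspace; the simplified signature here is an assumption, not the kernel's device_attribute interface:

#include <stdio.h>

struct dev_cmo { unsigned long desired, entitled, allocated; };
struct vio_dev { struct dev_cmo cmo; };

/* Generate one show helper per cmo field by pasting the field name,
 * mirroring the macro that line 940 belongs to. */
#define cmo_rd_attr(name)                                       \
static int cmo_##name##_show(struct vio_dev *dev, char *buf)    \
{                                                               \
    return sprintf(buf, "%lu\n", dev->cmo.name);                \
}

cmo_rd_attr(desired)
cmo_rd_attr(entitled)

int main(void)
{
    struct vio_dev d = { .cmo = { .desired = 2048, .entitled = 1024 } };
    char buf[32];

    cmo_desired_show(&d, buf);
    printf("%s", buf);          /* prints: 2048 */
    cmo_entitled_show(&d, buf);
    printf("%s", buf);          /* prints: 1024 */
    return 0;
}
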
1244     memset(&viodev->cmo, 0, sizeof(viodev->cmo));