Lines Matching defs:dqp
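
These matches appear to come from fs/xfs/xfs_qm.c, the XFS quota manager. The notes interleaved below name the function each group of matches most likely sits in; the attributions are inferred from the code itself, and the accompanying sketches are hedged paraphrases rather than verbatim excerpts, so details may differ between kernel releases.
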
40 STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
54 int (*execute)(struct xfs_dquot *dqp, void *data),
83 struct xfs_dquot *dqp = batch[i];
85 next_index = dqp->q_id + 1;
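
Note on lines 54-85: these sit in what looks like xfs_qm_dquot_walk(), the batched radix-tree walk that applies an execute() callback to every cached dquot of one quota type. A simplified sketch of the batch loop follows; declarations, the skipped/-EAGAIN retry pass and the corruption bail-out are abridged, so treat it as an outline rather than the verbatim function:

    while (1) {
            struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
            int nr_found, i, error;

            mutex_lock(&qi->qi_tree_lock);
            nr_found = radix_tree_gang_lookup(tree, (void **)batch,
                            next_index, XFS_DQ_LOOKUP_BATCH);
            if (!nr_found) {
                    mutex_unlock(&qi->qi_tree_lock);
                    break;
            }

            for (i = 0; i < nr_found; i++) {
                    struct xfs_dquot *dqp = batch[i];

                    /* resume the next gang lookup just past this id */
                    next_index = dqp->q_id + 1;

                    error = execute(batch[i], data);
                    /* -EAGAIN from execute() means "revisit on a later pass" */
            }
            mutex_unlock(&qi->qi_tree_lock);

            /* next_index wrapped to zero: the 32-bit id space is exhausted */
            if (!next_index)
                    break;
    }

Bumping next_index to dqp->q_id + 1 after each entry lets the walk resume the gang lookup without revisiting dquots, and a wrap back to zero marks the end of the id space.
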
122 struct xfs_dquot *dqp,
125 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
128 xfs_dqlock(dqp);
129 if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
132 dqp->q_flags |= XFS_DQFLAG_FREEING;
134 xfs_dqflock(dqp);
141 if (XFS_DQ_IS_DIRTY(dqp)) {
148 error = xfs_qm_dqflush(dqp, &bp);
153 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
156 xfs_dqflock(dqp);
159 ASSERT(atomic_read(&dqp->q_pincount) == 0);
160 ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
161 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
163 xfs_dqfunlock(dqp);
164 xfs_dqunlock(dqp);
166 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
173 ASSERT(!list_empty(&dqp->q_lru));
174 list_lru_del(&qi->qi_lru, &dqp->q_lru);
175 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
177 xfs_qm_dqdestroy(dqp);
181 xfs_dqunlock(dqp);
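
Note on lines 122-181: this looks like xfs_qm_dqpurge(), which tears dquots down at quotaoff/unmount. The pattern of interest: the dquot is marked XFS_DQFLAG_FREEING under the dquot lock so concurrent lookups and reclaim back off, any dirty contents are flushed and written, and only then is it removed from the radix tree and the LRU and destroyed. A condensed paraphrase, with tree locking and most of the error handling trimmed:

    struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;

    xfs_dqlock(dqp);
    if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0) {
            xfs_dqunlock(dqp);
            return -EAGAIN;                 /* busy; the walk retries it later */
    }

    dqp->q_flags |= XFS_DQFLAG_FREEING;
    xfs_dqflock(dqp);                       /* wait out any flush in progress */

    if (XFS_DQ_IS_DIRTY(dqp)) {
            struct xfs_buf *bp = NULL;

            /* write the dquot back before letting go of it */
            if (!xfs_qm_dqflush(dqp, &bp)) {
                    xfs_bwrite(bp);
                    xfs_buf_relse(bp);
            }
            xfs_dqflock(dqp);
    }

    ASSERT(atomic_read(&dqp->q_pincount) == 0);
    xfs_dqfunlock(dqp);
    xfs_dqunlock(dqp);

    radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
    list_lru_del(&qi->qi_lru, &dqp->q_lru);
    xfs_qm_dqdestroy(dqp);
    return 0;
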
254 struct xfs_dquot *dqp;
265 dqp = *IO_idqpp;
266 if (dqp) {
267 trace_xfs_dqattach_found(dqp);
277 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
281 trace_xfs_dqattach_get(dqp);
287 *IO_idqpp = dqp;
288 xfs_dqunlock(dqp);
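
Note on lines 254-288: this is the per-type attach helper (xfs_qm_dqattach_one() in current trees) that fills one of the inode's i_udquot/i_gdquot/i_pdquot pointers. If *IO_idqpp is already set it just reports the cached dquot; otherwise it looks the dquot up (allocating it on disk when doalloc is set), stores the referenced dquot back through IO_idqpp and drops the dquot lock.
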
418 struct xfs_dquot *dqp = container_of(item,
422 if (!xfs_dqlock_nowait(dqp))
430 if (dqp->q_flags & XFS_DQFLAG_FREEING)
437 if (dqp->q_nrefs) {
438 xfs_dqunlock(dqp);
439 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
441 trace_xfs_dqreclaim_want(dqp);
442 list_lru_isolate(lru, &dqp->q_lru);
443 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
452 if (!xfs_dqflock_nowait(dqp))
455 if (XFS_DQ_IS_DIRTY(dqp)) {
459 trace_xfs_dqreclaim_dirty(dqp);
464 error = xfs_qm_dqflush(dqp, &bp);
472 xfs_dqfunlock(dqp);
477 dqp->q_flags |= XFS_DQFLAG_FREEING;
478 xfs_dqunlock(dqp);
480 ASSERT(dqp->q_nrefs == 0);
481 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
482 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
483 trace_xfs_dqreclaim_done(dqp);
484 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
488 xfs_dqunlock(dqp);
490 trace_xfs_dqreclaim_busy(dqp);
491 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
495 trace_xfs_dqreclaim_busy(dqp);
496 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
497 xfs_dqunlock(dqp);
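
Note on lines 418-497: consistent with xfs_qm_dquot_isolate(), the list_lru walk callback behind the dquot shrinker. Its outcomes are the interesting part: a dquot that regained a reference while on the LRU is just dropped from the list (the final dqput puts it back), a clean unreferenced dquot is marked FREEING and moved onto the dispose list carried in the walk argument (isol below), and anything it cannot lock, or that is dirty or pinned, is reported back as a reclaim miss so it gets revisited. A rough sketch, hedging on the exact callback signature and the writeback branch in any given kernel:

    if (!xfs_dqlock_nowait(dqp))
            return LRU_SKIP;                /* contended; look again later */

    if (dqp->q_nrefs) {
            /* regained a reference while on the LRU: drop it from the
             * list but do not reclaim it */
            xfs_dqunlock(dqp);
            list_lru_isolate(lru, &dqp->q_lru);
            return LRU_REMOVED;
    }

    /* Dirty or flush-locked dquots are elided here: they get written
     * back via xfs_qm_dqflush() and a delwri buffer list, and the
     * callback counts them as reclaim misses to be revisited later. */

    dqp->q_flags |= XFS_DQFLAG_FREEING;
    xfs_dqunlock(dqp);
    list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
    return LRU_REMOVED;

The dispose list is drained afterwards by the shrinker scan (lines 527-531), each entry going through xfs_qm_dqfree_one() (lines 1634-1645), which deletes the dquot from the radix tree and destroys it.
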
527 struct xfs_dquot *dqp;
529 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
530 list_del_init(&dqp->q_lru);
531 xfs_qm_dqfree_one(dqp);
554 struct xfs_dquot *dqp;
558 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
562 defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
568 defq->blk.hard = dqp->q_blk.hardlimit;
569 defq->blk.soft = dqp->q_blk.softlimit;
570 defq->ino.hard = dqp->q_ino.hardlimit;
571 defq->ino.soft = dqp->q_ino.softlimit;
572 defq->rtb.hard = dqp->q_rtb.hardlimit;
573 defq->rtb.soft = dqp->q_rtb.softlimit;
574 xfs_qm_dqdestroy(dqp);
585 struct xfs_dquot *dqp;
601 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
610 if (dqp->q_blk.timer)
611 defq->blk.time = dqp->q_blk.timer;
612 if (dqp->q_ino.timer)
613 defq->ino.time = dqp->q_ino.timer;
614 if (dqp->q_rtb.timer)
615 defq->rtb.time = dqp->q_rtb.timer;
617 xfs_qm_dqdestroy(dqp);
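
Note on lines 554-617: two mount-time initializers (xfs_qm_set_defquota() and xfs_qm_init_timelimits() in current trees) read the id-0 dquot with xfs_qm_dqget_uncached(). The first copies its hard and soft block, inode and realtime-block limits into the per-type defaults; the second seeds the default grace periods, overriding them with any nonzero timer values stored in that dquot. Both destroy the temporary dquot when done rather than caching it.
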
1080 struct xfs_dquot *dqp;
1085 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1095 trace_xfs_dqadjust(dqp);
1101 dqp->q_ino.count++;
1102 dqp->q_ino.reserved++;
1104 dqp->q_blk.count += nblks;
1105 dqp->q_blk.reserved += nblks;
1108 dqp->q_rtb.count += rtblks;
1109 dqp->q_rtb.reserved += rtblks;
1117 if (dqp->q_id) {
1118 xfs_qm_adjust_dqlimits(dqp);
1119 xfs_qm_adjust_dqtimers(dqp);
1122 dqp->q_flags |= XFS_DQFLAG_DIRTY;
1123 xfs_qm_dqput(dqp);
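
Note on lines 1080-1123: these match the quotacheck accounting helper (xfs_qm_quotacheck_dqadjust() in recent trees). For every inode scanned it grabs the dquot for the relevant id, bumps the inode count plus the data and realtime block usage, refreshes limits and timers for non-default dquots (id 0 carries the defaults), marks the dquot dirty and drops the reference. Condensed sketch:

    error = xfs_qm_dqget(mp, id, type, true, &dqp);
    if (error)
            return error;                   /* no dquot to adjust */

    /* one more inode owned by this id, plus its block usage */
    dqp->q_ino.count++;
    dqp->q_ino.reserved++;
    if (nblks) {
            dqp->q_blk.count += nblks;
            dqp->q_blk.reserved += nblks;
    }
    if (rtblks) {
            dqp->q_rtb.count += rtblks;
            dqp->q_rtb.reserved += rtblks;
    }

    /* id 0 holds the defaults; real ids get limits and timers refreshed */
    if (dqp->q_id) {
            xfs_qm_adjust_dqlimits(dqp);
            xfs_qm_adjust_dqtimers(dqp);
    }

    dqp->q_flags |= XFS_DQFLAG_DIRTY;
    xfs_qm_dqput(dqp);
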
1231 struct xfs_dquot *dqp,
1234 struct xfs_mount *mp = dqp->q_mount;
1239 xfs_dqlock(dqp);
1240 if (dqp->q_flags & XFS_DQFLAG_FREEING)
1242 if (!XFS_DQ_IS_DIRTY(dqp))
1253 if (!xfs_dqflock_nowait(dqp)) {
1255 error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1274 error = xfs_qm_dqflush(dqp, &bp);
1281 xfs_dqunlock(dqp);
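
Note on lines 1231-1281: the per-dquot flush callback run at the end of quotacheck (xfs_qm_flush_one() in current trees). The subtlety visible in the listing is the xfs_buf_incore() lookup when xfs_dqflock_nowait() fails: instead of sleeping on a flush lock held by an earlier writeback, quotacheck finds the backing buffer and pushes it out, then retries the dquot; this avoids a deadlock against its own delayed-write buffer list.
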
1634 struct xfs_dquot *dqp)
1636 struct xfs_mount *mp = dqp->q_mount;
1640 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1645 xfs_qm_dqdestroy(dqp);
1930 struct xfs_dquot *dqp;
1934 dqp = xfs_inode_dquot(ip, type);
1935 if (!dqp || !xfs_dquot_is_enforced(dqp))
1938 if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1939 xfs_dquot_res_over_limits(&dqp->q_rtb))
1943 if (!dqp->q_prealloc_hi_wmark)
1946 if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1949 if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1952 freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1953 if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
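
Note on lines 1930-1953: this is the low-space probe the rest of XFS uses to decide whether speculative preallocation for an inode should be throttled (xfs_inode_near_dquot_enforcement() in current trees). Most of it is already in the listing; spelled out with the watermark arithmetic:

    dqp = xfs_inode_dquot(ip, type);
    if (!dqp || !xfs_dquot_is_enforced(dqp))
            return false;                   /* no quota, or not enforced */

    /* already over the inode or realtime-block limits? */
    if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
        xfs_dquot_res_over_limits(&dqp->q_rtb))
            return true;

    if (!dqp->q_prealloc_hi_wmark)
            return false;                   /* no block limit configured */
    if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
            return false;                   /* well below the limit */
    if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
            return true;                    /* at or over the limit */

    /* between the watermarks: "near" once less than about 5% of the
     * high watermark is left, using the precomputed q_low_space[] value */
    freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
    if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
            return true;

    return false;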