Lines matching defs:dqp in fs/xfs/xfs_dquot.c
52 struct xfs_dquot *dqp)
54 ASSERT(list_empty(&dqp->q_lru));
56 kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
57 mutex_destroy(&dqp->q_qlock);
59 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
60 kmem_cache_free(xfs_dquot_cache, dqp);
262 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
266 dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
267 dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
268 if (!dqp->q_prealloc_lo_wmark) {
269 dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
270 do_div(dqp->q_prealloc_lo_wmark, 100);
271 dqp->q_prealloc_lo_wmark *= 95;
274 space = dqp->q_prealloc_hi_wmark;
277 dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
278 dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
279 dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
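
Note on the watermark arithmetic above: when no soft limit is configured, the low watermark defaults to 95% of the hard limit (lines 269-271: divide by 100, then multiply by 95), and the q_low_space thresholds are 1%, 3% and 5% of the high watermark; the intermediate do_div(space, 100) between lines 274 and 277 is not shown because that line does not mention dqp. A minimal userspace sketch of the same arithmetic, using hypothetical plain uint64_t fields rather than the real struct xfs_dquot:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the prealloc fields of struct xfs_dquot. */
struct dq_prealloc {
	uint64_t hi_wmark;      /* q_prealloc_hi_wmark */
	uint64_t lo_wmark;      /* q_prealloc_lo_wmark */
	uint64_t low_space[3];  /* 1%, 3% and 5% thresholds */
};

static void set_prealloc_limits(struct dq_prealloc *p,
				uint64_t hardlimit, uint64_t softlimit)
{
	uint64_t space;

	p->hi_wmark = hardlimit;
	p->lo_wmark = softlimit;
	if (!p->lo_wmark) {
		/* No soft limit: default to 95% of the hard limit
		 * (lines 269-271). */
		p->lo_wmark = p->hi_wmark / 100 * 95;
	}

	/* 1%, 3% and 5% of the high watermark (lines 274-279). */
	space = p->hi_wmark / 100;
	p->low_space[0] = space;
	p->low_space[1] = space * 3;
	p->low_space[2] = space * 5;
}

int main(void)
{
	struct dq_prealloc p;

	set_prealloc_limits(&p, 1000000, 0);
	printf("lo=%llu 1%%=%llu 3%%=%llu 5%%=%llu\n",
	       (unsigned long long)p.lo_wmark,
	       (unsigned long long)p.low_space[0],
	       (unsigned long long)p.low_space[1],
	       (unsigned long long)p.low_space[2]);
	return 0;
}
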
289 struct xfs_dquot *dqp,
294 struct xfs_mount *mp = dqp->q_mount;
296 xfs_dqtype_t qtype = xfs_dquot_type(dqp);
301 trace_xfs_dqalloc(dqp);
311 if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
329 error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
343 dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
346 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
356 xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
407 struct xfs_dquot *dqp,
412 xfs_dqtype_t qtype = xfs_dquot_type(dqp);
431 error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
443 trace_xfs_dqtobp_read(dqp);
449 dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
451 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
473 struct xfs_dquot *dqp;
475 dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL);
477 dqp->q_type = type;
478 dqp->q_id = id;
479 dqp->q_mount = mp;
480 INIT_LIST_HEAD(&dqp->q_lru);
481 mutex_init(&dqp->q_qlock);
482 init_waitqueue_head(&dqp->q_pinwait);
483 dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
487 dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
495 init_completion(&dqp->q_flush);
496 complete(&dqp->q_flush);
507 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
510 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
517 xfs_qm_dquot_logitem_init(dqp);
520 return dqp;
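
Lines 483 and 487 compute where a dquot with a given id lives in the quota inode: the quota file is a flat array of on-disk dquot records, qi_dqperchunk of them per filesystem block, so the block index is id / dqperchunk and the byte offset inside that block's buffer is (id % dqperchunk) times the record size (the sizeof multiplier sits on the continuation line after 487 and is not shown). A small standalone sketch of that index math, assuming 4096-byte blocks and the 136-byte V5 struct xfs_dqblk:

#include <stdint.h>
#include <stdio.h>

/* Assumed sizes: 4096-byte fs blocks, 136-byte struct xfs_dqblk (V5). */
#define DQBLK_SIZE	136
#define BLOCK_SIZE	4096
#define DQPERCHUNK	(BLOCK_SIZE / DQBLK_SIZE)	/* 30 records per block */

int main(void)
{
	uint32_t id = 1000;	/* quota id, e.g. a uid */

	/* Mirrors lines 483 and 487: which block of the quota file,
	 * and which byte offset inside that block's buffer. */
	uint64_t fileoffset = id / DQPERCHUNK;
	uint32_t bufoffset  = (id % DQPERCHUNK) * DQBLK_SIZE;

	printf("id %u -> file block %llu, buffer offset %u\n",
	       id, (unsigned long long)fileoffset, bufoffset);
	return 0;
}
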
526 struct xfs_dquot *dqp,
533 dqp_type = xfs_dquot_type(dqp);
535 if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
543 if (xfs_has_crc(dqp->q_mount) ||
544 dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
562 struct xfs_dquot *dqp,
565 struct xfs_dqblk *dqb = xfs_buf_offset(bp, dqp->q_bufoffset);
572 if (!xfs_dquot_check_type(dqp, ddqp)) {
575 __this_address, dqp->q_id);
581 dqp->q_type = ddqp->d_type;
582 dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
583 dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
584 dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
585 dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
586 dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
587 dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
589 dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
590 dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
591 dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);
593 dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
594 dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
595 dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);
601 dqp->q_blk.reserved = dqp->q_blk.count;
602 dqp->q_ino.reserved = dqp->q_ino.count;
603 dqp->q_rtb.reserved = dqp->q_rtb.count;
606 xfs_dquot_set_prealloc_limits(dqp);
614 struct xfs_dquot *dqp)
618 ddqp->d_type = dqp->q_type;
619 ddqp->d_id = cpu_to_be32(dqp->q_id);
623 ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
624 ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
625 ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
626 ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
627 ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
628 ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);
630 ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
631 ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
632 ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);
638 ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
639 ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
640 ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
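
xfs_dquot_from_disk() (lines 562-606) and xfs_dquot_to_disk() (lines 614-640) are mostly an endian conversion: limits, counts and timers are stored big-endian on disk, so loading swaps with be64_to_cpu() and writing back uses cpu_to_be64(), and the in-core reservations start out equal to the just-loaded counts (lines 601-603). A userspace sketch of that round trip for two fields, using be64toh()/htobe64() from <endian.h> in place of the kernel helpers and trimmed-down hypothetical structures:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed-down stand-ins for the real structures. */
struct disk_dq   { uint64_t d_blk_hardlimit; uint64_t d_bcount; };  /* big-endian */
struct incore_dq { uint64_t hardlimit; uint64_t count; uint64_t reserved; };

int main(void)
{
	struct disk_dq ddq = {
		.d_blk_hardlimit = htobe64(500000),
		.d_bcount        = htobe64(12345),
	};
	struct incore_dq dq;

	/* "from disk": swap to host order, like be64_to_cpu() on lines 582/589. */
	dq.hardlimit = be64toh(ddq.d_blk_hardlimit);
	dq.count     = be64toh(ddq.d_bcount);

	/* The in-core reservation starts at the on-disk count (lines 601-603). */
	dq.reserved  = dq.count;

	/* "to disk": swap back, like cpu_to_be64() on lines 623/630. */
	ddq.d_blk_hardlimit = htobe64(dq.hardlimit);
	ddq.d_bcount        = htobe64(dq.count);

	printf("hardlimit=%llu count=%llu reserved=%llu\n",
	       (unsigned long long)dq.hardlimit,
	       (unsigned long long)dq.count,
	       (unsigned long long)dq.reserved);
	return 0;
}
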
656 struct xfs_dquot *dqp;
660 dqp = xfs_dquot_alloc(mp, id, type);
661 trace_xfs_dqread(dqp);
664 error = xfs_dquot_disk_read(mp, dqp, &bp);
666 error = xfs_dquot_disk_alloc(dqp, &bp);
677 error = xfs_dquot_from_disk(dqp, bp);
682 *dqpp = dqp;
686 trace_xfs_dqread_fail(dqp);
687 xfs_qm_dqdestroy(dqp);
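
xfs_qm_dqread() (lines 656-687) ties the preceding helpers together: allocate the in-core dquot, try to read its backing block, fall back to allocating one when the caller allows it, decode the buffer, and destroy the in-core copy on any failure. A hedged, self-contained model of the read-or-allocate fallback with a toy in-memory "disk"; none of these helper names are the kernel's:

#include <errno.h>
#include <stdio.h>

#define NSLOTS 4
static int disk[NSLOTS];	/* 0 = hole, nonzero = allocated block */

static int disk_read(int id)
{
	if (id >= NSLOTS || !disk[id])
		return -ENOENT;
	return 0;
}

static int disk_alloc(int id)
{
	if (id >= NSLOTS)
		return -ENOSPC;
	disk[id] = 1;
	return 0;
}

static int dq_read(int id, int can_alloc)
{
	int error = disk_read(id);

	/* Missing on disk and the caller permits allocation: make room,
	 * cf. the fallback at line 666. */
	if (error == -ENOENT && can_alloc)
		error = disk_alloc(id);
	return error;	/* on failure the caller destroys the in-core dquot */
}

int main(void)
{
	printf("no alloc: %d\n", dq_read(2, 0));	/* -ENOENT */
	printf("alloc:    %d\n", dq_read(2, 1));	/* 0 */
	printf("again:    %d\n", dq_read(2, 0));	/* 0, now on "disk" */
	return 0;
}
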
754 struct xfs_dquot *dqp;
758 dqp = radix_tree_lookup(tree, id);
759 if (!dqp) {
765 xfs_dqlock(dqp);
766 if (dqp->q_flags & XFS_DQFLAG_FREEING) {
767 xfs_dqunlock(dqp);
769 trace_xfs_dqget_freeing(dqp);
774 dqp->q_nrefs++;
777 trace_xfs_dqget_hit(dqp);
779 return dqp;
794 struct xfs_dquot *dqp)
799 error = radix_tree_insert(tree, id, dqp);
803 trace_xfs_dqget_dup(dqp);
808 xfs_dqlock(dqp);
809 dqp->q_nrefs = 1;
856 struct xfs_dquot *dqp;
864 dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
865 if (dqp) {
866 *O_dqpp = dqp;
870 error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
874 error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
880 xfs_qm_dqdestroy(dqp);
885 trace_xfs_dqget_miss(dqp);
886 *O_dqpp = dqp;
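
xfs_qm_dqget() (lines 856-886) follows the classic cache pattern: look the id up in the radix tree and take a reference on a hit; on a miss read the dquot from disk and try to insert it, and if another thread inserted first, destroy the freshly read copy and retry the lookup. A deliberately simplified single-slot model of that retry-on-duplicate loop (hypothetical names, no locking, nothing like the real radix tree code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int *slot;	/* one-slot "cache" standing in for the radix tree */

static int *cache_lookup(void) { return slot; }

static int cache_insert(int *dqp)
{
	if (slot)
		return -EEXIST;	/* another thread won the race */
	slot = dqp;
	return 0;
}

static int *dq_get(int id)
{
	for (;;) {
		int *dqp = cache_lookup();
		if (dqp)
			return dqp;		/* cache hit, cf. line 865 */

		dqp = malloc(sizeof(*dqp));	/* stands in for dqread */
		*dqp = id;

		if (cache_insert(dqp) == 0)
			return dqp;		/* miss, now cached */

		/* Lost the insertion race: throw our copy away and retry,
		 * like the destroy-and-restart path around line 880. */
		free(dqp);
	}
}

int main(void)
{
	int *a = dq_get(7);
	int *b = dq_get(7);

	printf("same object: %d\n", a == b);
	free(a);
	return 0;
}
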
945 struct xfs_dquot *dqp;
959 dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
960 if (dqp) {
961 *O_dqpp = dqp;
973 error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
987 xfs_qm_dqdestroy(dqp);
988 dqp = dqp1;
989 xfs_dqlock(dqp);
994 xfs_qm_dqdestroy(dqp);
998 error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
1004 xfs_qm_dqdestroy(dqp);
1011 trace_xfs_dqget_miss(dqp);
1012 *O_dqpp = dqp;
1027 struct xfs_dquot *dqp;
1032 error = xfs_qm_dqget(mp, id, type, false, &dqp);
1038 if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
1039 *dqpp = dqp;
1043 xfs_qm_dqput(dqp);
1057 struct xfs_dquot *dqp)
1059 ASSERT(dqp->q_nrefs > 0);
1060 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1062 trace_xfs_dqput(dqp);
1064 if (--dqp->q_nrefs == 0) {
1065 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
1066 trace_xfs_dqput_free(dqp);
1068 if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
1069 XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
1071 xfs_dqunlock(dqp);
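
xfs_qm_dqput() (lines 1057-1071) shows that dropping the last reference does not free the dquot: it is parked on the per-mount LRU so the shrinker can reclaim it later, and the "unused" statistic is only bumped when the dquot was not already on the list. A tiny runnable model of that drop-to-LRU decision (hypothetical types, no locking):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical refcounted object that is parked on an LRU, not freed,
 * when its last reference goes away (cf. lines 1064-1071). */
struct mdq {
	int nrefs;
	bool on_lru;
};

static int unused_stat;

static bool lru_add(struct mdq *dq)
{
	if (dq->on_lru)
		return false;	/* already there, like list_lru_add() returning 0 */
	dq->on_lru = true;
	return true;
}

static void dqput(struct mdq *dq)
{
	if (--dq->nrefs == 0) {
		if (lru_add(dq))
			unused_stat++;	/* cf. XFS_STATS_INC(..., xs_qm_dquot_unused) */
	}
}

int main(void)
{
	struct mdq dq = { .nrefs = 2 };

	dqput(&dq);	/* still referenced, nothing happens */
	dqput(&dq);	/* last reference: parked on the LRU */
	printf("on_lru=%d unused=%d\n", dq.on_lru, unused_stat);
	return 0;
}
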
1080 struct xfs_dquot *dqp)
1082 if (!dqp)
1085 trace_xfs_dqrele(dqp);
1087 xfs_dqlock(dqp);
1094 xfs_qm_dqput(dqp);
1109 struct xfs_dquot *dqp = qip->qli_dquot;
1139 xfs_dqfunlock(dqp);
1169 struct xfs_dquot *dqp)
1171 xfs_dqtype_t type = xfs_dquot_type(dqp);
1178 if (dqp->q_id == 0)
1181 if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
1182 !dqp->q_blk.timer)
1185 if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
1186 !dqp->q_ino.timer)
1189 if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
1190 !dqp->q_rtb.timer)
1194 if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
1195 if (!xfs_has_bigtime(dqp->q_mount))
1197 if (dqp->q_id == 0)
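
xfs_qm_dqflush_check() (lines 1169-1197) refuses to write back an inconsistent dquot: the root dquot (id 0) is exempt from the limit checks (line 1178), each resource that is over its soft limit must have a grace-period timer running (lines 1181-1190), and a dquot flagged bigtime is only acceptable when the filesystem has the bigtime feature and the id is nonzero (lines 1194-1197). A small runnable predicate mirroring the per-resource check, with hypothetical field names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-resource usage record (blocks, inodes or rt blocks). */
struct res {
	uint64_t softlimit;
	uint64_t count;
	uint32_t timer;		/* grace-period expiry, 0 = not running */
};

/* Mirrors the checks at lines 1181-1190: over the soft limit
 * with no timer running is an inconsistency. */
static bool res_is_sane(const struct res *r)
{
	if (r->softlimit && r->count > r->softlimit && !r->timer)
		return false;
	return true;
}

int main(void)
{
	struct res ok  = { .softlimit = 100, .count = 150, .timer = 12345 };
	struct res bad = { .softlimit = 100, .count = 150, .timer = 0 };

	printf("ok=%d bad=%d\n", res_is_sane(&ok), res_is_sane(&bad));
	return 0;
}
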
1214 struct xfs_dquot *dqp,
1217 struct xfs_mount *mp = dqp->q_mount;
1218 struct xfs_log_item *lip = &dqp->q_logitem.qli_item;
1224 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1225 ASSERT(!completion_done(&dqp->q_flush));
1227 trace_xfs_dqflush(dqp);
1231 xfs_qm_dqunpin_wait(dqp);
1236 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1244 fa = xfs_qm_dqflush_check(dqp);
1247 dqp->q_id, fa);
1254 dqblk = xfs_buf_offset(bp, dqp->q_bufoffset);
1255 xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);
1260 dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
1262 xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1263 &dqp->q_logitem.qli_item.li_lsn);
1275 dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1285 list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);
1292 trace_xfs_dqflush_force(dqp);
1296 trace_xfs_dqflush_done(dqp);
1301 dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
1305 xfs_dqfunlock(dqp);