/kernel/linux/linux-6.6/fs/gfs2/quota.c
      81  /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
     111  struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);  [gfs2_qd_dealloc()]
     112  struct gfs2_sbd *sdp = qd->qd_sbd;  [gfs2_qd_dealloc()]
     114  kmem_cache_free(gfs2_quotad_cachep, qd);  [gfs2_qd_dealloc()]
     119  static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
     121  struct gfs2_sbd *sdp = qd->qd_sbd;  [gfs2_qd_dispose()]
     124  list_del(&qd->qd_list);  [gfs2_qd_dispose()]
     127  spin_lock_bucket(qd->qd_hash);  [gfs2_qd_dispose()]
     128  hlist_bl_del_rcu(&qd->qd_hlist);  [gfs2_qd_dispose()]
     129  spin_unlock_bucket(qd ...  [gfs2_qd_dispose()]
    Other lines declaring qd (argument/local): 143 gfs2_qd_list_dispose(), 158 gfs2_qd_isolate(), 207 qd2index(), 214 qd2offset(), 221 qd_alloc(), 252 gfs2_qd_search_bucket(), 273 qd_get(), 309 qd_hold(), 316 qd_put(), 338 slot_get(), 361 slot_hold(), 371 slot_put(), 384 bh_get(), 438 bh_put(), 452 qd_check_sync(), 481 qd_bh_get_or_undo(), 497 qd_fish(), 526 qdsb_put(), 533 qd_unlock(), 614 gfs2_quota_hold(), 700 do_qc(), 854 gfs2_adjust_quota(), 916 do_sync(), 1005 update_qd(), 1030 do_glock(), 1082 gfs2_quota_lock(), 1115 need_sync(), 1162 gfs2_quota_unlock(), 1194 print_message(), 1228 gfs2_quota_check(), 1284 gfs2_quota_change(), 1310 qd_changed(), 1373 gfs2_quota_refresh(), 1439 gfs2_quota_init(), 1492 gfs2_quota_cleanup(), 1648 gfs2_get_dqblk(), 1687 gfs2_set_dqblk()  [all...]
/kernel/linux/linux-5.10/fs/gfs2/quota.c
      78  /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
     108  struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);  [gfs2_qd_dealloc()]
     109  kmem_cache_free(gfs2_quotad_cachep, qd);  [gfs2_qd_dealloc()]
     114  struct gfs2_quota_data *qd;  [gfs2_qd_dispose()]
     118  qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);  [gfs2_qd_dispose()]
     119  sdp = qd->qd_gl->gl_name.ln_sbd;  [gfs2_qd_dispose()]
     121  list_del(&qd->qd_lru);  [gfs2_qd_dispose()]
     125  list_del(&qd->qd_list);  [gfs2_qd_dispose()]
     128  spin_lock_bucket(qd->qd_hash);  [gfs2_qd_dispose()]
     129  hlist_bl_del_rcu(&qd ...  [gfs2_qd_dispose()]
    Other lines declaring qd (argument/local): 149 gfs2_qd_isolate(), 194 qd2index(), 201 qd2offset(), 213 qd_alloc(), 244 gfs2_qd_search_bucket(), 265 qd_get(), 300 qd_hold(), 307 qd_put(), 318 slot_get(), 342 slot_hold(), 352 slot_put(), 365 bh_get(), 412 bh_put(), 426 qd_check_sync(), 457 qd_fish(), 495 qd_unlock(), 531 qdsb_put(), 586 gfs2_quota_hold(), 671 do_qc(), 836 gfs2_adjust_quota(), 899 do_sync(), 993 update_qd(), 1018 do_glock(), 1069 gfs2_quota_lock(), 1101 need_sync(), 1149 gfs2_quota_unlock(), 1188 print_message(), 1221 gfs2_quota_check(), 1277 gfs2_quota_change(), 1348 gfs2_quota_refresh(), 1414 gfs2_quota_init(), 1468 gfs2_quota_cleanup(), 1647 gfs2_get_dqblk(), 1686 gfs2_set_dqblk()  [all...]
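Both versions of quota.c open with the same ordering rule: qd_lock, then the hash-bucket lock, then qd->lockref.lock, then the LRU lock. The sketch below only illustrates that idea of a fixed nesting order, using ordinary pthread mutexes; the lock names and the userspace locking API are stand-ins, not GFS2's actual code.

    /* Illustrative only: every path that needs several of these locks
     * takes them in the same documented order and releases them in
     * reverse, so no two paths can deadlock against each other. */
    #include <pthread.h>

    static pthread_mutex_t qd_lock     = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t entry_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lru_lock    = PTHREAD_MUTEX_INITIALIZER;

    static void dispose_entry(void)
    {
        pthread_mutex_lock(&qd_lock);      /* outermost: global list lock  */
        pthread_mutex_lock(&bucket_lock);  /* then the hash-bucket lock    */
        pthread_mutex_lock(&entry_lock);   /* then the per-entry ref lock  */
        pthread_mutex_lock(&lru_lock);     /* innermost: the LRU list lock */

        /* ... unlink the entry from list, hash bucket and LRU here ... */

        pthread_mutex_unlock(&lru_lock);   /* release in reverse order */
        pthread_mutex_unlock(&entry_lock);
        pthread_mutex_unlock(&bucket_lock);
        pthread_mutex_unlock(&qd_lock);
    }

    int main(void)
    {
        dispose_entry();
        return 0;
    }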
/kernel/linux/linux-5.10/fs/erofs/namei.c
      18  const struct erofs_qstr *qd,  [erofs_dirnamecmp()]
      28  DBG_BUGON(qd->name > qd->end);  [erofs_dirnamecmp()]
      30  /* qd could not have trailing '\0' */
      31  /* However it is absolutely safe if < qd->end */
      32  while (qd->name + i < qd->end && qd->name[i] != '\0') {  [erofs_dirnamecmp()]
      33  if (qn->name[i] != qd->name[i]) {  [erofs_dirnamecmp()]
      35  return qn->name[i] > qd ...  [erofs_dirnamecmp()]
    Argument declaration at line 17: erofs_dirnamecmp(const struct erofs_qstr *qn, const struct erofs_qstr *qd, unsigned int *matched)  [all...]
/kernel/linux/linux-6.6/fs/erofs/namei.c
      17  const struct erofs_qstr *qd,  [erofs_dirnamecmp()]
      27  DBG_BUGON(qd->name > qd->end);  [erofs_dirnamecmp()]
      29  /* qd could not have trailing '\0' */
      30  /* However it is absolutely safe if < qd->end */
      31  while (qd->name + i < qd->end && qd->name[i] != '\0') {  [erofs_dirnamecmp()]
      32  if (qn->name[i] != qd->name[i]) {  [erofs_dirnamecmp()]
      34  return qn->name[i] > qd ...  [erofs_dirnamecmp()]
    Argument declaration at line 16: erofs_dirnamecmp(const struct erofs_qstr *qn, const struct erofs_qstr *qd, unsigned int *matched)  [all...]
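The hits above show the core of erofs_dirnamecmp(): the on-disk name qd is bounded by qd->end and may have no trailing '\0', so the loop stops at that boundary or at an embedded NUL and compares byte by byte against the query name qn. Below is a self-contained userspace sketch of that comparison idea; the struct and function names are invented, and the handling after the loop is an assumption rather than EROFS's exact logic.

    #include <stdio.h>

    /* A bounded string: bytes in [name, end) with no guaranteed '\0'. */
    struct bounded_name {
        const char *name;
        const char *end;
    };

    /* Compare a NUL-terminated query `qn` against the bounded on-disk
     * name `qd`, in the spirit of the loop shown above: stop at qd->end
     * or at an embedded '\0', and return <0, 0 or >0 like strcmp(). */
    static int bounded_namecmp(const char *qn, const struct bounded_name *qd)
    {
        unsigned int i = 0;

        while (qd->name + i < qd->end && qd->name[i] != '\0') {
            if (qn[i] != qd->name[i])
                return qn[i] > qd->name[i] ? 1 : -1;
            ++i;
        }
        /* On-disk name exhausted: the query matches iff it also ends here. */
        return qn[i] == '\0' ? 0 : 1;
    }

    int main(void)
    {
        const char disk[] = { 'f', 'o', 'o' };          /* no trailing '\0' */
        struct bounded_name qd = { disk, disk + sizeof(disk) };

        printf("%d %d %d\n",
               bounded_namecmp("foo", &qd),             /* 0  */
               bounded_namecmp("fo", &qd),              /* -1 */
               bounded_namecmp("foobar", &qd));         /* 1  */
        return 0;
    }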
/kernel/linux/linux-5.10/block/blk-iolatency.c
      50  * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
     314  static inline unsigned long scale_amount(unsigned long qd, bool up)
     316  return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);  [scale_amount()]
     320  * We scale the qd down faster than we scale up, so we need to use this helper
     332  unsigned long qd = blkiolat->rqos.q->nr_requests;  [scale_cookie_change()]
     333  unsigned long scale = scale_amount(qd, up);  [scale_cookie_change()]
     335  unsigned long max_scale = qd << 1;  [scale_cookie_change()]
     345  else if (diff > qd)  [scale_cookie_change()]
     356  if (diff > qd) {  [scale_cookie_change()]
     372  unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;  [scale_change()]  [all...]
/kernel/linux/linux-6.6/block/blk-iolatency.c
      50  * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
     315  static inline unsigned long scale_amount(unsigned long qd, bool up)
     317  return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);  [scale_amount()]
     321  * We scale the qd down faster than we scale up, so we need to use this helper
     333  unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;  [scale_cookie_change()]
     334  unsigned long scale = scale_amount(qd, up);  [scale_cookie_change()]
     336  unsigned long max_scale = qd << 1;  [scale_cookie_change()]
     346  else if (diff > qd)  [scale_cookie_change()]
     357  if (diff > qd) {  [scale_cookie_change()]
     375  unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;  [scale_change()]  [all...]
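scale_amount() is shown in full: the scaling step is the current queue depth shifted right by a factor, clamped to at least 1, and (per the comment at line 320/321) the down shift is chosen so the queue depth shrinks faster than it grows. A standalone sketch follows; the two shift values here are assumptions for illustration, the real SCALE_UP_FACTOR/SCALE_DOWN_FACTOR are defined in blk-iolatency.c.

    #include <stdio.h>

    /* Assumed values for illustration; see blk-iolatency.c for the
     * kernel's actual SCALE_UP_FACTOR / SCALE_DOWN_FACTOR definitions. */
    #define SCALE_UP_FACTOR   4   /* step up by qd/16  */
    #define SCALE_DOWN_FACTOR 2   /* step down by qd/4 */

    /* Mirror of the return statement shown above: the step is a shift of
     * the current queue depth, never smaller than 1. */
    static unsigned long scale_amount(unsigned long qd, int up)
    {
        unsigned long step = up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR;

        return step > 1UL ? step : 1UL;
    }

    int main(void)
    {
        printf("qd=128: up step %lu, down step %lu\n",
               scale_amount(128, 1), scale_amount(128, 0));   /* 8, 32 */
        printf("qd=1:   up step %lu, down step %lu\n",
               scale_amount(1, 1), scale_amount(1, 0));       /* 1, 1  */
        return 0;
    }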
/kernel/linux/linux-5.10/drivers/s390/block/scm_blk.c
     285  const struct blk_mq_queue_data *qd)  [scm_blk_request()]
     290  struct request *req = qd->rq;  [scm_blk_request()]
     325  if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {  [scm_blk_request()]
     336  struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);  [scm_blk_init_hctx()]
     338  if (!qd)  [scm_blk_init_hctx()]
     341  spin_lock_init(&qd->lock);  [scm_blk_init_hctx()]
     342  hctx->driver_data = qd;  [scm_blk_init_hctx()]
     349  struct scm_queue *qd = hctx->driver_data;  [scm_blk_exit_hctx()]
     351  WARN_ON(qd ...  [scm_blk_exit_hctx()]
    Argument declaration at line 284: scm_blk_request(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *qd)  [all...]
/kernel/linux/linux-6.6/drivers/s390/block/scm_blk.c
     284  const struct blk_mq_queue_data *qd)  [scm_blk_request()]
     289  struct request *req = qd->rq;  [scm_blk_request()]
     324  if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {  [scm_blk_request()]
     335  struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);  [scm_blk_init_hctx()]
     337  if (!qd)  [scm_blk_init_hctx()]
     340  spin_lock_init(&qd->lock);  [scm_blk_init_hctx()]
     341  hctx->driver_data = qd;  [scm_blk_init_hctx()]
     348  struct scm_queue *qd = hctx->driver_data;  [scm_blk_exit_hctx()]
     350  WARN_ON(qd ...  [scm_blk_exit_hctx()]
    Argument declaration at line 283: scm_blk_request(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *qd)  [all...]
/kernel/linux/linux-6.6/drivers/scsi/aacraid/aachba.c
    3261  struct aac_query_disk qd;  [query_disk()]
    3267  if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))  [query_disk()]
    3269  if (qd.cnum == -1) {  [query_disk()]
    3270  if (qd.id < 0 || qd.id >= dev->maximum_num_containers)  [query_disk()]
    3272  qd.cnum = qd.id;  [query_disk()]
    3273  } else if ((qd.bus == -1) && (qd.id == -1) && (qd ...  [query_disk()]  [all...]
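query_disk() follows the common ioctl shape: copy a struct aac_query_disk from user space, then validate the requested container before touching it (cnum == -1 means "select by id"). The sketch below mirrors only the validation visible above; the struct layout, limit and return convention are simplifications, not aacraid's real definitions.

    #include <stdio.h>

    /* Assumed, simplified layout: only the fields visible in the hits above. */
    struct query_disk {
        int cnum;   /* container number, or -1 to select by id */
        int bus;
        int id;
    };

    /* When cnum == -1, the id must be a valid container index and becomes
     * the container number; otherwise leave the request as given. */
    static int resolve_container(struct query_disk *qd, int max_containers)
    {
        if (qd->cnum == -1) {
            if (qd->id < 0 || qd->id >= max_containers)
                return -1;                 /* invalid id */
            qd->cnum = qd->id;
        }
        /* The kernel code goes on to handle the (bus, id) == (-1, -1) case
         * and then looks the container up; that part is omitted here. */
        return 0;
    }

    int main(void)
    {
        struct query_disk qd = { .cnum = -1, .bus = 0, .id = 3 };

        if (resolve_container(&qd, 8) == 0)
            printf("container %d\n", qd.cnum);   /* container 3 */
        return 0;
    }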
/kernel/linux/linux-5.10/drivers/atm/firestream.c
     623  static int qd;  [file-scope variable]
    1194  da[qd] = td;  [fs_send()]
    1195  dq[qd].flags = td->flags;  [fs_send()]
    1196  dq[qd].next = td->next;  [fs_send()]
    1197  dq[qd].bsa = td->bsa;  [fs_send()]
    1198  dq[qd].skb = td->skb;  [fs_send()]
    1199  dq[qd].dev = td->dev;  [fs_send()]
    1200  qd++;  [fs_send()]
    1201  if (qd >= 60) qd ...  [fs_send()]  [all...]
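The firestream hits are a small debugging aid: each transmit descriptor passed through fs_send() is also copied into a fixed array indexed by the file-scope counter qd, which appears to wrap once it reaches 60. A self-contained sketch of such a wrap-around log follows; the entry fields and the wrap-to-zero behaviour are illustrative assumptions based on the truncated hit at line 1201.

    #include <stdio.h>

    #define LOG_ENTRIES 60          /* same wrap point as the counter shown above */

    struct log_entry {              /* illustrative payload, not the driver's fields */
        unsigned int flags;
        void *skb;
    };

    static struct log_entry dq[LOG_ENTRIES];
    static int qd;                  /* next free slot, wraps at LOG_ENTRIES */

    static void log_descriptor(unsigned int flags, void *skb)
    {
        dq[qd].flags = flags;
        dq[qd].skb = skb;
        qd++;
        if (qd >= LOG_ENTRIES)
            qd = 0;                 /* overwrite the oldest entries */
    }

    int main(void)
    {
        for (int i = 0; i < 65; i++)
            log_descriptor(i, NULL);
        printf("next slot after 65 inserts: %d\n", qd);   /* 5 */
        return 0;
    }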
/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/qib_qsfp.c
     461  void qib_qsfp_init(struct qib_qsfp_data *qd,
     466  struct qib_devdata *dd = qd->ppd->dd;  [qib_qsfp_init()]
     469  INIT_WORK(&qd->work, fevent);  [qib_qsfp_init()]
     478  if (qd->ppd->hw_pidx) {  [qib_qsfp_init()]

/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/qib_qsfp.h
     187  extern void qib_qsfp_init(struct qib_qsfp_data *qd,

/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/qib_iba7322.c
    2979  struct qib_qsfp_data *qd;  [unknown_7322_gpio_intr()]
    2990  qd = &ppd->cpspec->qsfp_data;  [unknown_7322_gpio_intr()]
    2996  qd->t_insert = jiffies;  [unknown_7322_gpio_intr()]
    2997  queue_work(ib_wq, &qd->work);  [unknown_7322_gpio_intr()]
    5572  struct qib_qsfp_data *qd =  [qib_7322_ib_updown()]
    5584  qd->t_insert = jiffies;  [qib_7322_ib_updown()]
    5585  queue_work(ib_wq, &qd->work);  [qib_7322_ib_updown()]
    5935  struct qib_qsfp_data *qd;  [qsfp_7322_event()]
    5942  qd = container_of(work, struct qib_qsfp_data, work);  [qsfp_7322_event()]
    5943  ppd = qd ...  [qsfp_7322_event()]
    Other lines declaring qd (local): 6019 qib_init_7322_qsfp() (struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data), 7619 find_best_ent() (struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache)  [all...]
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/qib_qsfp.c
     461  void qib_qsfp_init(struct qib_qsfp_data *qd,
     466  struct qib_devdata *dd = qd->ppd->dd;  [qib_qsfp_init()]
     469  INIT_WORK(&qd->work, fevent);  [qib_qsfp_init()]
     478  if (qd->ppd->hw_pidx) {  [qib_qsfp_init()]

/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/qib_qsfp.h
     187  extern void qib_qsfp_init(struct qib_qsfp_data *qd,

/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/qib_iba7322.c
    2957  struct qib_qsfp_data *qd;  [unknown_7322_gpio_intr()]
    2968  qd = &ppd->cpspec->qsfp_data;  [unknown_7322_gpio_intr()]
    2974  qd->t_insert = jiffies;  [unknown_7322_gpio_intr()]
    2975  queue_work(ib_wq, &qd->work);  [unknown_7322_gpio_intr()]
    5550  struct qib_qsfp_data *qd =  [qib_7322_ib_updown()]
    5562  qd->t_insert = jiffies;  [qib_7322_ib_updown()]
    5563  queue_work(ib_wq, &qd->work);  [qib_7322_ib_updown()]
    5913  struct qib_qsfp_data *qd;  [qsfp_7322_event()]
    5920  qd = container_of(work, struct qib_qsfp_data, work);  [qsfp_7322_event()]
    5921  ppd = qd ...  [qsfp_7322_event()]
    Other lines declaring qd (local): 5997 qib_init_7322_qsfp() (struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data), 7590 find_best_ent() (struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache)  [all...]
/kernel/linux/linux-5.10/drivers/scsi/aacraid/aachba.c
    3281  struct aac_query_disk qd;  [query_disk()]
    3287  if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))  [query_disk()]
    3289  if (qd.cnum == -1) {  [query_disk()]
    3290  if (qd.id < 0 || qd.id >= dev->maximum_num_containers)  [query_disk()]
    3292  qd.cnum = qd.id;  [query_disk()]
    3293  } else if ((qd.bus == -1) && (qd.id == -1) && (qd ...  [query_disk()]  [all...]
/kernel/linux/linux-5.10/drivers/mailbox/ti-msgmgr.c
     581  * @qd: Queue description data
     591  const struct ti_msgmgr_valid_queue_desc *qd,  [ti_msgmgr_queue_setup()]
     597  qinst->proxy_id = qd->proxy_id;  [ti_msgmgr_queue_setup()]
     598  qinst->queue_id = qd->queue_id;  [ti_msgmgr_queue_setup()]
     632  qinst->is_tx = qd->is_tx;  [ti_msgmgr_queue_setup()]
    Argument declaration at line 587: ti_msgmgr_queue_setup(int idx, struct device *dev, struct device_node *np, struct ti_msgmgr_inst *inst, const struct ti_msgmgr_desc *d, const struct ti_msgmgr_valid_queue_desc *qd, struct ti_queue_inst *qinst, struct mbox_chan *chan)
/kernel/linux/linux-6.6/drivers/mailbox/ti-msgmgr.c
     631  * @qd: Queue description data
     641  const struct ti_msgmgr_valid_queue_desc *qd,  [ti_msgmgr_queue_setup()]
     647  qinst->proxy_id = qd->proxy_id;  [ti_msgmgr_queue_setup()]
     648  qinst->queue_id = qd->queue_id;  [ti_msgmgr_queue_setup()]
     682  qinst->is_tx = qd->is_tx;  [ti_msgmgr_queue_setup()]
    Argument declaration at line 637: ti_msgmgr_queue_setup(int idx, struct device *dev, struct device_node *np, struct ti_msgmgr_inst *inst, const struct ti_msgmgr_desc *d, const struct ti_msgmgr_valid_queue_desc *qd, struct ti_queue_inst *qinst, struct mbox_chan *chan)
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
     532  const struct kfd_criu_queue_priv_data *qd,  [create_queue_nocpsch()]
     573  retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);  [create_queue_nocpsch()]
     579  retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);  [create_queue_nocpsch()]
     593  if (qd)  [create_queue_nocpsch()]
     596  qd->ctl_stack_size);  [create_queue_nocpsch()]
    1779  const struct kfd_criu_queue_priv_data *qd,  [create_queue_cpsch()]
    1795  retval = allocate_sdma_queue(dqm, q, qd ? &qd ...  [create_queue_cpsch()]
    Argument declarations: line 529 create_queue_nocpsch(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd, const struct kfd_criu_queue_priv_data *qd, const void *restore_mqd, const void *restore_ctl_stack); line 1777 create_queue_cpsch() with the same parameter list  [all...]

/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
     135  const struct kfd_criu_queue_priv_data *qd,
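In both create_queue paths qd is the CRIU-saved per-queue data: when it is non-NULL, the previously checkpointed sdma_id and doorbell_id are handed to the allocators so the restored queue reclaims the same resources, and NULL lets the allocator choose freely. The sketch below shows that "optional preferred id" pattern in plain C; the allocator and type names are made up, only the qd ? &qd->sdma_id : NULL shape comes from the listing.

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical saved state, standing in for the CRIU private data. */
    struct saved_queue {
        unsigned int sdma_id;
    };

    /* Allocate an id; if *preferred is given (non-NULL), try to reuse it. */
    static int allocate_id(const unsigned int *preferred, unsigned int *out)
    {
        static bool used[16];
        unsigned int id = preferred ? *preferred : 0;

        if (preferred) {
            if (id >= 16 || used[id])
                return -1;              /* saved id no longer available */
        } else {
            while (id < 16 && used[id])
                id++;
            if (id == 16)
                return -1;              /* nothing free */
        }
        used[id] = true;
        *out = id;
        return 0;
    }

    int main(void)
    {
        struct saved_queue *qd = &(struct saved_queue){ .sdma_id = 7 };
        unsigned int id;

        /* Same shape as allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL). */
        if (allocate_id(qd ? &qd->sdma_id : NULL, &id) == 0)
            printf("restored queue on id %u\n", id);   /* id 7 */
        return 0;
    }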
/kernel/linux/linux-5.10/drivers/clk/at91/clk-audio-pll.c
      47  #define AUDIO_PLL_QDPAD(qd, div) ((AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(qd) & \ ...
     358  pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",  [clk_audio_pll_pmc_round_rate()]
/kernel/linux/linux-6.6/drivers/clk/at91/clk-audio-pll.c
      47  #define AUDIO_PLL_QDPAD(qd, div) ((AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(qd) & \ ...
     358  pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",  [clk_audio_pll_pmc_round_rate()]
/kernel/linux/linux-6.6/arch/s390/include/asm/ap.h
     113  unsigned int qd : 4; /* queue depth */
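The single hit in ap.h is a 4-bit queue-depth field in a packed status word, so the reported depth can only range from 0 to 15. A minimal illustration of such a C bitfield follows; the surrounding struct is invented, only the qd : 4 member mirrors the header.

    #include <stdio.h>

    /* Invented container struct; only the 4-bit queue-depth field mirrors
     * the "unsigned int qd : 4" member shown in ap.h. */
    struct queue_status {
        unsigned int other : 28;
        unsigned int qd    : 4;    /* queue depth, 0..15 by construction */
    };

    int main(void)
    {
        struct queue_status st = { .qd = 12 };

        printf("queue depth %u (max representable %u)\n", st.qd, (1u << 4) - 1);
        return 0;
    }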
/kernel/linux/linux-5.10/include/linux/intel-iommu.h
     389  #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
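QI_DEV_EIOTLB_QDEP() masks the queue depth to 5 bits and shifts it to bit position 4 of a 64-bit invalidation-descriptor word. The short check below just exercises that packing; nothing else about the descriptor layout is implied.

    #include <stdio.h>
    #include <stdint.h>

    /* Same packing as the macro shown above: keep 5 bits of qd, place them
     * at bits 8:4 of the descriptor qword. */
    #define QI_DEV_EIOTLB_QDEP(qd) ((uint64_t)((qd) & 0x1f) << 4)

    int main(void)
    {
        printf("qdep 16 -> 0x%llx\n",
               (unsigned long long)QI_DEV_EIOTLB_QDEP(16));   /* 0x100 */
        printf("qdep 33 -> 0x%llx\n",                         /* wraps: 33 & 0x1f = 1 */
               (unsigned long long)QI_DEV_EIOTLB_QDEP(33));   /* 0x10  */
        return 0;
    }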