/kernel/linux/linux-5.10/arch/x86/um/tls_32.c
    99:  * Actually, now if it wasn't flushed it gets cleared and  [load_TLS()]
    100: * flushed to the host, which will clear it.  [load_TLS()]
    103: if (!curr->flushed) {  [load_TLS()]
    112: if (!(flags & O_FORCE) && curr->flushed)  [load_TLS()]
    119: curr->flushed = 1;  [load_TLS()]
    127: * present desc's, only if they haven't been flushed.
    142: if (curr->flushed)  [needs_TLS_update()]
    151: * On a newly forked process, the TLS descriptors haven't yet been flushed. So
    164: * will remain as flushed as it was.  [clear_flushed_tls()]
    169: curr->flushed ...  [clear_flushed_tls()]
    200: set_tls_entry(struct task_struct* task, struct user_desc *info, int idx, int flushed)  [set_tls_entry() argument]
    [more matches not shown]

/kernel/linux/linux-6.6/arch/x86/um/tls_32.c
    99:  * Actually, now if it wasn't flushed it gets cleared and  [load_TLS()]
    100: * flushed to the host, which will clear it.  [load_TLS()]
    103: if (!curr->flushed) {  [load_TLS()]
    112: if (!(flags & O_FORCE) && curr->flushed)  [load_TLS()]
    119: curr->flushed = 1;  [load_TLS()]
    127: * present desc's, only if they haven't been flushed.
    142: if (curr->flushed)  [needs_TLS_update()]
    151: * On a newly forked process, the TLS descriptors haven't yet been flushed. So
    164: * will remain as flushed as it was.  [clear_flushed_tls()]
    169: curr->flushed ...  [clear_flushed_tls()]
    200: set_tls_entry(struct task_struct* task, struct user_desc *info, int idx, int flushed)  [set_tls_entry() argument]
    [more matches not shown]

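The tls_32.c matches above (identical in both kernel versions) show the pattern behind the flag: a TLS entry is pushed to the host process only if it is not already flushed, it is marked flushed afterwards, and the flags are cleared again after a fork so the child re-flushes everything. Below is a minimal userspace sketch of that bookkeeping; the names mirror the snippets, but the bodies are simplified stand-ins (a printf replaces the real host call), not the kernel implementation.

    #include <stdbool.h>
    #include <stdio.h>

    #define GDT_ENTRY_TLS_ENTRIES 3
    #define O_FORCE 1

    struct uml_tls_entry {
        bool present;   /* the slot holds a descriptor */
        bool flushed;   /* the host already has the current value */
    };

    static struct uml_tls_entry tls[GDT_ENTRY_TLS_ENTRIES];

    /* Push present entries to the "host", skipping ones it already has
     * unless the caller forces a full reload. */
    static void load_tls(int flags)
    {
        for (int i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) {
            struct uml_tls_entry *curr = &tls[i];

            if (!curr->present)
                continue;
            if (!(flags & O_FORCE) && curr->flushed)
                continue;

            printf("pushing TLS entry %d to the host\n", i);  /* stand-in for the host call */
            curr->flushed = true;
        }
    }

    /* After a fork nothing has been pushed to the child's host process yet,
     * so every entry must be considered unflushed again. */
    static void clear_flushed_tls(void)
    {
        for (int i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
            tls[i].flushed = false;
    }

    int main(void)
    {
        tls[0].present = true;
        load_tls(0);           /* pushes entry 0 */
        load_tls(0);           /* pushes nothing: entry 0 is already flushed */
        clear_flushed_tls();   /* e.g. after fork */
        load_tls(0);           /* pushes entry 0 again */
        return 0;
    }

Running it pushes entry 0, skips it while it stays flushed, and pushes it again once the flags are cleared.
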
/kernel/linux/linux-6.6/arch/x86/lib/usercopy_64.c
    48: unsigned long flushed, dest = (unsigned long) dst;  [__copy_user_flushcache() local]
    72: flushed = dest - (unsigned long) dst;  [__copy_user_flushcache()]
    73: if (size > flushed && !IS_ALIGNED(size - flushed, 8))  [__copy_user_flushcache()]

/kernel/linux/linux-5.10/arch/x86/lib/usercopy_64.c
    89: unsigned long flushed, dest = (unsigned long) dst;  [__copy_user_flushcache() local]
    109: flushed = dest - (unsigned long) dst;  [__copy_user_flushcache()]
    110: if (size > flushed && !IS_ALIGNED(size - flushed, 8))  [__copy_user_flushcache()]

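The __copy_user_flushcache() matches show how "flushed" is used there: the bulk of the copy goes through non-temporal stores, an unaligned head is flushed explicitly up to the next cache-line boundary, "flushed" records how many leading bytes that covered, and if the remaining tail is not a multiple of 8 bytes its last cache line is flushed as well. A hedged userspace model of that edge handling follows; clean_cache_range() here is only a printing stand-in for the real flush, and CACHELINE/ALIGN_UP are local helpers, not kernel APIs.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CACHELINE 64
    #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    /* Stand-in for the real cache-line flush: just report what would be flushed. */
    static void clean_cache_range(void *addr, size_t size)
    {
        printf("flush cache line(s) covering %p .. %p\n",
               addr, (void *)((char *)addr + size - 1));
    }

    /*
     * Model of the edge handling around a non-temporal copy: the aligned bulk
     * bypasses the CPU cache, but an unaligned head or a tail that is not a
     * multiple of 8 bytes goes through cached stores and must be flushed.
     */
    static void flush_unaligned_edges(void *dst, size_t size)
    {
        uintptr_t dest = (uintptr_t)dst;
        size_t flushed;

        if (!IS_ALIGNED(dest, 8)) {
            dest = ALIGN_UP(dest, CACHELINE);  /* first line handled here, not by NT stores */
            clean_cache_range(dst, 1);         /* head: flush the partial first line */
        }

        /* Number of leading bytes already covered by the head flush. */
        flushed = dest - (uintptr_t)dst;

        /* Tail: if what remains is not a multiple of 8, its last line was cached too. */
        if (size > flushed && !IS_ALIGNED(size - flushed, 8))
            clean_cache_range((char *)dst + size - 1, 1);
    }

    static char buf[256] __attribute__((aligned(64)));

    int main(void)
    {
        flush_unaligned_edges(buf + 3, 100);  /* unaligned start, tail not a multiple of 8 */
        return 0;
    }
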
/kernel/linux/linux-5.10/arch/x86/um/asm/processor_32.h
    17: unsigned flushed:1;  [member]
    30: { .present = 0, .flushed = 0 } }, \

/kernel/linux/linux-6.6/arch/x86/um/asm/processor_32.h
    17: unsigned flushed:1;  [member]
    30: { .present = 0, .flushed = 0 } }, \

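The processor_32.h matches define the flag itself: a one-bit "flushed" member next to a one-bit "present" member in the per-thread TLS bookkeeping, zero-initialized. A small sketch of that layout follows; the field names match the snippets, the descriptor payload is elided, and the range designator is a GCC extension as used in the kernel.

    #include <stdio.h>

    #define GDT_ENTRY_TLS_ENTRIES 3

    /* One saved TLS slot plus two one-bit status flags, as in the snippets:
     * "present" = the slot is in use, "flushed" = the host already has it. */
    struct uml_tls_struct {
        /* struct user_desc tls; */   /* descriptor contents elided in this sketch */
        unsigned present:1;
        unsigned flushed:1;
    };

    /* Same shape as the initializer in the header: everything starts cleared. */
    static struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES] = {
        [0 ... GDT_ENTRY_TLS_ENTRIES - 1] = { .present = 0, .flushed = 0 },
    };

    int main(void)
    {
        /* The two flags pack into a single word per entry. */
        printf("sizeof(struct uml_tls_struct) = %zu\n", sizeof(struct uml_tls_struct));
        printf("entry 0: present=%u flushed=%u\n",
               tls_array[0].present, tls_array[0].flushed);
        return 0;
    }
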
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
    357: struct dma_fence *flushed;  [amdgpu_vmid_grab_used() local]
    371: flushed = (*id)->flushed_updates;  [amdgpu_vmid_grab_used()]
    372: if (updates && (!flushed || dma_fence_is_later(updates, flushed)))  [amdgpu_vmid_grab_used()]
    385: if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {  [amdgpu_vmid_grab_used()]

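In amdgpu_ids.c, "flushed" is the fence of the last page-table updates flushed for a VMID, and the ID can only be reused without a flush if there are no pending updates newer than that fence. A sketch of the comparison follows, with fences modelled as plain sequence numbers and fence_is_later() standing in for dma_fence_is_later(); none of this is the driver's actual data model.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct dma_fence: modelled as a bare sequence number. */
    struct fence {
        uint64_t seqno;
    };

    /* Stand-in for dma_fence_is_later(): is 'a' a later point in time than 'b'? */
    static bool fence_is_later(const struct fence *a, const struct fence *b)
    {
        return a->seqno > b->seqno;
    }

    struct vmid {
        const struct fence *flushed_updates;  /* last page-table update flushed for this ID */
    };

    /* Decide whether reusing this VMID requires a VM flush first. */
    static bool vmid_needs_flush(const struct vmid *id, const struct fence *updates)
    {
        const struct fence *flushed = id->flushed_updates;

        /* Pending updates newer than anything flushed => flush needed. */
        return updates && (!flushed || fence_is_later(updates, flushed));
    }

    int main(void)
    {
        struct fence older = { .seqno = 10 }, newer = { .seqno = 20 };
        struct vmid id = { .flushed_updates = &older };

        printf("needs flush with newer updates: %d\n", vmid_needs_flush(&id, &newer));
        printf("needs flush with older updates: %d\n", vmid_needs_flush(&id, &older));
        return 0;
    }
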
/kernel/linux/linux-5.10/drivers/net/ppp/ppp_mppe.c
    439: int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;  [mppe_decompress() local]
    476: if (!state->stateful && !flushed) {  [mppe_decompress()]
    482: if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {  [mppe_decompress()]
    522: if (!flushed) {  [mppe_decompress()]
    547: if (flushed)  [mppe_decompress()]

/kernel/linux/linux-6.6/drivers/net/ppp/ppp_mppe.c
    439: int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;  [mppe_decompress() local]
    476: if (!state->stateful && !flushed) {  [mppe_decompress()]
    482: if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {  [mppe_decompress()]
    522: if (!flushed) {  [mppe_decompress()]
    547: if (flushed)  [mppe_decompress()]

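In ppp_mppe.c, "flushed" is the FLUSHED (A) bit from the MPPE header: stateless mode expects it on every packet, and stateful mode requires it on packets whose coherency-count low byte has wrapped to 0xff. A simplified sketch of just those acceptance checks follows; key handling and decryption are omitted, and MPPE_BIT_FLUSHED is assumed to be the high control bit as in the kernel header.

    #include <stdbool.h>
    #include <stdio.h>

    #define MPPE_BIT_FLUSHED 0x80   /* the 'A' (flushed) flag in the MPPE control bits */

    /*
     * Simplified acceptance check for an incoming MPPE packet, modelled on the
     * conditions visible in the matches above.  'ccount' is the 12-bit
     * coherency count from the header, 'bits' the MPPE control bits.
     */
    static bool mppe_packet_acceptable(bool stateful, unsigned ccount, unsigned bits)
    {
        bool flushed = bits & MPPE_BIT_FLUSHED;

        /* Stateless mode rekeys every packet, so every packet must be flagged flushed. */
        if (!stateful && !flushed)
            return false;

        /* Stateful mode rekeys when the low byte of the count wraps; that packet
         * must carry the flushed bit as well. */
        if (stateful && ((ccount & 0xff) == 0xff) && !flushed)
            return false;

        return true;
    }

    int main(void)
    {
        printf("%d\n", mppe_packet_acceptable(false, 0x001, 0));                  /* rejected */
        printf("%d\n", mppe_packet_acceptable(true, 0x0ff, MPPE_BIT_FLUSHED));    /* accepted */
        return 0;
    }
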
/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/cq.c
    205: int flushed = 0;  [c4iw_flush_rq() local]
    212: flushed++;  [c4iw_flush_rq()]
    214: return flushed;  [c4iw_flush_rq()]
    240: int flushed = 0;  [c4iw_flush_sq() local]
    252: swsqe->flushed = 1;  [c4iw_flush_sq()]
    257: flushed++;  [c4iw_flush_sq()]
    261: wq->sq.flush_cidx += flushed;  [c4iw_flush_sq()]
    264: return flushed;  [c4iw_flush_sq()]
    291: swsqe->flushed = 1;  [flush_completed_wrs()]
    362: if (qhp->wq.flushed ...  [c4iw_flush_hw_cq()]
    [more matches not shown]

/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/restrack.c
    44: if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))  [fill_sq()]
    109: if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))  [fill_swsqe()]

/kernel/linux/linux-5.10/drivers/gpu/drm/savage/savage_bci.c
    319: dev_priv->dma_pages[i].flushed = 0;  [savage_dma_init()]
    338: dev_priv->dma_pages[i].flushed = 0;  [savage_dma_reset()]
    400: dev_priv->dma_pages[i].flushed = 0;  [savage_dma_alloc()]
    438: dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)  [savage_dma_flush()]
    446: DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "  [savage_dma_flush()]
    448: first, cur, dev_priv->dma_pages[first].flushed,  [savage_dma_flush()]
    467: dev_priv->dma_pages[first].flushed) * 4;  [savage_dma_flush()]
    469: dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;  [savage_dma_flush()]
    488: dev_priv->dma_pages[i].flushed = 0;  [savage_dma_flush()]
    494: dev_priv->dma_pages[cur].flushed ...  [savage_dma_flush()]
    [more matches not shown]

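The savage_bci.c matches suggest per-page counters: "used" is how much of a DMA page has been filled and "flushed" how much of that has already been handed to the hardware, so a flush only emits the delta and then advances "flushed". A minimal sketch of that bookkeeping follows; the page count and the emit step are invented placeholders, not the driver's real structures.

    #include <stdio.h>

    #define DMA_PAGES 4

    /* Per-page bookkeeping: how much of the page has been filled ("used") and
     * how much of that has already been handed to the hardware ("flushed"),
     * both counted in 32-bit words. */
    struct dma_page {
        unsigned used;
        unsigned flushed;
    };

    static struct dma_page dma_pages[DMA_PAGES];

    /* Pretend to emit the not-yet-flushed part of every page to the hardware. */
    static void dma_flush(void)
    {
        for (unsigned i = 0; i < DMA_PAGES; i++) {
            struct dma_page *p = &dma_pages[i];

            if (p->used == p->flushed)
                continue;   /* nothing new on this page */

            printf("page %u: emitting dwords %u..%u\n", i, p->flushed, p->used - 1);
            p->flushed = p->used;   /* everything written so far is now flushed */
        }
    }

    int main(void)
    {
        dma_pages[0].used = 100;    /* some commands written into page 0 */
        dma_flush();                /* emits dwords 0..99 */
        dma_pages[0].used = 160;    /* more commands appended */
        dma_flush();                /* emits only dwords 100..159 */
        return 0;
    }
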
/kernel/linux/linux-6.6/drivers/infiniband/hw/cxgb4/cq.c
    205: int flushed = 0;  [c4iw_flush_rq() local]
    212: flushed++;  [c4iw_flush_rq()]
    214: return flushed;  [c4iw_flush_rq()]
    240: int flushed = 0;  [c4iw_flush_sq() local]
    252: swsqe->flushed = 1;  [c4iw_flush_sq()]
    257: flushed++;  [c4iw_flush_sq()]
    261: wq->sq.flush_cidx += flushed;  [c4iw_flush_sq()]
    264: return flushed;  [c4iw_flush_sq()]
    291: swsqe->flushed = 1;  [flush_completed_wrs()]
    362: if (qhp->wq.flushed ...  [c4iw_flush_hw_cq()]
    [more matches not shown]

/kernel/linux/linux-6.6/drivers/infiniband/hw/cxgb4/restrack.c
    44: if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))  [fill_sq()]
    109: if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))  [fill_swsqe()]

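The cxgb4 cq.c matches (identical in both kernel versions) show the flush path counting work requests: each software SQ entry gets its own "flushed" flag when a flush completion is generated for it, the total is returned, and flush_cidx advances past the flushed entries. A simplified sketch of that loop over a fixed-size software queue follows; the depth and indexing are invented for the example.

    #include <stdbool.h>
    #include <stdio.h>

    #define SQ_DEPTH 16

    /* Software copy of a send work request, with the per-entry flag from the snippets. */
    struct swsqe {
        bool valid;      /* a work request is posted in this slot */
        bool flushed;    /* a flush completion has been generated for it */
    };

    struct sq {
        struct swsqe queue[SQ_DEPTH];
        unsigned flush_cidx;   /* first index not yet flushed */
        unsigned pidx;         /* producer index (one past the last posted WR) */
    };

    /* Generate flush completions for every posted-but-unflushed WR and count them. */
    static int flush_sq(struct sq *sq)
    {
        int flushed = 0;
        unsigned idx = sq->flush_cidx;

        while (idx != sq->pidx) {
            struct swsqe *swsqe = &sq->queue[idx % SQ_DEPTH];

            if (swsqe->valid && !swsqe->flushed) {
                swsqe->flushed = true;
                printf("flush completion for WR at index %u\n", idx % SQ_DEPTH);
                flushed++;
            }
            idx++;
        }

        sq->flush_cidx += flushed;
        return flushed;
    }

    int main(void)
    {
        struct sq sq = { .flush_cidx = 0, .pidx = 3 };

        for (int i = 0; i < 3; i++)
            sq.queue[i].valid = true;

        printf("flushed %d work requests\n", flush_sq(&sq));
        printf("flushed %d work requests on second pass\n", flush_sq(&sq));
        return 0;
    }
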
/kernel/linux/linux-5.10/arch/x86/kernel/amd_nb.c
    458: int flushed, i;  [amd_flush_garts() local]
    472: flushed = 0;  [amd_flush_garts()]
    476: flushed++;  [amd_flush_garts()]
    490: if (!flushed)  [amd_flush_garts()]

/kernel/linux/linux-6.6/arch/x86/kernel/amd_nb.c
    455: int flushed, i;  [amd_flush_garts() local]
    469: flushed = 0;  [amd_flush_garts()]
    473: flushed++;  [amd_flush_garts()]
    487: if (!flushed)  [amd_flush_garts()]

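In amd_flush_garts(), "flushed" counts how many northbridges actually completed a GART TLB flush, and a warning is printed if none did. A toy model of that count-and-warn pattern follows; the register layout and helper names here are invented, and the "hardware" completes the flush immediately instead of being polled.

    #include <stdio.h>

    #define NUM_NODES 4

    /* Simulated per-node GART cache control register: bit 0 = flush requested,
     * cleared by the (simulated) hardware when the flush is done. */
    static unsigned gart_ctl[NUM_NODES];

    static void write_flush_bit(int node)    { gart_ctl[node] |= 1u; }
    static int  flush_pending(int node)      { return gart_ctl[node] & 1u; }
    static void hw_completes_flush(int node) { gart_ctl[node] &= ~1u; }

    /* Kick off a GART TLB flush on every node, then count how many completed. */
    static void flush_garts(void)
    {
        int flushed, i;

        for (i = 0; i < NUM_NODES; i++)
            write_flush_bit(i);

        /* In this sketch the "hardware" finishes at once; the real code polls. */
        for (i = 0; i < NUM_NODES; i++)
            hw_completes_flush(i);

        flushed = 0;
        for (i = 0; i < NUM_NODES; i++) {
            if (!flush_pending(i))
                flushed++;
        }

        if (!flushed)
            printf("warning: GART flush appears to have had no effect\n");
        else
            printf("%d node(s) flushed\n", flushed);
    }

    int main(void)
    {
        flush_garts();
        return 0;
    }
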
/kernel/linux/linux-5.10/drivers/thunderbolt/nvm.c
    103: nvm->flushed = false;  [tb_nvm_write_buf()]

/kernel/linux/linux-6.6/drivers/cdx/controller/mcdi.c
    153: bool flushed;  [cdx_mcdi_flushed() local]
    156: flushed = list_empty(&mcdi->cmd_list) &&  [cdx_mcdi_flushed()]
    159: return flushed;  [cdx_mcdi_flushed()]

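In the cdx mcdi.c snippet, "flushed" simply means the command machinery is quiescent: the command list is empty and (per the truncated condition) nothing else is outstanding. A tiny sketch of that predicate follows, with the state reduced to two counters; the struct and field names are invented.

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal model of the command-queue state the snippet inspects. */
    struct mcdi_state {
        int queued;      /* commands waiting on the command list */
        int in_flight;   /* commands handed to the firmware, reply pending */
    };

    /* "Flushed" here means nothing queued and nothing outstanding, so the
     * interface is quiescent and can be torn down or reset safely. */
    static bool mcdi_flushed(const struct mcdi_state *mcdi)
    {
        bool flushed = (mcdi->queued == 0) && (mcdi->in_flight == 0);

        return flushed;
    }

    int main(void)
    {
        struct mcdi_state busy = { .queued = 1, .in_flight = 0 };
        struct mcdi_state idle = { 0 };

        printf("busy interface flushed? %d\n", mcdi_flushed(&busy));
        printf("idle interface flushed? %d\n", mcdi_flushed(&idle));
        return 0;
    }
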
/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/qplib_fp.c
    74: if (!qp->sq.flushed) {  [__bnxt_qplib_add_flush_qp()]
    79: qp->sq.flushed = true;  [__bnxt_qplib_add_flush_qp()]
    82: if (!qp->rq.flushed) {  [__bnxt_qplib_add_flush_qp()]
    86: qp->rq.flushed = true;  [__bnxt_qplib_add_flush_qp()]
    124: if (qp->sq.flushed) {  [__bnxt_qplib_del_flush_qp()]
    125: qp->sq.flushed = false;  [__bnxt_qplib_del_flush_qp()]
    129: if (qp->rq.flushed) {  [__bnxt_qplib_del_flush_qp()]
    130: qp->rq.flushed = false;  [__bnxt_qplib_del_flush_qp()]
    2355: if (qp->sq.flushed) {  [bnxt_qplib_cq_process_req()]
    2458: if (qp->rq.flushed) {  [bnxt_qplib_cq_process_res_rc()]
    [more matches not shown]

/kernel/linux/linux-6.6/drivers/infiniband/hw/bnxt_re/qplib_fp.c
    75: if (!qp->sq.flushed) {  [__bnxt_qplib_add_flush_qp()]
    80: qp->sq.flushed = true;  [__bnxt_qplib_add_flush_qp()]
    83: if (!qp->rq.flushed) {  [__bnxt_qplib_add_flush_qp()]
    87: qp->rq.flushed = true;  [__bnxt_qplib_add_flush_qp()]
    125: if (qp->sq.flushed) {  [__bnxt_qplib_del_flush_qp()]
    126: qp->sq.flushed = false;  [__bnxt_qplib_del_flush_qp()]
    130: if (qp->rq.flushed) {  [__bnxt_qplib_del_flush_qp()]
    131: qp->rq.flushed = false;  [__bnxt_qplib_del_flush_qp()]
    2437: if (qp->sq.flushed) {  [bnxt_qplib_cq_process_req()]
    2539: if (qp->rq.flushed) {  [bnxt_qplib_cq_process_res_rc()]
    [more matches not shown]

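The bnxt_re qplib_fp.c matches (both versions) show per-queue "flushed" booleans used as guards: a QP's SQ or RQ is put on the flush list only if it is not already flushed, and taken off only if it is, so membership is toggled exactly once in each direction. A minimal sketch of that guard pattern, with the list operations reduced to printfs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-queue flag guarding flush-list membership. */
    struct queue {
        const char *name;
        bool flushed;
    };

    struct qp {
        struct queue sq;
        struct queue rq;
    };

    static void add_to_flush_list(struct queue *q)
    {
        if (!q->flushed) {
            printf("adding %s to flush list\n", q->name);
            q->flushed = true;
        }
    }

    static void del_from_flush_list(struct queue *q)
    {
        if (q->flushed) {
            printf("removing %s from flush list\n", q->name);
            q->flushed = false;
        }
    }

    int main(void)
    {
        struct qp qp = { .sq = { "sq", false }, .rq = { "rq", false } };

        add_to_flush_list(&qp.sq);   /* added */
        add_to_flush_list(&qp.sq);   /* no-op: already flushed */
        add_to_flush_list(&qp.rq);

        del_from_flush_list(&qp.sq); /* removed */
        del_from_flush_list(&qp.sq); /* no-op */
        return 0;
    }
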
/kernel/linux/linux-6.6/drivers/thunderbolt/retimer.c
    125: rt->nvm->flushed = true;  [tb_retimer_nvm_validate_and_write()]
    277: if (!rt->nvm->flushed) {  [nvm_authenticate_store()]

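Together with the nvm.c match further up (nvm->flushed = false in tb_nvm_write_buf()), the retimer.c matches sketch a small state machine: buffering new image data clears "flushed", writing the image to the device sets it, and authentication first writes the image if it has not been flushed yet. A hedged model of that flow follows; the function names are simplified and the return codes invented.

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal model of the NVM image state: "flushed" tracks whether the
     * buffered image has actually been written to the device. */
    struct nvm {
        bool has_data;   /* something was written into the host-side buffer */
        bool flushed;    /* that data has been pushed to the device */
    };

    /* Buffering new image bytes invalidates any previous flush. */
    static void nvm_write_buf(struct nvm *nvm)
    {
        nvm->has_data = true;
        nvm->flushed = false;
    }

    /* Validate and push the buffered image to the device, then mark it flushed. */
    static int nvm_validate_and_write(struct nvm *nvm)
    {
        if (!nvm->has_data)
            return -1;
        printf("writing buffered image to device\n");
        nvm->flushed = true;
        return 0;
    }

    /* Authentication must not start until the image has reached the device. */
    static int nvm_authenticate(struct nvm *nvm)
    {
        if (!nvm->flushed) {
            int ret = nvm_validate_and_write(nvm);
            if (ret)
                return ret;
        }
        printf("starting authentication\n");
        return 0;
    }

    int main(void)
    {
        struct nvm nvm = { 0 };

        nvm_write_buf(&nvm);     /* buffer an image; flushed goes back to false */
        nvm_authenticate(&nvm);  /* writes the image first, then authenticates */
        return 0;
    }
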
/kernel/linux/linux-6.6/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
    329: goto flushed;  [svc_rdma_wc_receive()]
    357: flushed:  [svc_rdma_wc_receive()]

/kernel/linux/linux-5.10/drivers/iio/industrialio-buffer.c
    57: int flushed = 0;  [iio_buffer_ready() local]
    80: flushed = iio_buffer_flush_hwfifo(indio_dev, buf,  [iio_buffer_ready()]
    82: if (flushed <= 0)  [iio_buffer_ready()]
    85: if (avail + flushed >= to_wait)  [iio_buffer_ready()]

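In industrialio-buffer.c, "flushed" is how many samples were pulled out of the hardware FIFO to top up the software buffer: if the buffer alone does not reach the watermark, at most the missing amount is flushed from the FIFO, and the wait is satisfied when avail + flushed reaches the watermark. A sketch of that check follows, with the FIFO flush reduced to a min(); flush_hwfifo() is a stand-in, not the IIO API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the hardware-FIFO flush: returns how many
     * samples it actually moved into the software buffer (<= requested). */
    static int flush_hwfifo(int hwfifo_level, int requested)
    {
        return requested < hwfifo_level ? requested : hwfifo_level;
    }

    /* Watermark check: samples already buffered plus samples flushable from
     * the hardware FIFO must together reach the requested count. */
    static bool buffer_ready(int avail, int hwfifo_level, int to_wait)
    {
        int flushed = 0;

        if (avail >= to_wait)
            return true;                     /* software buffer alone is enough */

        /* Pull only as much as is still missing out of the hardware FIFO. */
        flushed = flush_hwfifo(hwfifo_level, to_wait - avail);
        if (flushed <= 0)
            return false;

        return avail + flushed >= to_wait;
    }

    int main(void)
    {
        printf("%d\n", buffer_ready(2, 10, 8));   /* 2 buffered + 6 flushed -> ready */
        printf("%d\n", buffer_ready(2, 3, 8));    /* only 5 total -> not ready */
        return 0;
    }
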
/kernel/linux/linux-5.10/drivers/acpi/ec.c
    407: bool flushed = false;  [acpi_ec_complete_request() local]
    413: flushed = acpi_ec_flushed(ec);  [acpi_ec_complete_request()]
    414: if (flushed)  [acpi_ec_complete_request()]
    964: bool flushed;  [acpi_ec_stopped() local]
    967: flushed = acpi_ec_flushed(ec);  [acpi_ec_stopped()]
    969: return flushed;  [acpi_ec_stopped()]
    2023: pm_pr_dbg("ACPI EC work flushed\n");  [acpi_ec_dispatch_gpe()]

/kernel/linux/linux-5.10/fs/ocfs2/journal.c
    297: unsigned int flushed;  [ocfs2_commit_cache() local]
    305: flushed = atomic_read(&journal->j_num_trans);  [ocfs2_commit_cache()]
    306: trace_ocfs2_commit_cache_begin(flushed);  [ocfs2_commit_cache()]
    307: if (flushed == 0) {  [ocfs2_commit_cache()]
    323: flushed = atomic_read(&journal->j_num_trans);  [ocfs2_commit_cache()]
    327: trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);  [ocfs2_commit_cache()]