
Searched refs:req (Results 776 - 800 of 5512) sorted by relevance


/kernel/linux/linux-6.6/drivers/crypto/hisilicon/hpre/
hpre_crypto.c
136 struct hpre_sqe req; member
289 struct hpre_sqe *msg = &hpre_req->req; in hpre_hw_data_init()
313 struct hpre_asym_request *req, in hpre_hw_data_clr_all()
318 struct hpre_sqe *sqe = &req->req; in hpre_hw_data_clr_all()
326 if (req->src) in hpre_hw_data_clr_all()
327 dma_free_coherent(dev, ctx->key_sz, req->src, tmp); in hpre_hw_data_clr_all()
336 if (req->dst) { in hpre_hw_data_clr_all()
338 scatterwalk_map_and_copy(req->dst, dst, 0, in hpre_hw_data_clr_all()
340 dma_free_coherent(dev, ctx->key_sz, req in hpre_hw_data_clr_all()
312 hpre_hw_data_clr_all(struct hpre_ctx *ctx, struct hpre_asym_request *req, struct scatterlist *dst, struct scatterlist *src) hpre_hw_data_clr_all() argument
349 struct hpre_asym_request *req; hpre_alg_res_post_hf() local
414 hpre_is_bd_timeout(struct hpre_asym_request *req, u64 overtime_thrhld) hpre_is_bd_timeout() argument
435 struct hpre_asym_request *req; hpre_dh_cb() local
456 struct hpre_asym_request *req; hpre_rsa_cb() local
479 struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; hpre_alg_cb() local
514 hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) hpre_msg_request_set() argument
590 hpre_dh_compute_value(struct kpp_request *req) hpre_dh_compute_value() argument
793 hpre_rsa_enc(struct akcipher_request *req) hpre_rsa_enc() argument
841 hpre_rsa_dec(struct akcipher_request *req) hpre_rsa_dec() argument
1441 hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx, struct hpre_asym_request *req, struct scatterlist *dst, struct scatterlist *src) hpre_ecdh_hw_data_clr_all() argument
1471 struct hpre_asym_request *req = NULL; hpre_ecdh_cb() local
1495 hpre_ecdh_msg_request_set(struct hpre_ctx *ctx, struct kpp_request *req) hpre_ecdh_msg_request_set() argument
1584 hpre_ecdh_compute_value(struct kpp_request *req) hpre_ecdh_compute_value() argument
1757 hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx, struct hpre_asym_request *req, struct scatterlist *dst, struct scatterlist *src) hpre_curve25519_hw_data_clr_all() argument
1786 struct hpre_asym_request *req = NULL; hpre_curve25519_cb() local
1807 hpre_curve25519_msg_request_set(struct hpre_ctx *ctx, struct kpp_request *req) hpre_curve25519_msg_request_set() argument
1936 hpre_curve25519_compute_value(struct kpp_request *req) hpre_curve25519_compute_value() argument
[all...]
/kernel/linux/linux-6.6/drivers/net/wireless/ath/ath11k/
qmi.c
1709 struct qmi_wlanfw_host_cap_req_msg_v01 req; in ath11k_qmi_host_cap_send() local
1714 memset(&req, 0, sizeof(req)); in ath11k_qmi_host_cap_send()
1717 req.num_clients_valid = 1; in ath11k_qmi_host_cap_send()
1718 req.num_clients = 1; in ath11k_qmi_host_cap_send()
1719 req.mem_cfg_mode = ab->qmi.target_mem_mode; in ath11k_qmi_host_cap_send()
1720 req.mem_cfg_mode_valid = 1; in ath11k_qmi_host_cap_send()
1721 req.bdf_support_valid = 1; in ath11k_qmi_host_cap_send()
1722 req.bdf_support = 1; in ath11k_qmi_host_cap_send()
1725 req in ath11k_qmi_host_cap_send()
1792 struct qmi_wlanfw_ind_register_req_msg_v01 *req; ath11k_qmi_fw_ind_register_send() local
1871 struct qmi_wlanfw_respond_mem_req_msg_v01 *req; ath11k_qmi_respond_fw_mem_request() local
2116 struct qmi_wlanfw_device_info_req_msg_v01 req = {}; ath11k_qmi_request_device_info() local
2190 struct qmi_wlanfw_cap_req_msg_v01 req; ath11k_qmi_request_target_cap() local
2291 struct qmi_wlanfw_bdf_download_req_msg_v01 *req; ath11k_qmi_load_file_target_mem() local
2555 struct qmi_wlanfw_m3_info_req_msg_v01 req; ath11k_qmi_wlanfw_m3_info_send() local
2614 struct qmi_wlanfw_wlan_mode_req_msg_v01 req; ath11k_qmi_wlanfw_mode_send() local
2668 struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req; ath11k_qmi_wlanfw_wlan_cfg_send() local
2761 struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {}; ath11k_qmi_wlanfw_wlan_ini_send() local
[all...]
/kernel/linux/linux-5.10/net/core/
request_sock.c
53 * until 3WHS is either completed or aborted. Afterwards the req will stay
59 * When a child socket is accepted, its corresponding req->sk is set to
60 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
66 * with its socket lock held. But a request_sock (req) can be accessed by
88 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, in reqsk_fastopen_remove() argument
91 struct sock *lsk = req->rsk_listener; in reqsk_fastopen_remove()
99 tcp_rsk(req)->tfo_listener = false; in reqsk_fastopen_remove()
100 if (req->sk) /* the child socket hasn't been accepted yet */ in reqsk_fastopen_remove()
108 reqsk_put(req); in reqsk_fastopen_remove()
111 /* Wait for 60secs before removing a req tha in reqsk_fastopen_remove()
[all...]
/kernel/linux/linux-6.6/net/core/
request_sock.c
53 * until 3WHS is either completed or aborted. Afterwards the req will stay
59 * When a child socket is accepted, its corresponding req->sk is set to
60 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
66 * with its socket lock held. But a request_sock (req) can be accessed by
88 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, in reqsk_fastopen_remove() argument
91 struct sock *lsk = req->rsk_listener; in reqsk_fastopen_remove()
99 tcp_rsk(req)->tfo_listener = false; in reqsk_fastopen_remove()
100 if (req->sk) /* the child socket hasn't been accepted yet */ in reqsk_fastopen_remove()
108 reqsk_put(req); in reqsk_fastopen_remove()
111 /* Wait for 60secs before removing a req tha in reqsk_fastopen_remove()
[all...]
/kernel/linux/linux-5.10/drivers/block/xen-blkback/
blkback.c
145 struct blkif_request *req,
411 struct pending_req *req = NULL; in alloc_req() local
416 req = list_entry(ring->pending_free.next, struct pending_req, in alloc_req()
418 list_del(&req->free_list); in alloc_req()
421 return req; in alloc_req()
428 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req) in free_req() argument
435 list_add(&req->free_list, &ring->pending_free); in free_req()
444 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, in xen_vbd_translate() argument
453 if (likely(req->nr_sects)) { in xen_vbd_translate()
454 blkif_sector_t end = req in xen_vbd_translate()
703 xen_blkbk_unmap_and_respond(struct pending_req *req) xen_blkbk_unmap_and_respond() argument
913 xen_blkbk_parse_indirect(struct blkif_request *req, struct pending_req *pending_req, struct seg_buf seg[], struct phys_req *preq) xen_blkbk_parse_indirect() argument
966 dispatch_discard_io(struct xen_blkif_ring *ring, struct blkif_request *req) dispatch_discard_io() argument
1009 dispatch_other_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req) dispatch_other_io() argument
1085 struct blkif_request req; __do_block_io_op() local
1185 dispatch_rw_block_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req) dispatch_rw_block_io() argument
[all...]
/kernel/linux/linux-5.10/drivers/dma/ti/
k3-udma-glue.c
205 struct ti_sci_msg_rm_udmap_tx_ch_cfg req; in k3_udma_glue_cfg_tx_chn() local
207 memset(&req, 0, sizeof(req)); in k3_udma_glue_cfg_tx_chn()
209 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | in k3_udma_glue_cfg_tx_chn()
217 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_cfg_tx_chn()
218 req.index = tx_chn->udma_tchan_id; in k3_udma_glue_cfg_tx_chn()
220 req.tx_pause_on_err = 1; in k3_udma_glue_cfg_tx_chn()
222 req.tx_filt_einfo = 1; in k3_udma_glue_cfg_tx_chn()
224 req.tx_filt_pswords = 1; in k3_udma_glue_cfg_tx_chn()
225 req in k3_udma_glue_cfg_tx_chn()
493 struct ti_sci_msg_rm_udmap_rx_ch_cfg req; k3_udma_glue_cfg_rx_chn() local
556 struct ti_sci_msg_rm_udmap_flow_cfg req; k3_udma_glue_cfg_rx_flow() local
979 struct ti_sci_msg_rm_udmap_flow_cfg req; k3_udma_glue_rx_flow_enable() local
1022 struct ti_sci_msg_rm_udmap_flow_cfg req; k3_udma_glue_rx_flow_disable() local
[all...]
/kernel/linux/linux-5.10/drivers/usb/gadget/function/
f_hid.c
34 struct usb_request *req; member
72 struct usb_request *req; member
293 struct usb_request *req; in f_hidg_intout_read() local
326 req = list->req; in f_hidg_intout_read()
327 count = min_t(unsigned int, count, req->actual - list->pos); in f_hidg_intout_read()
331 count -= copy_to_user(buffer, req->buf + list->pos, count); in f_hidg_intout_read()
340 if (list->pos == req->actual) { in f_hidg_intout_read()
343 req->length = hidg->report_length; in f_hidg_intout_read()
344 ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNE in f_hidg_intout_read()
414 f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) f_hidg_req_complete() argument
434 struct usb_request *req; f_hidg_write() local
574 hidg_intout_complete(struct usb_ep *ep, struct usb_request *req) hidg_intout_complete() argument
609 hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req) hidg_ssreport_complete() argument
645 struct usb_request *req = cdev->req; hidg_setup() local
857 struct usb_request *req = hidg_set_alt() local
[all...]
/kernel/linux/linux-5.10/fs/hmdfs/
hmdfs_client.c
324 void hmdfs_writepage_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req, in hmdfs_writepage_cb() argument
327 struct hmdfs_writepage_context *ctx = req->private; in hmdfs_writepage_cb()
353 kfree(req->data); in hmdfs_writepage_cb()
364 struct hmdfs_req req; in hmdfs_client_writepage() local
381 req.data = write_data; in hmdfs_client_writepage()
382 req.data_len = send_len; in hmdfs_client_writepage()
384 req.private = param; in hmdfs_client_writepage()
385 req.private_len = sizeof(*param); in hmdfs_client_writepage()
387 req.timeout = TIMEOUT_CONFIG; in hmdfs_client_writepage()
388 hmdfs_init_cmd(&req in hmdfs_client_writepage()
425 struct readdir_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_client_start_readdir() local
730 struct getattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_getattr() local
786 struct statfs_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_statfs() local
820 struct hmdfs_req req; hmdfs_send_syncfs() local
870 struct getxattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_getxattr() local
910 struct setxattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_setxattr() local
960 struct listxattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_listxattr() local
989 hmdfs_recv_syncfs_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req, const struct hmdfs_resp *resp) hmdfs_recv_syncfs_cb() argument
[all...]
/kernel/linux/linux-6.6/fs/hmdfs/
hmdfs_client.c
323 void hmdfs_writepage_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req, in hmdfs_writepage_cb() argument
326 struct hmdfs_writepage_context *ctx = req->private; in hmdfs_writepage_cb()
352 kfree(req->data); in hmdfs_writepage_cb()
363 struct hmdfs_req req; in hmdfs_client_writepage() local
380 req.data = write_data; in hmdfs_client_writepage()
381 req.data_len = send_len; in hmdfs_client_writepage()
383 req.private = param; in hmdfs_client_writepage()
384 req.private_len = sizeof(*param); in hmdfs_client_writepage()
386 req.timeout = TIMEOUT_CONFIG; in hmdfs_client_writepage()
387 hmdfs_init_cmd(&req in hmdfs_client_writepage()
424 struct readdir_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_client_start_readdir() local
729 struct getattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_getattr() local
785 struct statfs_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_statfs() local
819 struct hmdfs_req req; hmdfs_send_syncfs() local
869 struct getxattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_getxattr() local
909 struct setxattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_setxattr() local
959 struct listxattr_request *req = kzalloc(send_len, GFP_KERNEL); hmdfs_send_listxattr() local
988 hmdfs_recv_syncfs_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req, const struct hmdfs_resp *resp) hmdfs_recv_syncfs_cb() argument
[all...]
/kernel/linux/linux-6.6/drivers/usb/gadget/function/
f_hid.c
38 struct usb_request *req; member
76 struct usb_request *req; member
297 struct usb_request *req; in f_hidg_intout_read() local
330 req = list->req; in f_hidg_intout_read()
331 count = min_t(unsigned int, count, req->actual - list->pos); in f_hidg_intout_read()
335 count -= copy_to_user(buffer, req->buf + list->pos, count); in f_hidg_intout_read()
344 if (list->pos == req->actual) { in f_hidg_intout_read()
347 req->length = hidg->report_length; in f_hidg_intout_read()
348 ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNE in f_hidg_intout_read()
418 f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) f_hidg_req_complete() argument
438 struct usb_request *req; f_hidg_write() local
578 hidg_intout_complete(struct usb_ep *ep, struct usb_request *req) hidg_intout_complete() argument
613 hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req) hidg_ssreport_complete() argument
649 struct usb_request *req = cdev->req; hidg_setup() local
861 struct usb_request *req = hidg_set_alt() local
[all...]
/kernel/linux/linux-6.6/drivers/net/wireless/intel/iwlwifi/mvm/
ftm-initiator.c
146 mvm->ftm_initiator.req = NULL; in iwl_mvm_ftm_reset()
169 if (!mvm->ftm_initiator.req) in iwl_mvm_ftm_restart()
172 for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) { in iwl_mvm_ftm_restart()
173 memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr, in iwl_mvm_ftm_restart()
178 mvm->ftm_initiator.req, in iwl_mvm_ftm_restart()
183 mvm->ftm_initiator.req, GFP_KERNEL); in iwl_mvm_ftm_restart()
227 struct cfg80211_pmsr_request *req) in iwl_mvm_ftm_cmd_v5()
231 cmd->request_id = req->cookie; in iwl_mvm_ftm_cmd_v5()
232 cmd->num_of_ap = req->n_peers; in iwl_mvm_ftm_cmd_v5()
235 if (!req in iwl_mvm_ftm_cmd_v5()
225 iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v5 *cmd, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_cmd_v5() argument
255 iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v9 *cmd, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_cmd_common() argument
302 iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v8 *cmd, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_cmd_v8() argument
580 iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start_v5() argument
606 iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start_v7() argument
636 iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start_v8() argument
662 iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start_v9() argument
763 iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start_v11() argument
834 iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start_v12() argument
862 iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start_v13() argument
905 iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_start() argument
955 iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_abort() argument
973 iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req, const u8 *addr) iwl_mvm_ftm_find_peer() argument
[all...]
/third_party/libuv/src/unix/
udp.c
64 uv_udp_send_t* req; in uv__udp_finish_close() local
74 req = uv__queue_data(q, uv_udp_send_t, queue); in uv__udp_finish_close()
75 req->status = UV_ECANCELED; in uv__udp_finish_close()
76 uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); in uv__udp_finish_close()
92 uv_udp_send_t* req; in uv__udp_run_completed() local
102 req = uv__queue_data(q, uv_udp_send_t, queue); in uv__udp_run_completed()
103 uv__req_unregister(handle->loop, req); in uv__udp_run_completed()
105 handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); in uv__udp_run_completed()
108 if (req in uv__udp_run_completed()
280 uv_udp_send_t* req; uv__udp_sendmsg() local
686 uv__udp_send(uv_udp_send_t* req, uv_udp_t* handle, const uv_buf_t bufs[], unsigned int nbufs, const struct sockaddr* addr, unsigned int addrlen, uv_udp_send_cb send_cb) uv__udp_send() argument
[all...]
/third_party/node/deps/uv/src/unix/
udp.c
100 uv_udp_send_t* req; in uv__udp_finish_close() local
110 req = QUEUE_DATA(q, uv_udp_send_t, queue); in uv__udp_finish_close()
111 req->status = UV_ECANCELED; in uv__udp_finish_close()
112 QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); in uv__udp_finish_close()
128 uv_udp_send_t* req; in uv__udp_run_completed() local
138 req = QUEUE_DATA(q, uv_udp_send_t, queue); in uv__udp_run_completed()
139 uv__req_unregister(handle->loop, req); in uv__udp_run_completed()
141 handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); in uv__udp_run_completed()
144 if (req in uv__udp_run_completed()
316 uv_udp_send_t* req; uv__udp_sendmmsg() local
409 uv_udp_send_t* req; uv__udp_sendmsg() local
732 uv__udp_send(uv_udp_send_t* req, uv_udp_t* handle, const uv_buf_t bufs[], unsigned int nbufs, const struct sockaddr* addr, unsigned int addrlen, uv_udp_send_cb send_cb) uv__udp_send() argument
[all...]
/kernel/linux/linux-5.10/drivers/scsi/qla2xxx/
qla_nvme.c
17 struct nvme_fc_port_info req; in qla_nvme_register_remote() local
43 memset(&req, 0, sizeof(struct nvme_fc_port_info)); in qla_nvme_register_remote()
44 req.port_name = wwn_to_u64(fcport->port_name); in qla_nvme_register_remote()
45 req.node_name = wwn_to_u64(fcport->node_name); in qla_nvme_register_remote()
46 req.port_role = 0; in qla_nvme_register_remote()
47 req.dev_loss_tmo = 0; in qla_nvme_register_remote()
50 req.port_role = FC_PORT_ROLE_NVME_INITIATOR; in qla_nvme_register_remote()
53 req.port_role |= FC_PORT_ROLE_NVME_TARGET; in qla_nvme_register_remote()
56 req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; in qla_nvme_register_remote()
58 req in qla_nvme_register_remote()
379 struct req_que *req = NULL; qla2x00_start_nvme_mq() local
[all...]
/kernel/linux/linux-5.10/fs/ocfs2/
ioctl.c
43 struct ocfs2_info_request __user *req) in o2info_set_request_error()
46 (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags)); in o2info_set_request_error()
49 static inline void o2info_set_request_filled(struct ocfs2_info_request *req) in o2info_set_request_filled() argument
51 req->ir_flags |= OCFS2_INFO_FL_FILLED; in o2info_set_request_filled()
54 static inline void o2info_clear_request_filled(struct ocfs2_info_request *req) in o2info_clear_request_filled() argument
56 req->ir_flags &= ~OCFS2_INFO_FL_FILLED; in o2info_clear_request_filled()
59 static inline int o2info_coherent(struct ocfs2_info_request *req) in o2info_coherent() argument
61 return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT)); in o2info_coherent()
140 struct ocfs2_info_request __user *req) in ocfs2_info_handle_blocksize()
144 if (o2info_from_user(oib, req)) in ocfs2_info_handle_blocksize()
42 o2info_set_request_error(struct ocfs2_info_request *kreq, struct ocfs2_info_request __user *req) o2info_set_request_error() argument
139 ocfs2_info_handle_blocksize(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_blocksize() argument
157 ocfs2_info_handle_clustersize(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_clustersize() argument
176 ocfs2_info_handle_maxslots(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_maxslots() argument
195 ocfs2_info_handle_label(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_label() argument
214 ocfs2_info_handle_uuid(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_uuid() argument
233 ocfs2_info_handle_fs_features(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_fs_features() argument
254 ocfs2_info_handle_journal_size(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_journal_size() argument
321 ocfs2_info_handle_freeinode(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_freeinode() argument
609 ocfs2_info_handle_freefrag(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_freefrag() argument
682 ocfs2_info_handle_unknown(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_unknown() argument
705 ocfs2_info_handle_request(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_request() argument
[all...]
/kernel/linux/linux-6.6/fs/ocfs2/
ioctl.c
44 struct ocfs2_info_request __user *req) in o2info_set_request_error()
47 (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags)); in o2info_set_request_error()
50 static inline void o2info_set_request_filled(struct ocfs2_info_request *req) in o2info_set_request_filled() argument
52 req->ir_flags |= OCFS2_INFO_FL_FILLED; in o2info_set_request_filled()
55 static inline void o2info_clear_request_filled(struct ocfs2_info_request *req) in o2info_clear_request_filled() argument
57 req->ir_flags &= ~OCFS2_INFO_FL_FILLED; in o2info_clear_request_filled()
60 static inline int o2info_coherent(struct ocfs2_info_request *req) in o2info_coherent() argument
62 return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT)); in o2info_coherent()
144 struct ocfs2_info_request __user *req) in ocfs2_info_handle_blocksize()
148 if (o2info_from_user(oib, req)) in ocfs2_info_handle_blocksize()
43 o2info_set_request_error(struct ocfs2_info_request *kreq, struct ocfs2_info_request __user *req) o2info_set_request_error() argument
143 ocfs2_info_handle_blocksize(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_blocksize() argument
161 ocfs2_info_handle_clustersize(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_clustersize() argument
180 ocfs2_info_handle_maxslots(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_maxslots() argument
199 ocfs2_info_handle_label(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_label() argument
218 ocfs2_info_handle_uuid(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_uuid() argument
237 ocfs2_info_handle_fs_features(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_fs_features() argument
258 ocfs2_info_handle_journal_size(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_journal_size() argument
325 ocfs2_info_handle_freeinode(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_freeinode() argument
613 ocfs2_info_handle_freefrag(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_freefrag() argument
686 ocfs2_info_handle_unknown(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_unknown() argument
709 ocfs2_info_handle_request(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_request() argument
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/af/
rvu_nix.c
21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
481 struct nix_bp_cfg_req *req, in rvu_mbox_handler_nix_bp_disable()
484 u16 pcifunc = req->hdr.pcifunc; in rvu_mbox_handler_nix_bp_disable()
498 chan_base = pfvf->rx_chan_base + req->chan_base; in rvu_mbox_handler_nix_bp_disable()
499 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { in rvu_mbox_handler_nix_bp_disable()
507 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, in rvu_nix_get_bpid() argument
517 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); in rvu_nix_get_bpid()
528 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); in rvu_nix_get_bpid()
542 if ((req in rvu_nix_get_bpid()
480 rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, struct nix_bp_cfg_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_bp_disable() argument
581 rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, struct nix_bp_cfg_req *req, struct nix_bp_cfg_rsp *rsp) rvu_mbox_handler_nix_bp_enable() argument
850 nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, u16 *smq, u16 *smq_mask) nix_get_aq_req_smq() argument
865 rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) rvu_nix_blk_aq_enq_inst() argument
1102 rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_aq_enq_req *req, u8 ctype) rvu_nix_verify_aq_ctx() argument
1159 rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) rvu_nix_aq_enq_inst() argument
1209 nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) nix_lf_hwctx_disable() argument
1263 nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) nix_lf_hwctx_lockdown() argument
1289 rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) rvu_mbox_handler_nix_aq_enq() argument
1302 rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) rvu_mbox_handler_nix_aq_enq() argument
1310 rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, struct nix_cn10k_aq_enq_req *req, struct nix_cn10k_aq_enq_rsp *rsp) rvu_mbox_handler_nix_cn10k_aq_enq() argument
1318 rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_hwctx_disable() argument
1325 rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, struct nix_lf_alloc_req *req, struct nix_lf_alloc_rsp *rsp) rvu_mbox_handler_nix_lf_alloc() argument
1555 rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_lf_free() argument
1598 rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, struct nix_mark_format_cfg *req, struct nix_mark_format_cfg_rsp *rsp) rvu_mbox_handler_nix_mark_format_cfg() argument
1910 nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, struct nix_hw *nix_hw, struct nix_txsch_alloc_req *req) nix_check_txschq_alloc_req() argument
2043 rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, struct nix_txsch_alloc_req *req, struct nix_txsch_alloc_rsp *rsp) rvu_mbox_handler_nix_txsch_alloc() argument
2396 nix_txschq_free_one(struct rvu *rvu, struct nix_txsch_free_req *req) nix_txschq_free_one() argument
2464 rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, struct nix_txsch_free_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_txsch_free() argument
2586 nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr, struct nix_txschq_config *req, struct nix_txschq_config *rsp) nix_txschq_cfg_read() argument
2638 rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct nix_txschq_config *rsp) rvu_mbox_handler_nix_txschq_cfg() argument
2743 nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, struct nix_vtag_config *req) nix_rx_vtag_cfg() argument
2848 nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config *req) nix_tx_vtag_decfg() argument
2883 nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config *req, struct nix_vtag_config_rsp *rsp) nix_tx_vtag_cfg() argument
2926 rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, struct nix_vtag_config_rsp *rsp) rvu_mbox_handler_nix_vtag_cfg() argument
3439 rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, struct nix_hw_info *rsp) rvu_mbox_handler_nix_get_hw_info() argument
3482 rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_stats_rst() argument
3874 rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, struct nix_rss_flowkey_cfg *req, struct nix_rss_flowkey_cfg_rsp *rsp) rvu_mbox_handler_nix_rss_flowkey_cfg() argument
3974 rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_set_mac_addr() argument
4010 rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, struct msg_req *req, struct nix_get_mac_addr_rsp *rsp) rvu_mbox_handler_nix_get_mac_addr() argument
4027 rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_set_rx_mode() argument
4101 nix_find_link_frs(struct rvu *rvu, struct nix_frs_cfg *req, u16 pcifunc) nix_find_link_frs() argument
4146 rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_set_hw_frs() argument
4212 rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_set_rx_cfg() argument
4701 rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_lf_start_rx() argument
4724 rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_lf_stop_rx() argument
4866 rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_lf_ptp_tx_enable() argument
4872 rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_lf_ptp_tx_disable() argument
4878 rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, struct nix_lso_format_cfg *req, struct nix_lso_format_cfg_rsp *rsp) rvu_mbox_handler_nix_lso_format_cfg() argument
4942 nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, int blkaddr) nix_inline_ipsec_cfg() argument
4998 rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_inline_ipsec_cfg() argument
5012 rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, struct msg_req *req, struct nix_inline_ipsec_cfg *rsp) rvu_mbox_handler_nix_read_inline_ipsec_cfg() argument
5036 rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, struct nix_inline_ipsec_lf_cfg *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_inline_ipsec_lf_cfg() argument
5264 nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc) nix_verify_bandprof() argument
5310 rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, struct nix_bandprof_alloc_req *req, struct nix_bandprof_alloc_rsp *rsp) rvu_mbox_handler_nix_bandprof_alloc() argument
5389 rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, struct nix_bandprof_free_req *req, struct msg_rsp *rsp) rvu_mbox_handler_nix_bandprof_free() argument
5678 rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, struct nix_bandprof_get_hwinfo_rsp *rsp) rvu_mbox_handler_nix_bandprof_get_hwinfo() argument
[all...]
/kernel/linux/linux-6.6/arch/arm64/crypto/
aes-neonbs-glue.c
94 static int __ecb_crypt(struct skcipher_request *req, in __ecb_crypt() argument
98 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); in __ecb_crypt()
103 err = skcipher_walk_virt(&walk, req, false); in __ecb_crypt()
123 static int ecb_encrypt(struct skcipher_request *req) in ecb_encrypt() argument
125 return __ecb_crypt(req, aesbs_ecb_encrypt); in ecb_encrypt()
128 static int ecb_decrypt(struct skcipher_request *req) in ecb_decrypt() argument
130 return __ecb_crypt(req, aesbs_ecb_decrypt); in ecb_decrypt()
156 static int cbc_encrypt(struct skcipher_request *req) in cbc_encrypt() argument
158 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); in cbc_encrypt()
163 err = skcipher_walk_virt(&walk, req, fals in cbc_encrypt()
179 cbc_decrypt(struct skcipher_request *req) cbc_decrypt() argument
207 ctr_encrypt(struct skcipher_request *req) ctr_encrypt() argument
276 __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[])) __xts_crypt() argument
382 xts_encrypt(struct skcipher_request *req) xts_encrypt() argument
387 xts_decrypt(struct skcipher_request *req) xts_decrypt() argument
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/sfc/
efx_devlink.c
172 struct devlink_info_req *req, in efx_devlink_info_nvram_partition()
195 devlink_info_version_stored_put(req, version_name, buf); in efx_devlink_info_nvram_partition()
201 struct devlink_info_req *req) in efx_devlink_info_stored_versions()
210 err = efx_devlink_info_nvram_partition(efx, req, in efx_devlink_info_stored_versions()
214 err |= efx_devlink_info_nvram_partition(efx, req, in efx_devlink_info_stored_versions()
218 err |= efx_devlink_info_nvram_partition(efx, req, in efx_devlink_info_stored_versions()
222 err |= efx_devlink_info_nvram_partition(efx, req, in efx_devlink_info_stored_versions()
226 err |= efx_devlink_info_nvram_partition(efx, req, in efx_devlink_info_stored_versions()
236 struct devlink_info_req *req, in efx_devlink_info_running_v2()
253 devlink_info_version_fixed_put(req, in efx_devlink_info_running_v2()
171 efx_devlink_info_nvram_partition(struct efx_nic *efx, struct devlink_info_req *req, unsigned int partition_type, const char *version_name) efx_devlink_info_nvram_partition() argument
200 efx_devlink_info_stored_versions(struct efx_nic *efx, struct devlink_info_req *req) efx_devlink_info_stored_versions() argument
235 efx_devlink_info_running_v2(struct efx_nic *efx, struct devlink_info_req *req, unsigned int flags, efx_dword_t *outbuf) efx_devlink_info_running_v2() argument
352 efx_devlink_info_running_v3(struct efx_nic *efx, struct devlink_info_req *req, unsigned int flags, efx_dword_t *outbuf) efx_devlink_info_running_v3() argument
390 efx_devlink_info_running_v4(struct efx_nic *efx, struct devlink_info_req *req, unsigned int flags, efx_dword_t *outbuf) efx_devlink_info_running_v4() argument
473 efx_devlink_info_running_v5(struct efx_nic *efx, struct devlink_info_req *req, unsigned int flags, efx_dword_t *outbuf) efx_devlink_info_running_v5() argument
513 efx_devlink_info_running_versions(struct efx_nic *efx, struct devlink_info_req *req) efx_devlink_info_running_versions() argument
579 efx_devlink_info_board_cfg(struct efx_nic *efx, struct devlink_info_req *req) efx_devlink_info_board_cfg() argument
594 efx_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) efx_devlink_info_get() argument
[all...]
/third_party/ffmpeg/libavcodec/arm/
mlpdsp_armv6.S
91 CHECK .req a1
92 COUNT .req a2
93 IN .req a3
94 OUT .req a4
95 DAT0 .req v1
96 DAT1 .req v2
97 DAT2 .req v3
98 DAT3 .req v4
99 SHIFT0 .req v5
100 SHIFT1 .req v
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/broadcom/bnxt/
bnxt_tc.c
506 struct hwrm_cfa_flow_free_input *req; in bnxt_hwrm_cfa_flow_free() local
509 rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE); in bnxt_hwrm_cfa_flow_free()
512 req->ext_flow_handle = flow_node->ext_flow_handle; in bnxt_hwrm_cfa_flow_free()
514 req->flow_handle = flow_node->flow_handle; in bnxt_hwrm_cfa_flow_free()
516 rc = hwrm_req_send(bp, req); in bnxt_hwrm_cfa_flow_free()
594 struct hwrm_cfa_flow_alloc_input *req; in bnxt_hwrm_cfa_flow_alloc() local
598 rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC); in bnxt_hwrm_cfa_flow_alloc()
602 req->src_fid = cpu_to_le16(flow->src_fid); in bnxt_hwrm_cfa_flow_alloc()
603 req->ref_flow_handle = ref_flow_handle; in bnxt_hwrm_cfa_flow_alloc()
606 memcpy(req in bnxt_hwrm_cfa_flow_alloc()
797 struct hwrm_cfa_decap_filter_alloc_input *req; hwrm_cfa_decap_filter_alloc() local
865 struct hwrm_cfa_decap_filter_free_input *req; hwrm_cfa_decap_filter_free() local
885 struct hwrm_cfa_encap_record_alloc_input *req; hwrm_cfa_encap_record_alloc() local
931 struct hwrm_cfa_encap_record_free_input *req; hwrm_cfa_encap_record_free() local
1685 struct hwrm_cfa_flow_stats_input *req; bnxt_hwrm_cfa_flow_stats_get() local
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_common.c
47 struct msg_req *req; in otx2_update_lmac_stats() local
53 req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); in otx2_update_lmac_stats()
54 if (!req) { in otx2_update_lmac_stats()
65 struct msg_req *req; in otx2_update_lmac_fec_stats() local
70 req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox); in otx2_update_lmac_fec_stats()
71 if (req) in otx2_update_lmac_fec_stats()
149 struct nix_set_mac_addr *req; in otx2_hw_set_mac_addr() local
153 req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); in otx2_hw_set_mac_addr()
154 if (!req) { in otx2_hw_set_mac_addr()
159 ether_addr_copy(req in otx2_hw_set_mac_addr()
171 struct msg_req *req; otx2_hw_get_mac_addr() local
226 struct nix_frs_cfg *req; otx2_hw_set_mtu() local
252 struct cgx_pause_frm_cfg *req; otx2_config_pause_frm() local
280 struct nix_rss_flowkey_cfg *req; otx2_set_flowkey_cfg() local
609 struct nix_txschq_config *req; otx2_txschq_config() local
714 struct nix_txschq_config *req; otx2_smq_flush() local
738 struct nix_txsch_alloc_req *req; otx2_txsch_alloc() local
1660 struct hwctx_disable_req *req; otx2_ctx_disable() local
1685 struct nix_bp_cfg_req *req; otx2_nix_config_bp() local
1824 struct msg_req *req; otx2_get_max_mtu() local
[all...]
/kernel/linux/linux-5.10/arch/arm64/crypto/
aes-ce-ccm-glue.c
61 static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen) in ccm_init_mac() argument
63 struct crypto_aead *aead = crypto_aead_reqtfm(req); in ccm_init_mac()
65 u32 l = req->iv[0] + 1; in ccm_init_mac()
82 memcpy(maciv, req->iv, AES_BLOCK_SIZE - l); in ccm_init_mac()
92 if (req->assoclen) in ccm_init_mac()
95 memset(&req->iv[AES_BLOCK_SIZE - l], 0, l); in ccm_init_mac()
134 static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) in ccm_calculate_auth_mac() argument
136 struct crypto_aead *aead = crypto_aead_reqtfm(req); in ccm_calculate_auth_mac()
140 u32 len = req->assoclen; in ccm_calculate_auth_mac()
154 scatterwalk_start(&walk, req in ccm_calculate_auth_mac()
222 ccm_encrypt(struct aead_request *req) ccm_encrypt() argument
279 ccm_decrypt(struct aead_request *req) ccm_decrypt() argument
[all...]
/kernel/linux/linux-5.10/drivers/crypto/ccp/
ccp-crypto-aes.c
24 struct skcipher_request *req = skcipher_request_cast(async_req); in ccp_aes_complete() local
25 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in ccp_aes_complete()
26 struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); in ccp_aes_complete()
32 memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE); in ccp_aes_complete()
65 static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt) in ccp_aes_crypt() argument
67 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); in ccp_aes_crypt()
69 struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); in ccp_aes_crypt()
79 (req->cryptlen & (AES_BLOCK_SIZE - 1))) in ccp_aes_crypt()
83 if (!req->iv) in ccp_aes_crypt()
86 memcpy(rctx->iv, req in ccp_aes_crypt()
112 ccp_aes_encrypt(struct skcipher_request *req) ccp_aes_encrypt() argument
117 ccp_aes_decrypt(struct skcipher_request *req) ccp_aes_decrypt() argument
137 struct skcipher_request *req = skcipher_request_cast(async_req); ccp_aes_rfc3686_complete() local
160 ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt) ccp_aes_rfc3686_crypt() argument
184 ccp_aes_rfc3686_encrypt(struct skcipher_request *req) ccp_aes_rfc3686_encrypt() argument
189 ccp_aes_rfc3686_decrypt(struct skcipher_request *req) ccp_aes_rfc3686_decrypt() argument
[all...]
/kernel/linux/linux-5.10/sound/xen/
xen_snd_front_evtchnl.c
34 rp = channel->u.req.ring.sring->rsp_prod; in evtchnl_interrupt_req()
43 for (i = channel->u.req.ring.rsp_cons; i != rp; i++) { in evtchnl_interrupt_req()
44 resp = RING_GET_RESPONSE(&channel->u.req.ring, i); in evtchnl_interrupt_req()
53 channel->u.req.resp_status = resp->status; in evtchnl_interrupt_req()
54 complete(&channel->u.req.completion); in evtchnl_interrupt_req()
57 channel->u.req.resp_status = resp->status; in evtchnl_interrupt_req()
58 channel->u.req.resp.hw_param = in evtchnl_interrupt_req()
60 complete(&channel->u.req.completion); in evtchnl_interrupt_req()
71 channel->u.req.ring.rsp_cons = i; in evtchnl_interrupt_req()
72 if (i != channel->u.req in evtchnl_interrupt_req()
[all...]
