// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "queue/tx.h"

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;
	unsigned long flags;

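	/*
	 * copy_size counts the bytes that must live in the DMA-able
	 * command buffer itself (wide header plus all chunks that are
	 * neither NOCOPY nor DUP), while cmd_size is the full logical
	 * command length reported to the firmware in the wide header.
	 */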
	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

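	/*
	 * First pass over the fragments: validate the dataflags
	 * combinations and work out both sizes before touching the
	 * queue. cmddata/cmdlen are adjusted here so that bytes
	 * destined for the first TB are not mapped a second time in
	 * the nocopy/dup loop further down.
	 */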
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

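	/*
	 * The queue lock protects write_ptr and the queue entries for
	 * the whole setup below; it is dropped at the "out" label.
	 */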
	spin_lock_irqsave(&txq->lock, flags);

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));

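	/*
	 * Async commands need one free slot more than sync ones,
	 * presumably so that an urgent synchronous command can still
	 * be enqueued when async traffic has nearly filled the queue
	 * (an inference from the 2-vs-1 threshold, not documented here).
	 */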
	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&txq->lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
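	/*
	 * The sequence field encodes the command queue id and the write
	 * index, which lets the response handler match a completion
	 * back to this entry.
	 */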
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
					 INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* however, keep track of copy_size properly; we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
	iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
			    tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   (u8 *)out_cmd + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
				    copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		void *data = (void *)(uintptr_t)cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

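	/* out_meta->tbs is a bitmask of TBs in use; make sure it can hold them all */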
	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
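	/*
	 * Hand dup_buf ownership to the queue entry; it is freed when
	 * the command completes and the entry is reused.
	 */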
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

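	/*
	 * reg_lock serializes the write-pointer update with other
	 * device register accesses.
	 */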
	spin_lock(&trans_pcie->reg_lock);
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	spin_unlock(&trans_pcie->reg_lock);

out:
	spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
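
/*
 * Illustrative sketch only (not part of the driver): callers normally
 * reach this function through the transport layer (iwl_trans_send_cmd())
 * rather than invoking it directly. Assuming a payload-less command such
 * as ECHO_CMD, an async enqueue could look like:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(LONG_GROUP, ECHO_CMD),
 *		.flags = CMD_ASYNC,
 *	};
 *	int idx = iwl_pcie_gen2_enqueue_hcmd(trans, &hcmd);
 *
 *	if (idx < 0)
 *		return idx;
 *
 * A negative idx signals failure (e.g. -ENOSPC when the queue is full,
 * -EINVAL for an invalid dataflags combination).
 */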