xref: /kernel/linux/linux-5.10/drivers/misc/mei/hw-me.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 */
6
7#include <linux/pci.h>
8
9#include <linux/kthread.h>
10#include <linux/interrupt.h>
11#include <linux/pm_runtime.h>
12#include <linux/sizes.h>
13
14#include "mei_dev.h"
15#include "hbm.h"
16
17#include "hw-me.h"
18#include "hw-me-regs.h"
19
20#include "mei-trace.h"
21
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
			       unsigned long offset)
{
	/* MMIO read relative to the mapped register bar (hw->mem_addr) */
	return ioread32(hw->mem_addr + offset);
}
35
36
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset to which the data is written
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				 unsigned long offset, u32 value)
{
	/* MMIO write relative to the mapped register bar (hw->mem_addr) */
	iowrite32(value, hw->mem_addr + offset);
}
49
/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	/* each read pops one 32bit slot from the ME circular buffer */
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
62
/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	/* each write pushes one 32bit slot into the host circular buffer */
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}
73
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
	/* every CSR access is traced for register-level debugging */
	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

	return reg;
}
90
/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
	/* every CSR access is traced for register-level debugging */
	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

	return reg;
}
107
/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 *
 * Note: writes @reg verbatim, including any interrupt-status bits;
 * use mei_hcsr_set() when those bits must be preserved.
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
119
/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
	/*
	 * The interrupt-status bits are write-one-to-clear: writing them
	 * back as read would ack pending interrupts, so mask them out.
	 */
	reg &= ~H_CSR_IS_MASK;
	mei_hcsr_write(dev, reg);
}
132
133/**
134 * mei_hcsr_set_hig - set host interrupt (set H_IG)
135 *
136 * @dev: the device structure
137 */
138static inline void mei_hcsr_set_hig(struct mei_device *dev)
139{
140	u32 hcsr;
141
142	hcsr = mei_hcsr_read(dev) | H_IG;
143	mei_hcsr_set(dev, hcsr);
144}
145
/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
	/* every D0I3C access is traced for register-level debugging */
	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

	return reg;
}
162
/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
174
175/**
176 * mei_me_trc_status - read trc status register
177 *
178 * @dev: mei device
179 * @trc: trc status register value
180 *
181 * Return: 0 on success, error otherwise
182 */
183static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
184{
185	struct mei_me_hw *hw = to_me_hw(dev);
186
187	if (!hw->cfg->hw_trc_supported)
188		return -EOPNOTSUPP;
189
190	*trc = mei_me_reg_read(hw, ME_TRC);
191	trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
192
193	return 0;
194}
195
/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, -EINVAL on bad arguments,
 *         or the error from the read_fws callback
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status || !hw->read_fws)
		return -EINVAL;

	/* count comes from the per-generation config table */
	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = hw->read_fws(dev, fw_src->status[i],
				   &fw_status->status[i]);
		/*
		 * trace is emitted before the error check, so on a failed
		 * read the traced value may be stale/uninitialized
		 */
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
228
/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 *
 * Return:
 *  * -EINVAL when read_fws is not set
 *  * 0 on success
 *
 */
static int mei_me_hw_config(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr, reg;

	if (WARN_ON(!hw->read_fws))
		return -EINVAL;

	/* Doesn't change in runtime */
	hcsr = mei_hcsr_read(dev);
	/* host circular buffer depth lives in H_CSR bits 31:24 (H_CBD) */
	hw->hbuf_depth = (hcsr & H_CBD) >> 24;

	reg = 0;
	hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	/* d0i3 is advertised only if the whole mask is set */
	hw->d0i3_supported =
		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

	/* sync initial pg state with what the hardware reports */
	hw->pg_state = MEI_PG_OFF;
	if (hw->d0i3_supported) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3)
			hw->pg_state = MEI_PG_ON;
	}

	return 0;
}
266
267/**
268 * mei_me_pg_state  - translate internal pg state
269 *   to the mei power gating state
270 *
271 * @dev:  mei device
272 *
273 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
274 */
275static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
276{
277	struct mei_me_hw *hw = to_me_hw(dev);
278
279	return hw->pg_state;
280}
281
/**
 * me_intr_src - extract the interrupt source bits from a host CSR value
 *
 * @hcsr: H_CSR register value
 *
 * Return: the pending interrupt-status bits (0 if none pending)
 */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
286
/**
 * me_intr_disable - disables mei device interrupts
 *      using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
	/* clear interrupt-enable; mei_hcsr_set preserves pending IS bits */
	hcsr &= ~H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}
299
300/**
301 * me_intr_clear - clear and stop interrupts
302 *
303 * @dev: the device structure
304 * @hcsr: supplied hcsr register value
305 */
306static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
307{
308	if (me_intr_src(hcsr))
309		mei_hcsr_write(dev, hcsr);
310}
311
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	me_intr_clear(dev, mei_hcsr_read(dev));
}
323/**
324 * mei_me_intr_enable - enables mei device interrupts
325 *
326 * @dev: the device structure
327 */
328static void mei_me_intr_enable(struct mei_device *dev)
329{
330	u32 hcsr = mei_hcsr_read(dev);
331
332	hcsr |= H_CSR_IE_MASK;
333	mei_hcsr_set(dev, hcsr);
334}
335
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	me_intr_disable(dev, mei_hcsr_read(dev));
}
347
348/**
349 * mei_me_synchronize_irq - wait for pending IRQ handlers
350 *
351 * @dev: the device structure
352 */
353static void mei_me_synchronize_irq(struct mei_device *dev)
354{
355	struct mei_me_hw *hw = to_me_hw(dev);
356
357	synchronize_irq(hw->irq);
358}
359
/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	/* deassert reset and notify FW in a single CSR write */
	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(dev, hcsr);
}
373
/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	/* enable interrupts, signal FW, and declare host readiness at once */
	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
	mei_hcsr_set(dev, hcsr);
}
386
387/**
388 * mei_me_host_is_ready - check whether the host has turned ready
389 *
390 * @dev: mei device
391 * Return: bool
392 */
393static bool mei_me_host_is_ready(struct mei_device *dev)
394{
395	u32 hcsr = mei_hcsr_read(dev);
396
397	return (hcsr & H_RDY) == H_RDY;
398}
399
400/**
401 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
402 *
403 * @dev: mei device
404 * Return: bool
405 */
406static bool mei_me_hw_is_ready(struct mei_device *dev)
407{
408	u32 mecsr = mei_me_mecsr_read(dev);
409
410	return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
411}
412
413/**
414 * mei_me_hw_is_resetting - check whether the me(hw) is in reset
415 *
416 * @dev: mei device
417 * Return: bool
418 */
419static bool mei_me_hw_is_resetting(struct mei_device *dev)
420{
421	u32 mecsr = mei_me_mecsr_read(dev);
422
423	return (mecsr & ME_RST_HRA) == ME_RST_HRA;
424}
425
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * Called with device_lock held; the lock is dropped while sleeping.
 *
 * Return: 0 on success, -ETIME if the FW did not signal readiness in time
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the lock so the interrupt thread can set recvd_hw_ready */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_me_hw_reset_release(dev);
	/* consume the event for the next reset cycle */
	dev->recvd_hw_ready = false;
	return 0;
}
449
450/**
451 * mei_me_hw_start - hw start routine
452 *
453 * @dev: mei device
454 * Return: 0 on success, error otherwise
455 */
456static int mei_me_hw_start(struct mei_device *dev)
457{
458	int ret = mei_me_hw_ready_wait(dev);
459
460	if (ret)
461		return ret;
462	dev_dbg(dev->dev, "hw is ready\n");
463
464	mei_me_host_set_ready(dev);
465	return ret;
466}
467
468
469/**
470 * mei_hbuf_filled_slots - gets number of device filled buffer slots
471 *
472 * @dev: the device structure
473 *
474 * Return: number of filled slots
475 */
476static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
477{
478	u32 hcsr;
479	char read_ptr, write_ptr;
480
481	hcsr = mei_hcsr_read(dev);
482
483	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
484	write_ptr = (char) ((hcsr & H_CBWP) >> 16);
485
486	return (unsigned char) (write_ptr - read_ptr);
487}
488
489/**
490 * mei_me_hbuf_is_empty - checks if host buffer is empty.
491 *
492 * @dev: the device structure
493 *
494 * Return: true if empty, false - otherwise.
495 */
496static bool mei_me_hbuf_is_empty(struct mei_device *dev)
497{
498	return mei_hbuf_filled_slots(dev) == 0;
499}
500
501/**
502 * mei_me_hbuf_empty_slots - counts write empty slots.
503 *
504 * @dev: the device structure
505 *
506 * Return: -EOVERFLOW if overflow, otherwise empty slots count
507 */
508static int mei_me_hbuf_empty_slots(struct mei_device *dev)
509{
510	struct mei_me_hw *hw = to_me_hw(dev);
511	unsigned char filled_slots, empty_slots;
512
513	filled_slots = mei_hbuf_filled_slots(dev);
514	empty_slots = hw->hbuf_depth - filled_slots;
515
516	/* check for overflow */
517	if (filled_slots > hw->hbuf_depth)
518		return -EOVERFLOW;
519
520	return empty_slots;
521}
522
523/**
524 * mei_me_hbuf_depth - returns depth of the hw buffer.
525 *
526 * @dev: the device structure
527 *
528 * Return: size of hw buffer in slots
529 */
530static u32 mei_me_hbuf_depth(const struct mei_device *dev)
531{
532	struct mei_me_hw *hw = to_me_hw(dev);
533
534	return hw->hbuf_depth;
535}
536
537/**
538 * mei_me_hbuf_write - writes a message to host hw buffer.
539 *
540 * @dev: the device structure
541 * @hdr: header of message
542 * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
543 * @data: payload
544 * @data_len: payload length in bytes
545 *
546 * Return: 0 if success, < 0 - otherwise.
547 */
548static int mei_me_hbuf_write(struct mei_device *dev,
549			     const void *hdr, size_t hdr_len,
550			     const void *data, size_t data_len)
551{
552	unsigned long rem;
553	unsigned long i;
554	const u32 *reg_buf;
555	u32 dw_cnt;
556	int empty_slots;
557
558	if (WARN_ON(!hdr || !data || hdr_len & 0x3))
559		return -EINVAL;
560
561	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
562
563	empty_slots = mei_hbuf_empty_slots(dev);
564	dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
565
566	if (empty_slots < 0)
567		return -EOVERFLOW;
568
569	dw_cnt = mei_data2slots(hdr_len + data_len);
570	if (dw_cnt > (u32)empty_slots)
571		return -EMSGSIZE;
572
573	reg_buf = hdr;
574	for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
575		mei_me_hcbww_write(dev, reg_buf[i]);
576
577	reg_buf = data;
578	for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
579		mei_me_hcbww_write(dev, reg_buf[i]);
580
581	rem = data_len & 0x3;
582	if (rem > 0) {
583		u32 reg = 0;
584
585		memcpy(&reg, (const u8 *)data + data_len - rem, rem);
586		mei_me_hcbww_write(dev, reg);
587	}
588
589	mei_hcsr_set_hig(dev);
590	if (!mei_me_hw_is_ready(dev))
591		return -EIO;
592
593	return 0;
594}
595
596/**
597 * mei_me_count_full_read_slots - counts read full slots.
598 *
599 * @dev: the device structure
600 *
601 * Return: -EOVERFLOW if overflow, otherwise filled slots count
602 */
603static int mei_me_count_full_read_slots(struct mei_device *dev)
604{
605	u32 me_csr;
606	char read_ptr, write_ptr;
607	unsigned char buffer_depth, filled_slots;
608
609	me_csr = mei_me_mecsr_read(dev);
610	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
611	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
612	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
613	filled_slots = (unsigned char) (write_ptr - read_ptr);
614
615	/* check for overflow */
616	if (filled_slots > buffer_depth)
617		return -EOVERFLOW;
618
619	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
620	return (int)filled_slots;
621}
622
/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
			     unsigned long buffer_length)
{
	u32 *reg_buf = (u32 *)buffer;

	/* drain whole 32bit slots from the ME read window */
	for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
		*reg_buf++ = mei_me_mecbrw_read(dev);

	/* a trailing partial slot: read a full register, copy only the tail */
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	/* signal the FW that the host consumed the data */
	mei_hcsr_set_hig(dev);
	return 0;
}
649
/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 *
 * Read-modify-write of H_HPG_CSR: sets the power-gating-in bit.
 */
static void mei_me_pg_set(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	reg |= H_HPG_CSR_PGI;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
668
/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 *
 * Read-modify-write of H_HPG_CSR: requests power-gating exit.
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	/* exiting PG without having entered it is a driver state bug */
	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
689
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Called with device_lock held; the lock is dropped while waiting
 * for the FW's HBM response.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* ask the FW to enter power-gating isolation */
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		/* FW acked: latch the PG bit in hardware */
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* pg_state is set to ON even on timeout; callers handle recovery */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
726
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Called with device_lock held; the lock is dropped while waiting
 * for the FW's responses.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* the FW may have initiated the exit already */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	/* second phase: ack the exit and wait for the interrupt */
	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
779
/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	/* relies on the enum ordering: WAIT..INTR_WAIT are the
	 * in-flight handshake states
	 */
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
792
/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true is pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_mecsr_read(dev);

	/* d0i3-capable hardware always supports power gating */
	if (hw->d0i3_supported)
		return true;

	/* legacy PG needs both the HW capability bit and HBM support */
	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
		hw->d0i3_supported,
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}
827
/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg |= H_D0I3C_I3;
	/* request a completion interrupt only when the caller will wait */
	if (intr)
		reg |= H_D0I3C_IR;
	else
		reg &= ~H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
850
/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	/* clear the d0i3 request and always ask for an interrupt on exit */
	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
869
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Two-phase entry: HBM PGI handshake with the FW, then the D0I3C
 * register transition. Called with device_lock held; the lock is
 * dropped across both waits.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress: the transition completed at once */
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* interrupt may have been missed: trust the register state */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
942
/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	/* fire-and-forget: no interrupt requested, no completion wait */
	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}
972
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Called with device_lock held; the lock is dropped while waiting
 * for the exit-complete interrupt.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress: the transition completed at once */
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* interrupt may have been missed: trust the register state */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
1026
/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *			   in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* only act when a waiter is in the final exit-handshake phase */
	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}
1045
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* completion of a host-initiated d0i3 enter/exit transition */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
1086
1087/**
1088 * mei_me_pg_intr - perform pg processing in interrupt thread handler
1089 *
1090 * @dev: the device structure
1091 * @intr_source: interrupt source
1092 */
1093static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
1094{
1095	struct mei_me_hw *hw = to_me_hw(dev);
1096
1097	if (hw->d0i3_supported)
1098		mei_me_d0i3_intr(dev, intr_source);
1099	else
1100		mei_me_pg_legacy_intr(dev);
1101}
1102
1103/**
1104 * mei_me_pg_enter_sync - perform runtime pm entry procedure
1105 *
1106 * @dev: the device structure
1107 *
1108 * Return: 0 on success an error code otherwise
1109 */
1110int mei_me_pg_enter_sync(struct mei_device *dev)
1111{
1112	struct mei_me_hw *hw = to_me_hw(dev);
1113
1114	if (hw->d0i3_supported)
1115		return mei_me_d0i3_enter_sync(dev);
1116	else
1117		return mei_me_pg_legacy_enter_sync(dev);
1118}
1119
1120/**
1121 * mei_me_pg_exit_sync - perform runtime pm exit procedure
1122 *
1123 * @dev: the device structure
1124 *
1125 * Return: 0 on success an error code otherwise
1126 */
1127int mei_me_pg_exit_sync(struct mei_device *dev)
1128{
1129	struct mei_me_hw *hw = to_me_hw(dev);
1130
1131	if (hw->d0i3_supported)
1132		return mei_me_d0i3_exit_sync(dev);
1133	else
1134		return mei_me_pg_legacy_exit_sync(dev);
1135}
1136
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		/* must leave d0i3 before the FW can be reset */
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset, signal FW, and ack any pending interrupt status */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	/* raw write: the IS bits must go out as ones to be cleared */
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		/* polled mode: release reset now and drop back into d0i3 */
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
1205
/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_NONE if the interrupt is not ours, IRQ_WAKE_THREAD to
 *         defer the real work to mei_me_irq_thread_handler()
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *)dev_id;
	u32 hcsr;

	hcsr = mei_hcsr_read(dev);
	/* shared-line check: no IS bits set means not our interrupt */
	if (!me_intr_src(hcsr))
		return IRQ_NONE;

	dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));

	/* disable interrupts on device */
	me_intr_disable(dev, hcsr);
	return IRQ_WAKE_THREAD;
}
1229
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Runs with device interrupts disabled by the quick handler; they are
 * re-enabled on exit.
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* ack the pending interrupt-status bits */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	/* let the power-gating state machine observe this interrupt */
	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/*  check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
						rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-enable the device interrupts disabled by the quick handler */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
1323
/* hw-specific callbacks wired into the generic mei layer for ME devices */
static const struct mei_hw_ops mei_me_hw_ops = {

	/* status queries */
	.trc_status = mei_me_trc_status,
	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	/* hw life cycle */
	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	/* power gating */
	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	/* interrupt management */
	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	/* host (write) circular buffer */
	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	/* ME (read) circular buffer */
	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
1355
1356/**
1357 * mei_me_fw_type_nm() - check for nm sku
1358 *
1359 * Read ME FW Status register to check for the Node Manager (NM) Firmware.
1360 * The NM FW is only signaled in PCI function 0.
1361 * __Note__: Deprecated by PCH8 and newer.
1362 *
1363 * @pdev: pci device
1364 *
1365 * Return: true in case of NM firmware
1366 */
1367static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
1368{
1369	u32 reg;
1370	unsigned int devfn;
1371
1372	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1373	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
1374	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
1375	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
1376	return (reg & 0x600) == 0x200;
1377}
1378
1379#define MEI_CFG_FW_NM                           \
1380	.quirk_probe = mei_me_fw_type_nm
1381
1382/**
1383 * mei_me_fw_sku_sps_4() - check for sps 4.0 sku
1384 *
1385 * Read ME FW Status register to check for SPS Firmware.
1386 * The SPS FW is only signaled in the PCI function 0.
1387 * __Note__: Deprecated by SPS 5.0 and newer.
1388 *
1389 * @pdev: pci device
1390 *
1391 * Return: true in case of SPS firmware
1392 */
1393static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
1394{
1395	u32 reg;
1396	unsigned int devfn;
1397
1398	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1399	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
1400	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
1401	return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
1402}
1403
1404#define MEI_CFG_FW_SPS_4                          \
1405	.quirk_probe = mei_me_fw_type_sps_4
1406
1407/**
1408 * mei_me_fw_sku_sps() - check for sps sku
1409 *
1410 * Read ME FW Status register to check for SPS Firmware.
1411 * The SPS FW is only signaled in pci function 0
1412 *
1413 * @pdev: pci device
1414 *
1415 * Return: true in case of SPS firmware
1416 */
1417static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
1418{
1419	u32 reg;
1420	u32 fw_type;
1421	unsigned int devfn;
1422
1423	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1424	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
1425	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
1426	fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
1427
1428	dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
1429
1430	return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
1431}
1432
/* mark the device as servicing the integrated touch (itouch) function */
#define MEI_CFG_KIND_ITOUCH                     \
	.kind = "itouch"

/* refuse to bind when SPS firmware is detected (see mei_me_fw_type_sps) */
#define MEI_CFG_FW_SPS                          \
	.quirk_probe = mei_me_fw_type_sps

/* firmware version can be queried from the device */
#define MEI_CFG_FW_VER_SUPP                     \
	.fw_ver_supported = 1

/* ICH legacy: no firmware status registers available */
#define MEI_CFG_ICH_HFS                      \
	.fw_status.count = 0

/* ICH10: a single firmware status register */
#define MEI_CFG_ICH10_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH: two firmware status registers */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* PCH8 and newer: six firmware status registers */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6

/* DMA ring sizes: 128K host and device rings, one page of control descriptors */
#define MEI_CFG_DMA_128 \
	.dma_size[DMA_DSCR_HOST] = SZ_128K, \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE

/* device supports trace status register */
#define MEI_CFG_TRC \
	.hw_trc_supported = 1

/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH6 devices */
static const struct mei_cfg mei_me_pch6_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH7 devices */
static const struct mei_cfg mei_me_pch7_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point and newer devices - iTouch */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* LBG with quirk for SPS (4.0) Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
};

/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_FW_SPS,
};

/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
 * w/o DMA support.
 */
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS,
};

/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
};

/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
	MEI_CFG_FW_SPS,
};
1567
1568/*
1569 * mei_cfg_list - A list of platform platform specific configurations.
1570 * Note: has to be synchronized with  enum mei_cfg_idx.
1571 */
1572static const struct mei_cfg *const mei_cfg_list[] = {
1573	[MEI_ME_UNDEF_CFG] = NULL,
1574	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
1575	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
1576	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
1577	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
1578	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
1579	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
1580	[MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
1581	[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
1582	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
1583	[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
1584	[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
1585	[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
1586	[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
1587	[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
1588};
1589
1590const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
1591{
1592	BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
1593
1594	if (idx >= MEI_ME_NUM_CFG)
1595		return NULL;
1596
1597	return mei_cfg_list[idx];
1598};
1599
1600/**
1601 * mei_me_dev_init - allocates and initializes the mei device structure
1602 *
1603 * @parent: device associated with physical device (pci/platform)
1604 * @cfg: per device generation config
1605 *
1606 * Return: The mei_device pointer on success, NULL on failure.
1607 */
1608struct mei_device *mei_me_dev_init(struct device *parent,
1609				   const struct mei_cfg *cfg)
1610{
1611	struct mei_device *dev;
1612	struct mei_me_hw *hw;
1613	int i;
1614
1615	dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
1616	if (!dev)
1617		return NULL;
1618
1619	hw = to_me_hw(dev);
1620
1621	for (i = 0; i < DMA_DSCR_NUM; i++)
1622		dev->dr_dscr[i].size = cfg->dma_size[i];
1623
1624	mei_device_init(dev, parent, &mei_me_hw_ops);
1625	hw->cfg = cfg;
1626
1627	dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
1628
1629	dev->kind = cfg->kind;
1630
1631	return dev;
1632}
1633
1634