Lines matching references to dev (MEI ME hardware driver)

54  * @dev: the device structure
58 static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
60 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
66 * @dev: the device structure
69 static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
71 mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
77 * @dev: the device structure
81 static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
85 reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
86 trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
94 * @dev: the device structure
98 static inline u32 mei_hcsr_read(const struct mei_device *dev)
102 reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
103 trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
111 * @dev: the device structure
114 static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
116 trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
117 mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
124 * @dev: the device structure
127 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
130 mei_hcsr_write(dev, reg);
136 * @dev: the device structure
138 static inline void mei_hcsr_set_hig(struct mei_device *dev)
142 hcsr = mei_hcsr_read(dev) | H_IG;
143 mei_hcsr_set(dev, hcsr);
149 * @dev: the device structure
153 static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
157 reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
158 trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
166 * @dev: the device structure
169 static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
171 trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
172 mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
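
Taken together, the helpers above are thin wrappers around 32-bit MMIO accesses at fixed register offsets (ME_CB_RW, H_CB_WW, ME_CSR_HA, H_CSR, H_D0I3C), usually paired with a trace event. A minimal sketch of that accessor pattern, assuming the hardware structure carries an MMIO base mapped at probe time; the names below are illustrative, not the driver's:

#include <linux/io.h>
#include <linux/types.h>

struct demo_me_hw {
	void __iomem *mem_addr;		/* assumed: BAR mapped at probe time */
};

static inline u32 demo_reg_read(const struct demo_me_hw *hw, unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);		/* 32-bit MMIO read */
}

static inline void demo_reg_write(struct demo_me_hw *hw, unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);	/* 32-bit MMIO write */
}
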
178 * @dev: mei device
183 static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
185 struct mei_me_hw *hw = to_me_hw(dev);
191 trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
199 * @dev: mei device
204 static int mei_me_fw_status(struct mei_device *dev,
207 struct mei_me_hw *hw = to_me_hw(dev);
217 ret = hw->read_fws(dev, fw_src->status[i],
219 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
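
The mei_me_fw_status() fragment above reads a per-platform list of firmware status words through the hw->read_fws() callback, tracing each one. A sketch of that loop under assumed table and struct layouts (the real definitions are not part of this excerpt):

#include <linux/types.h>

#define DEMO_FW_STATUS_MAX 6			/* assumed maximum number of status words */

struct demo_fw_status_table {			/* per-platform list of register offsets */
	int count;
	int offsets[DEMO_FW_STATUS_MAX];	/* e.g. PCI_CFG_HFS_1, PCI_CFG_HFS_2, ... */
};

struct demo_fw_status {				/* snapshot returned to the caller */
	int count;
	u32 status[DEMO_FW_STATUS_MAX];
};

static int demo_fw_status(void *dev,
			  int (*read_fws)(void *dev, int offset, u32 *value),
			  const struct demo_fw_status_table *src,
			  struct demo_fw_status *out)
{
	int i, ret;

	out->count = src->count;
	for (i = 0; i < src->count && i < DEMO_FW_STATUS_MAX; i++) {
		ret = read_fws(dev, src->offsets[i], &out->status[i]);
		if (ret)
			return ret;		/* propagate the config-space read error */
	}

	return 0;
}
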
232 * @dev: mei device
239 static int mei_me_hw_config(struct mei_device *dev)
241 struct mei_me_hw *hw = to_me_hw(dev);
248 hcsr = mei_hcsr_read(dev);
252 hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
253 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
259 reg = mei_me_d0i3c_read(dev);
271 * @dev: mei device
275 static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
277 struct mei_me_hw *hw = to_me_hw(dev);
291 * @dev: the device structure
294 static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
297 mei_hcsr_set(dev, hcsr);
303 * @dev: the device structure
306 static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
309 mei_hcsr_write(dev, hcsr);
315 * @dev: the device structure
317 static void mei_me_intr_clear(struct mei_device *dev)
319 u32 hcsr = mei_hcsr_read(dev);
321 me_intr_clear(dev, hcsr);
326 * @dev: the device structure
328 static void mei_me_intr_enable(struct mei_device *dev)
330 u32 hcsr = mei_hcsr_read(dev);
333 mei_hcsr_set(dev, hcsr);
339 * @dev: the device structure
341 static void mei_me_intr_disable(struct mei_device *dev)
343 u32 hcsr = mei_hcsr_read(dev);
345 me_intr_disable(dev, hcsr);
351 * @dev: the device structure
353 static void mei_me_synchronize_irq(struct mei_device *dev)
355 struct mei_me_hw *hw = to_me_hw(dev);
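
mei_me_intr_enable()/mei_me_intr_disable() above are read-modify-write operations on H_CSR, and mei_me_synchronize_irq() presumably waits for any handler already running to finish. A condensed sketch; the interrupt-enable bit position is a placeholder, not taken from the excerpt:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_H_IE BIT(0)		/* assumed: interrupt-enable bit in H_CSR */

static void demo_intr_enable(void __iomem *h_csr)
{
	u32 hcsr = ioread32(h_csr) | DEMO_H_IE;

	iowrite32(hcsr, h_csr);		/* unmask device interrupts */
}

static void demo_intr_disable(void __iomem *h_csr)
{
	u32 hcsr = ioread32(h_csr) & ~DEMO_H_IE;

	iowrite32(hcsr, h_csr);		/* mask device interrupts */
}

static void demo_intr_synchronize(unsigned int irq)
{
	synchronize_irq(irq);		/* wait until any in-flight handler returns */
}
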
363 * @dev: the device structure
365 static void mei_me_hw_reset_release(struct mei_device *dev)
367 u32 hcsr = mei_hcsr_read(dev);
371 mei_hcsr_set(dev, hcsr);
377 * @dev: mei device
379 static void mei_me_host_set_ready(struct mei_device *dev)
381 u32 hcsr = mei_hcsr_read(dev);
384 mei_hcsr_set(dev, hcsr);
390 * @dev: mei device
393 static bool mei_me_host_is_ready(struct mei_device *dev)
395 u32 hcsr = mei_hcsr_read(dev);
403 * @dev: mei device
406 static bool mei_me_hw_is_ready(struct mei_device *dev)
408 u32 mecsr = mei_me_mecsr_read(dev);
416 * @dev: mei device
419 static bool mei_me_hw_is_resetting(struct mei_device *dev)
421 u32 mecsr = mei_me_mecsr_read(dev);
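
Host and firmware readiness above come down to simple bit tests: the host publishes its ready bit in H_CSR, while the ME firmware reports its own readiness in ME_CSR_HA. A sketch of those predicates; the bit positions below are placeholders, not the real register layout:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_H_RDY	BIT(3)	/* assumed: host-ready bit in H_CSR */
#define DEMO_ME_RDY_HRA	BIT(3)	/* assumed: ME-ready bit in ME_CSR_HA */

static bool demo_host_is_ready(u32 hcsr)
{
	return (hcsr & DEMO_H_RDY) == DEMO_H_RDY;
}

static bool demo_hw_is_ready(u32 mecsr)
{
	return (mecsr & DEMO_ME_RDY_HRA) == DEMO_ME_RDY_HRA;
}

mei_me_host_set_ready() is then the write-side counterpart: a read-modify-write of H_CSR that sets the ready (and, presumably, interrupt) bits.
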
430 * @dev: mei device
433 static int mei_me_hw_ready_wait(struct mei_device *dev)
435 mutex_unlock(&dev->device_lock);
436 wait_event_timeout(dev->wait_hw_ready,
437 dev->recvd_hw_ready,
439 mutex_lock(&dev->device_lock);
440 if (!dev->recvd_hw_ready) {
441 dev_err(dev->dev, "wait hw ready failed\n");
445 mei_me_hw_reset_release(dev);
446 dev->recvd_hw_ready = false;
453 * @dev: mei device
456 static int mei_me_hw_start(struct mei_device *dev)
458 int ret = mei_me_hw_ready_wait(dev);
462 dev_dbg(dev->dev, "hw is ready\n");
464 mei_me_host_set_ready(dev);
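
mei_me_hw_ready_wait() shows a pattern used throughout this file: release device_lock so the interrupt thread can set the flag, sleep on a wait queue with a timeout, re-take the lock, then re-check and consume the flag. A minimal sketch of that pattern; the -ETIMEDOUT return value and the names are illustrative choices, not the driver's exact code:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>

static int demo_wait_hw_ready(struct mutex *device_lock,
			      wait_queue_head_t *wait_hw_ready,
			      bool *recvd_hw_ready,
			      unsigned long timeout_ms)
{
	mutex_unlock(device_lock);		/* let the IRQ thread run and set the flag */
	wait_event_timeout(*wait_hw_ready, *recvd_hw_ready,
			   msecs_to_jiffies(timeout_ms));
	mutex_lock(device_lock);		/* re-acquire before touching device state */

	if (!*recvd_hw_ready)
		return -ETIMEDOUT;		/* firmware never signalled readiness */

	*recvd_hw_ready = false;		/* consume the event for the next reset */
	return 0;
}
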
472 * @dev: the device structure
476 static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
481 hcsr = mei_hcsr_read(dev);
492 * @dev: the device structure
496 static bool mei_me_hbuf_is_empty(struct mei_device *dev)
498 return mei_hbuf_filled_slots(dev) == 0;
504 * @dev: the device structure
508 static int mei_me_hbuf_empty_slots(struct mei_device *dev)
510 struct mei_me_hw *hw = to_me_hw(dev);
513 filled_slots = mei_hbuf_filled_slots(dev);
526 * @dev: the device structure
530 static u32 mei_me_hbuf_depth(const struct mei_device *dev)
532 struct mei_me_hw *hw = to_me_hw(dev);
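
The filled/empty/depth helpers above all derive from fields packed into H_CSR: a circular-buffer read pointer, a write pointer and the buffer depth. A sketch of the arithmetic with placeholder field positions (the real field layout is not shown in this excerpt):

#include <linux/errno.h>
#include <linux/types.h>

static unsigned char demo_hbuf_filled_slots(u32 hcsr)
{
	unsigned char read_ptr  = (hcsr >> 8)  & 0xff;	/* assumed: read pointer field */
	unsigned char write_ptr = (hcsr >> 16) & 0xff;	/* assumed: write pointer field */

	return (unsigned char)(write_ptr - read_ptr);	/* wraps naturally modulo 256 */
}

static int demo_hbuf_empty_slots(u32 hcsr)
{
	unsigned char depth  = (hcsr >> 24) & 0xff;	/* assumed: buffer depth field */
	unsigned char filled = demo_hbuf_filled_slots(hcsr);

	if (filled > depth)
		return -EOVERFLOW;			/* inconsistent pointers */

	return depth - filled;
}
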
540 * @dev: the device structure
548 static int mei_me_hbuf_write(struct mei_device *dev,
561 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
563 empty_slots = mei_hbuf_empty_slots(dev);
564 dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);
575 mei_me_hcbww_write(dev, reg_buf[i]);
579 mei_me_hcbww_write(dev, reg_buf[i]);
586 mei_me_hcbww_write(dev, reg);
589 mei_hcsr_set_hig(dev);
590 if (!mei_me_hw_is_ready(dev))
599 * @dev: the device structure
603 static int mei_me_count_full_read_slots(struct mei_device *dev)
609 me_csr = mei_me_mecsr_read(dev);
619 dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
626 * @dev: the device structure
632 static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
638 *reg_buf++ = mei_me_mecbrw_read(dev);
641 u32 reg = mei_me_mecbrw_read(dev);
646 mei_hcsr_set_hig(dev);
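
mei_me_read_slots() above drains the circular buffer one 32-bit word at a time through ME_CB_RW; a trailing partial word is staged locally so only the remaining bytes land in the caller's buffer, and H_IG is then set to tell the firmware the host is done. mei_me_hbuf_write() mirrors the same dword loop on the write side through H_CB_WW. A sketch of the read loop, with an indirected read_dword() standing in for mei_me_mecbrw_read():

#include <linux/string.h>
#include <linux/types.h>

static void demo_read_slots(u32 (*read_dword)(void *ctx), void *ctx,
			    unsigned char *buffer, unsigned long buffer_length)
{
	u32 *reg_buf = (u32 *)buffer;

	/* whole 32-bit words go straight into the destination buffer */
	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = read_dword(ctx);

	/* a final partial word: read a full register, copy only what remains */
	if (buffer_length > 0) {
		u32 reg = read_dword(ctx);

		memcpy(reg_buf, &reg, buffer_length);
	}
}
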
653 * @dev: the device structure
655 static void mei_me_pg_set(struct mei_device *dev)
657 struct mei_me_hw *hw = to_me_hw(dev);
661 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
665 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
672 * @dev: the device structure
674 static void mei_me_pg_unset(struct mei_device *dev)
676 struct mei_me_hw *hw = to_me_hw(dev);
680 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
686 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
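
mei_me_pg_set()/mei_me_pg_unset() above toggle power gating through a read-modify-write of H_HPG_CSR, tracing both the read and the write. A sketch under the assumption that one bit requests isolation entry and another requests exit; the bit names and semantics below are placeholders:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_HPG_PGI	 BIT(0)	/* assumed: request power-gating isolation entry */
#define DEMO_HPG_PGIHEXR BIT(1)	/* assumed: request power-gating isolation exit */

static void demo_pg_set(void __iomem *hpg_csr)
{
	u32 reg = ioread32(hpg_csr);

	reg |= DEMO_HPG_PGI;
	iowrite32(reg, hpg_csr);
}

static void demo_pg_unset(void __iomem *hpg_csr)
{
	u32 reg = ioread32(hpg_csr);

	reg &= ~DEMO_HPG_PGI;		/* assumed: drop the entry request */
	reg |= DEMO_HPG_PGIHEXR;	/* assumed: raise the exit request */
	iowrite32(reg, hpg_csr);
}
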
693 * @dev: the device structure
697 static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
699 struct mei_me_hw *hw = to_me_hw(dev);
703 dev->pg_event = MEI_PG_EVENT_WAIT;
705 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
709 mutex_unlock(&dev->device_lock);
710 wait_event_timeout(dev->wait_pg,
711 dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
712 mutex_lock(&dev->device_lock);
714 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
715 mei_me_pg_set(dev);
721 dev->pg_event = MEI_PG_EVENT_IDLE;
730 * @dev: the device structure
734 static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
736 struct mei_me_hw *hw = to_me_hw(dev);
740 if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
743 dev->pg_event = MEI_PG_EVENT_WAIT;
745 mei_me_pg_unset(dev);
747 mutex_unlock(&dev->device_lock);
748 wait_event_timeout(dev->wait_pg,
749 dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
750 mutex_lock(&dev->device_lock);
753 if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
758 dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
759 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
763 mutex_unlock(&dev->device_lock);
764 wait_event_timeout(dev->wait_pg,
765 dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
766 mutex_lock(&dev->device_lock);
768 if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
774 dev->pg_event = MEI_PG_EVENT_IDLE;
783 * @dev: the device structure
787 static bool mei_me_pg_in_transition(struct mei_device *dev)
789 return dev->pg_event >= MEI_PG_EVENT_WAIT &&
790 dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
796 * @dev: the device structure
800 static bool mei_me_pg_is_enabled(struct mei_device *dev)
802 struct mei_me_hw *hw = to_me_hw(dev);
803 u32 reg = mei_me_mecsr_read(dev);
811 if (!dev->hbm_f_pg_supported)
817 dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
820 dev->version.major_version,
821 dev->version.minor_version,
831 * @dev: the device structure
836 static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
838 u32 reg = mei_me_d0i3c_read(dev);
845 mei_me_d0i3c_write(dev, reg);
847 reg = mei_me_d0i3c_read(dev);
854 * @dev: the device structure
858 static u32 mei_me_d0i3_unset(struct mei_device *dev)
860 u32 reg = mei_me_d0i3c_read(dev);
864 mei_me_d0i3c_write(dev, reg);
866 reg = mei_me_d0i3c_read(dev);
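
mei_me_d0i3_set()/mei_me_d0i3_unset() above drive the D0i3 handshake by flipping bits in H_D0I3C and reading the register back so the caller sees the settled value. A sketch assuming one bit carries the D0i3 state and another asks the hardware to raise an interrupt when the transition completes; the bit names are placeholders:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_D0I3C_I3 BIT(0)	/* assumed: D0i3 state bit */
#define DEMO_D0I3C_IR BIT(1)	/* assumed: interrupt-on-completion request bit */

static u32 demo_d0i3_set(void __iomem *d0i3c, bool intr)
{
	u32 reg = ioread32(d0i3c);

	reg |= DEMO_D0I3C_I3;			/* request entry into D0i3 */
	if (intr)
		reg |= DEMO_D0I3C_IR;		/* completion signalled by interrupt */
	else
		reg &= ~DEMO_D0I3C_IR;		/* caller will poll instead */
	iowrite32(reg, d0i3c);

	return ioread32(d0i3c);			/* read back: post the write, return settled value */
}

static u32 demo_d0i3_unset(void __iomem *d0i3c)
{
	u32 reg = ioread32(d0i3c);

	reg &= ~DEMO_D0I3C_I3;			/* request exit from D0i3 */
	reg |= DEMO_D0I3C_IR;
	iowrite32(reg, d0i3c);

	return ioread32(d0i3c);
}
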
873 * @dev: the device structure
877 static int mei_me_d0i3_enter_sync(struct mei_device *dev)
879 struct mei_me_hw *hw = to_me_hw(dev);
885 reg = mei_me_d0i3c_read(dev);
888 dev_dbg(dev->dev, "d0i3 set not needed\n");
894 dev->pg_event = MEI_PG_EVENT_WAIT;
896 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
901 mutex_unlock(&dev->device_lock);
902 wait_event_timeout(dev->wait_pg,
903 dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
904 mutex_lock(&dev->device_lock);
906 if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
912 dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
914 reg = mei_me_d0i3_set(dev, true);
916 dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
921 mutex_unlock(&dev->device_lock);
922 wait_event_timeout(dev->wait_pg,
923 dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
924 mutex_lock(&dev->device_lock);
926 if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
927 reg = mei_me_d0i3c_read(dev);
938 dev->pg_event = MEI_PG_EVENT_IDLE;
939 dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
949 * @dev: the device structure
953 static int mei_me_d0i3_enter(struct mei_device *dev)
955 struct mei_me_hw *hw = to_me_hw(dev);
958 reg = mei_me_d0i3c_read(dev);
961 dev_dbg(dev->dev, "already d0i3 : set not needed\n");
965 mei_me_d0i3_set(dev, false);
968 dev->pg_event = MEI_PG_EVENT_IDLE;
969 dev_dbg(dev->dev, "d0i3 enter\n");
976 * @dev: the device structure
980 static int mei_me_d0i3_exit_sync(struct mei_device *dev)
982 struct mei_me_hw *hw = to_me_hw(dev);
987 dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
989 reg = mei_me_d0i3c_read(dev);
992 dev_dbg(dev->dev, "d0i3 exit not needed\n");
997 reg = mei_me_d0i3_unset(dev);
999 dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
1004 mutex_unlock(&dev->device_lock);
1005 wait_event_timeout(dev->wait_pg,
1006 dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
1007 mutex_lock(&dev->device_lock);
1009 if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
1010 reg = mei_me_d0i3c_read(dev);
1021 dev->pg_event = MEI_PG_EVENT_IDLE;
1023 dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
1031 * @dev: the device structure
1033 static void mei_me_pg_legacy_intr(struct mei_device *dev)
1035 struct mei_me_hw *hw = to_me_hw(dev);
1037 if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
1040 dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
1042 if (waitqueue_active(&dev->wait_pg))
1043 wake_up(&dev->wait_pg);
1049 * @dev: the device structure
1052 static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
1054 struct mei_me_hw *hw = to_me_hw(dev);
1056 if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
1058 dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
1061 if (dev->hbm_state != MEI_HBM_IDLE) {
1066 dev_dbg(dev->dev, "d0i3 set host ready\n");
1067 mei_me_host_set_ready(dev);
1073 wake_up(&dev->wait_pg);
1082 dev_dbg(dev->dev, "d0i3 want resume\n");
1083 mei_hbm_pg_resume(dev);
1090 * @dev: the device structure
1093 static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
1095 struct mei_me_hw *hw = to_me_hw(dev);
1098 mei_me_d0i3_intr(dev, intr_source);
1100 mei_me_pg_legacy_intr(dev);
1106 * @dev: the device structure
1110 int mei_me_pg_enter_sync(struct mei_device *dev)
1112 struct mei_me_hw *hw = to_me_hw(dev);
1115 return mei_me_d0i3_enter_sync(dev);
1117 return mei_me_pg_legacy_enter_sync(dev);
1123 * @dev: the device structure
1127 int mei_me_pg_exit_sync(struct mei_device *dev)
1129 struct mei_me_hw *hw = to_me_hw(dev);
1132 return mei_me_d0i3_exit_sync(dev);
1134 return mei_me_pg_legacy_exit_sync(dev);
1140 * @dev: the device structure
1145 static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
1147 struct mei_me_hw *hw = to_me_hw(dev);
1152 mei_me_intr_enable(dev);
1154 ret = mei_me_d0i3_exit_sync(dev);
1160 pm_runtime_set_active(dev->dev);
1162 hcsr = mei_hcsr_read(dev);
1169 dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
1171 mei_hcsr_set(dev, hcsr);
1172 hcsr = mei_hcsr_read(dev);
1180 dev->recvd_hw_ready = false;
1181 mei_hcsr_write(dev, hcsr);
1187 hcsr = mei_hcsr_read(dev);
1190 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
1193 dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
1196 mei_me_hw_reset_release(dev);
1198 ret = mei_me_d0i3_enter(dev);
1216 struct mei_device *dev = (struct mei_device *)dev_id;
1219 hcsr = mei_hcsr_read(dev);
1223 dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
1226 me_intr_disable(dev, hcsr);
1242 struct mei_device *dev = (struct mei_device *) dev_id;
1248 dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
1250 mutex_lock(&dev->device_lock);
1252 hcsr = mei_hcsr_read(dev);
1253 me_intr_clear(dev, hcsr);
1258 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
1259 dev_warn(dev->dev, "FW not ready: resetting.\n");
1260 schedule_work(&dev->reset_work);
1264 if (mei_me_hw_is_resetting(dev))
1265 mei_hcsr_set_hig(dev);
1267 mei_me_pg_intr(dev, me_intr_src(hcsr));
1269 /* check if we need to start the dev */
1270 if (!mei_host_is_ready(dev)) {
1271 if (mei_hw_is_ready(dev)) {
1272 dev_dbg(dev->dev, "we need to start the dev.\n");
1273 dev->recvd_hw_ready = true;
1274 wake_up(&dev->wait_hw_ready);
1276 dev_dbg(dev->dev, "Spurious Interrupt\n");
1281 slots = mei_count_full_read_slots(dev);
1283 dev_dbg(dev->dev, "slots to read = %08x\n", slots);
1284 rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
1293 (dev->dev_state != MEI_DEV_RESETTING &&
1294 dev->dev_state != MEI_DEV_POWER_DOWN)) {
1295 dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
1297 schedule_work(&dev->reset_work);
1302 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
1309 if (dev->pg_event != MEI_PG_EVENT_WAIT &&
1310 dev->pg_event != MEI_PG_EVENT_RECEIVED) {
1311 rets = mei_irq_write_handler(dev, &cmpl_list);
1312 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
1315 mei_irq_compl_handler(dev, &cmpl_list);
1318 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
1319 mei_me_intr_enable(dev);
1320 mutex_unlock(&dev->device_lock);
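
The two handlers above follow the standard hard-IRQ/threaded-IRQ split: the quick handler only checks whether the device raised the interrupt, masks it and defers; the thread then does the real work under device_lock and re-enables interrupts before returning. A condensed sketch with stub helpers; the names and helpers are illustrative, not the driver's:

#include <linux/interrupt.h>
#include <linux/types.h>

/* illustrative stand-ins for the driver's CSR helpers */
static u32 demo_intr_source(void *dev_id) { return 1; }
static void demo_intr_disable(void *dev_id) { }
static void demo_intr_enable(void *dev_id) { }
static void demo_process_interrupt(void *dev_id) { }

static irqreturn_t demo_irq_quick_handler(int irq, void *dev_id)
{
	if (!demo_intr_source(dev_id))
		return IRQ_NONE;		/* not our interrupt: let other sharers look */

	demo_intr_disable(dev_id);		/* mask, then defer the heavy work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_irq_thread_handler(int irq, void *dev_id)
{
	demo_process_interrupt(dev_id);		/* sleepable context: mutexes, HBM traffic, ... */
	demo_intr_enable(dev_id);		/* unmask again before returning */
	return IRQ_HANDLED;
}

/*
 * Registration, typically from probe():
 *	ret = request_threaded_irq(irq, demo_irq_quick_handler,
 *				   demo_irq_thread_handler,
 *				   IRQF_SHARED, "demo", dev);
 */
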
1374 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
1400 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
1425 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
1428 dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
1611 struct mei_device *dev;
1615 dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
1616 if (!dev)
1619 hw = to_me_hw(dev);
1622 dev->dr_dscr[i].size = cfg->dma_size[i];
1624 mei_device_init(dev, parent, &mei_me_hw_ops);
1627 dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
1629 dev->kind = cfg->kind;
1631 return dev;
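
mei_me_dev_init() above makes a single devm-managed allocation large enough for the generic mei_device plus the ME-specific state, and to_me_hw() presumably resolves to that trailing area. A sketch of the layout with illustrative names; the real mei_device locates its hw area its own way, and the pointer arithmetic below is just one way to model it:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_dev {
	int dev_state;			/* bus-generic state */
	/* hardware-specific area follows immediately after this struct */
};

struct demo_hw {
	int pg_state;			/* ME-specific state */
};

static inline struct demo_hw *demo_to_hw(struct demo_dev *dev)
{
	return (struct demo_hw *)(dev + 1);	/* first byte past the base struct */
}

static struct demo_dev *demo_dev_init(struct device *parent)
{
	struct demo_dev *dev;

	dev = devm_kzalloc(parent, sizeof(struct demo_dev) + sizeof(struct demo_hw),
			   GFP_KERNEL);
	if (!dev)
		return NULL;			/* allocation failure: caller bails out */

	demo_to_hw(dev)->pg_state = 0;		/* already zeroed by kzalloc; shown for clarity */
	return dev;
}
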