Lines matching refs:mdev_state in samples/vfio-mdev/mtty.c (the VFIO mediated-device sample driver); each match below is prefixed with its line number in that file.

128 struct mdev_state {
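
Only the opening brace of struct mdev_state appears in the match list, but the accesses collected below pin down most of its layout. A reconstructed sketch follows; field types, ordering, and the helper-struct shapes are inferred from usage and may differ from the real definitions in mtty.c:

    /* Reconstructed from the accesses listed below; types are inferred. */
    #define MAX_FIFO_SIZE 16                /* assumed; see the FIFO model below */

    struct rxtx {                           /* circular rx/tx FIFO */
        u8 fifo[MAX_FIFO_SIZE];
        u8 head, tail;
        u8 count;
    };

    struct serial_port {                    /* per-port UART state ("s" below) */
        u8 uart_reg[8];                     /* virtual 16550 register file */
        struct rxtx rxtx;
        bool dlab;                          /* divisor-latch access bit */
        bool overrun;
        u16 divisor;
        u8 fcr;                             /* FIFO control register */
        u8 max_fifo_size;
        u8 intr_trigger_level;
    };

    struct mdev_region_info {
        u64 start;                          /* guest-programmed BAR base */
        u32 size;
        u64 vfio_offset;                    /* region offset in the device fd */
    };

    struct mdev_state {
        struct vfio_device vdev;            /* embedded vfio_device */
        struct mdev_device *mdev;
        struct vfio_device_info dev_info;
        int irq_index;                      /* -1, INTx index, or MSI index */
        struct eventfd_ctx *intx_evtfd;
        struct eventfd_ctx *msi_evtfd;
        bool intx_mask;
        struct mutex ops_lock;              /* serializes mdev_access()/ioctls */
        struct mutex rxtx_lock;             /* protects the per-port FIFOs */
        u8 *vconfig;                        /* virtual PCI config space */
        u32 bar_mask[2];                    /* one sizing mask per I/O BAR */
        struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
        u32 nr_ports;                       /* 1 or 2 emulated UARTs */
        struct serial_port s[2];
    };
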
185 static bool is_intx(struct mdev_state *mdev_state)
187 return mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX;
190 static bool is_msi(struct mdev_state *mdev_state)
192 return mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX;
195 static bool is_noirq(struct mdev_state *mdev_state)
197 return !is_intx(mdev_state) && !is_msi(mdev_state);
200 static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
202 lockdep_assert_held(&mdev_state->ops_lock);
204 if (is_msi(mdev_state)) {
205 if (mdev_state->msi_evtfd)
206 eventfd_signal(mdev_state->msi_evtfd, 1);
207 } else if (is_intx(mdev_state)) {
208 if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
209 eventfd_signal(mdev_state->intx_evtfd, 1);
210 mdev_state->intx_mask = true;
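
Lines 200-210 contain the complete delivery logic; assembled into one compilable fragment (only the closing braces between the matches are assumed):

    static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
    {
        lockdep_assert_held(&mdev_state->ops_lock);

        if (is_msi(mdev_state)) {
            if (mdev_state->msi_evtfd)
                eventfd_signal(mdev_state->msi_evtfd, 1);
        } else if (is_intx(mdev_state)) {
            if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
                eventfd_signal(mdev_state->intx_evtfd, 1);
                mdev_state->intx_mask = true;
            }
        }
    }

Note the asymmetry: MSI fires whenever an eventfd is installed, while INTx self-masks after each signal (intx_mask goes true here and is cleared again by the UNMASK paths at lines 1003-1008), so a guest that never unmasks stops receiving INTx events.
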
215 static void mtty_create_config_space(struct mdev_state *mdev_state)
218 STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
221 STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);
224 STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);
227 mdev_state->vconfig[0x8] = 0x10;
230 mdev_state->vconfig[0x9] = 0x02;
233 mdev_state->vconfig[0xa] = 0x00;
236 mdev_state->vconfig[0xb] = 0x07;
240 STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
241 mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
243 if (mdev_state->nr_ports == 2) {
245 STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
246 mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
250 STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);
252 mdev_state->vconfig[0x34] = 0x00; /* Cap Ptr */
253 mdev_state->vconfig[0x3d] = 0x01; /* interrupt pin (INTA#) */
256 mdev_state->vconfig[0x40] = 0x23;
257 mdev_state->vconfig[0x43] = 0x80;
258 mdev_state->vconfig[0x44] = 0x23;
259 mdev_state->vconfig[0x48] = 0x23;
260 mdev_state->vconfig[0x4c] = 0x23;
262 mdev_state->vconfig[0x60] = 0x50;
263 mdev_state->vconfig[0x61] = 0x43;
264 mdev_state->vconfig[0x62] = 0x49;
265 mdev_state->vconfig[0x63] = 0x20;
266 mdev_state->vconfig[0x64] = 0x53;
267 mdev_state->vconfig[0x65] = 0x65;
268 mdev_state->vconfig[0x66] = 0x72;
269 mdev_state->vconfig[0x67] = 0x69;
270 mdev_state->vconfig[0x68] = 0x61;
271 mdev_state->vconfig[0x69] = 0x6c;
272 mdev_state->vconfig[0x6a] = 0x2f;
273 mdev_state->vconfig[0x6b] = 0x55;
274 mdev_state->vconfig[0x6c] = 0x41;
275 mdev_state->vconfig[0x6d] = 0x52;
276 mdev_state->vconfig[0x6e] = 0x54;
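
The constants above are easier to read decoded: the little-endian dword 0x32534348 at offset 0x0 yields vendor ID 0x4348 and device ID 0x3253 (the raw bytes spell "HCS2"), the class bytes at 0x9-0xb advertise a 16550-compatible serial controller (base class 0x07 at 0xb, subclass 0x00 at 0xa, prog-if 0x02 at 0x9), and the run of stores at 0x60-0x6e is an ASCII tag. A standalone userspace check of that tag (not part of the driver):

    #include <stdio.h>

    int main(void)
    {
        const unsigned char v[] = { 0x50, 0x43, 0x49, 0x20, 0x53, 0x65,
                                    0x72, 0x69, 0x61, 0x6c, 0x2f, 0x55,
                                    0x41, 0x52, 0x54 };

        printf("%.*s\n", (int)sizeof(v), v);    /* prints "PCI Serial/UART" */
        return 0;
    }
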
279 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
290 mdev_state->vconfig[0x3c] = buf[0];
305 if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
306 STORE_LE32(&mdev_state->vconfig[offset], 0);
314 bar_mask = mdev_state->bar_mask[bar_index];
318 cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
319 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
324 STORE_LE32(&mdev_state->vconfig[offset], 0);
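
Lines 305-324 implement standard PCI BAR sizing: the guest writes all ones to the BAR, the device stores back only the address bits it decodes, and the guest infers the size from what it reads; BAR1 is forced to zero on single-port devices (lines 305-306). The arithmetic as a standalone check, assuming MTTY_IO_BAR_SIZE is 0x8 (an 8-byte UART register window):

    #include <stdio.h>
    #include <stdint.h>

    #define MTTY_IO_BAR_SIZE 0x8            /* assumed value */

    int main(void)
    {
        uint32_t bar_mask = ~(MTTY_IO_BAR_SIZE) + 1;  /* line 241: 0xfffffff8 */
        uint32_t cfg_addr = 0xffffffff & bar_mask;    /* guest probes with ~0 */

        cfg_addr |= 0x1;                    /* low bits kept: I/O space flag */
        printf("mask=0x%08x probed=0x%08x size=%u\n",
               (unsigned)bar_mask, (unsigned)cfg_addr,
               (unsigned)(~(cfg_addr & ~0x3u) + 1));  /* -> size=8 */
        return 0;
    }
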
333 static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
342 if (mdev_state->s[index].dlab) {
343 mdev_state->s[index].divisor |= data;
347 mutex_lock(&mdev_state->rxtx_lock);
350 if (mdev_state->s[index].rxtx.count <
351 mdev_state->s[index].max_fifo_size) {
352 mdev_state->s[index].rxtx.fifo[
353 mdev_state->s[index].rxtx.head] = data;
354 mdev_state->s[index].rxtx.count++;
355 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
356 mdev_state->s[index].overrun = false;
362 if ((mdev_state->s[index].uart_reg[UART_IER] &
364 (mdev_state->s[index].rxtx.count ==
365 mdev_state->s[index].intr_trigger_level)) {
371 mtty_trigger_interrupt(mdev_state);
377 mdev_state->s[index].overrun = true;
383 if (mdev_state->s[index].uart_reg[UART_IER] &
385 mtty_trigger_interrupt(mdev_state);
387 mutex_unlock(&mdev_state->rxtx_lock);
392 if (mdev_state->s[index].dlab)
393 mdev_state->s[index].divisor |= (u16)data << 8;
395 mdev_state->s[index].uart_reg[offset] = data;
396 mutex_lock(&mdev_state->rxtx_lock);
398 (mdev_state->s[index].rxtx.head ==
399 mdev_state->s[index].rxtx.tail)) {
404 mtty_trigger_interrupt(mdev_state);
407 mutex_unlock(&mdev_state->rxtx_lock);
413 mdev_state->s[index].fcr = data;
415 mutex_lock(&mdev_state->rxtx_lock);
418 mdev_state->s[index].rxtx.count = 0;
419 mdev_state->s[index].rxtx.head = 0;
420 mdev_state->s[index].rxtx.tail = 0;
422 mutex_unlock(&mdev_state->rxtx_lock);
426 mdev_state->s[index].intr_trigger_level = 1;
430 mdev_state->s[index].intr_trigger_level = 4;
434 mdev_state->s[index].intr_trigger_level = 8;
438 mdev_state->s[index].intr_trigger_level = 14;
447 mdev_state->s[index].intr_trigger_level = 1;
449 mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
451 mdev_state->s[index].max_fifo_size = 1;
452 mdev_state->s[index].intr_trigger_level = 1;
459 mdev_state->s[index].dlab = true;
460 mdev_state->s[index].divisor = 0;
462 mdev_state->s[index].dlab = false;
464 mdev_state->s[index].uart_reg[offset] = data;
468 mdev_state->s[index].uart_reg[offset] = data;
470 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
475 mtty_trigger_interrupt(mdev_state);
478 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
483 mtty_trigger_interrupt(mdev_state);
493 mdev_state->s[index].uart_reg[offset] = data;
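
The transmit path (lines 347-387), the FCR FIFO-clear (415-422), and the receive path in handle_bar_read() all drive one circular buffer through CIRCULAR_BUF_INC_IDX. A standalone model of that buffer; MAX_FIFO_SIZE = 16 and the power-of-two index masking are assumptions consistent with the wrap-around the driver needs:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_FIFO_SIZE 16
    #define CIRCULAR_BUF_INC_IDX(idx) ((idx) = ((idx) + 1) & (MAX_FIFO_SIZE - 1))

    struct rxtx {
        unsigned char fifo[MAX_FIFO_SIZE];
        unsigned char head, tail, count;
    };

    static bool fifo_put(struct rxtx *r, unsigned char c)
    {
        if (r->count >= MAX_FIFO_SIZE)
            return false;                   /* caller would set overrun */
        r->fifo[r->head] = c;
        r->count++;
        CIRCULAR_BUF_INC_IDX(r->head);
        return true;
    }

    static bool fifo_get(struct rxtx *r, unsigned char *c)
    {
        if (r->count == 0)                  /* the driver tests head != tail */
            return false;
        *c = r->fifo[r->tail];
        r->count--;
        CIRCULAR_BUF_INC_IDX(r->tail);
        return true;
    }

    int main(void)
    {
        struct rxtx r = { {0}, 0, 0, 0 };
        unsigned char c;

        fifo_put(&r, 'm');
        fifo_put(&r, 't');
        while (fifo_get(&r, &c))
            putchar(c);
        putchar('\n');                      /* prints "mt" */
        return 0;
    }

mtty has no physical wire, so transmit loops straight back into receive: a byte written to THR becomes readable from RBR, which is why the write path can raise the received-data interrupt once rxtx.count reaches intr_trigger_level (lines 362-371).
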
501 static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
508 if (mdev_state->s[index].dlab) {
509 *buf = (u8)mdev_state->s[index].divisor;
513 mutex_lock(&mdev_state->rxtx_lock);
515 if (mdev_state->s[index].rxtx.head !=
516 mdev_state->s[index].rxtx.tail) {
517 *buf = mdev_state->s[index].rxtx.fifo[
518 mdev_state->s[index].rxtx.tail];
519 mdev_state->s[index].rxtx.count--;
520 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
523 if (mdev_state->s[index].rxtx.head ==
524 mdev_state->s[index].rxtx.tail) {
532 if (mdev_state->s[index].uart_reg[UART_IER] &
534 mtty_trigger_interrupt(mdev_state);
536 mutex_unlock(&mdev_state->rxtx_lock);
541 if (mdev_state->s[index].dlab) {
542 *buf = (u8)(mdev_state->s[index].divisor >> 8);
545 *buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
550 u8 ier = mdev_state->s[index].uart_reg[UART_IER];
553 mutex_lock(&mdev_state->rxtx_lock);
555 if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
560 (mdev_state->s[index].rxtx.count >=
561 mdev_state->s[index].intr_trigger_level))
566 (mdev_state->s[index].rxtx.head ==
567 mdev_state->s[index].rxtx.tail))
572 (mdev_state->s[index].uart_reg[UART_MCR] &
582 mutex_unlock(&mdev_state->rxtx_lock);
588 *buf = mdev_state->s[index].uart_reg[offset];
595 mutex_lock(&mdev_state->rxtx_lock);
597 if (mdev_state->s[index].rxtx.head !=
598 mdev_state->s[index].rxtx.tail)
602 if (mdev_state->s[index].overrun)
606 if (mdev_state->s[index].rxtx.head ==
607 mdev_state->s[index].rxtx.tail)
610 mutex_unlock(&mdev_state->rxtx_lock);
617 mutex_lock(&mdev_state->rxtx_lock);
619 if (mdev_state->s[index].uart_reg[UART_MCR] &
621 if (mdev_state->s[index].rxtx.count <
622 mdev_state->s[index].max_fifo_size)
626 mutex_unlock(&mdev_state->rxtx_lock);
631 *buf = mdev_state->s[index].uart_reg[offset];
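
The IIR read (lines 550-582) reports every enabled-and-pending source at once, highest priority first: line status (overrun), received data at the trigger level, transmitter empty, and modem status. A sketch of that cascade; the helper name is invented, the register macros are the standard <linux/serial_reg.h> ones, and the MCR mask (elided from line 572 in this listing) is an assumption:

    static u8 mtty_pending_iir(struct serial_port *sp)
    {
        u8 ier = sp->uart_reg[UART_IER];
        u8 iir = 0;

        /* priority 1: receiver line status (the overrun flag) */
        if ((ier & UART_IER_RLSI) && sp->overrun)
            iir |= UART_IIR_RLSI;
        /* priority 2: received data available at the trigger level */
        if ((ier & UART_IER_RDI) &&
            sp->rxtx.count >= sp->intr_trigger_level)
            iir |= UART_IIR_RDI;
        /* priority 3: transmitter holding register empty */
        if ((ier & UART_IER_THRI) && sp->rxtx.head == sp->rxtx.tail)
            iir |= UART_IIR_THRI;
        /* priority 4: modem status; the exact MCR mask is assumed */
        if ((ier & UART_IER_MSI) &&
            (sp->uart_reg[UART_MCR] & (UART_MCR_RTS | UART_MCR_DTR)))
            iir |= UART_IIR_MSI;

        /* the listing elides whether an idle IIR reports UART_IIR_NO_INT */
        return iir;
    }

In the driver this runs under rxtx_lock (lines 553-582) and the result lands in *buf.
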
639 static void mdev_read_base(struct mdev_state *mdev_state)
649 if (!mdev_state->region_info[index].size)
652 start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
654 mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
659 start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
671 mdev_state->region_info[index].start = ((u64)start_hi << 32) |
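
mdev_read_base() (lines 639-671) walks the BARs and recovers each guest-programmed base address from vconfig, concatenating the high dword for 64-bit memory BARs. Assembled from the fragments; the loop and branch scaffolding, and the PCI_BASE_ADDRESS_* mask names on the elided continuation lines, are assumptions:

    int index, pos = PCI_BASE_ADDRESS_0;
    u32 start_lo, start_hi, mem_type;

    for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
        if (!mdev_state->region_info[index].size)
            continue;                       /* BAR not implemented */

        start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
                    PCI_BASE_ADDRESS_MEM_MASK;
        mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
                    PCI_BASE_ADDRESS_MEM_TYPE_MASK;

        if (mem_type == PCI_BASE_ADDRESS_MEM_TYPE_64) {
            start_hi = *(u32 *)(mdev_state->vconfig + pos + 4);
            pos += 4;                       /* 64-bit BARs use two slots */
        } else {
            start_hi = 0;
        }

        pos += 4;
        mdev_state->region_info[index].start =
                ((u64)start_hi << 32) | start_lo;
    }
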
676 static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
686 mutex_lock(&mdev_state->ops_lock);
699 handle_pci_cfg_write(mdev_state, offset, buf, count);
701 memcpy(buf, (mdev_state->vconfig + offset), count);
708 if (!mdev_state->region_info[index].start)
709 mdev_read_base(mdev_state);
717 *buf, mdev_state->s[index].dlab);
719 handle_bar_write(index, mdev_state, offset, buf, count);
721 handle_bar_read(index, mdev_state, offset, buf, count);
727 *buf, mdev_state->s[index].dlab);
741 mutex_unlock(&mdev_state->ops_lock);
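
mdev_access() (lines 676-741) is the single funnel for all config-space and BAR traffic, taken under ops_lock (686/741). The region index rides in the upper bits of the access offset. A skeleton of the demux, assuming the usual sample-driver encoding with the index in bits 40 and up (macro names invented to match that convention):

    #define MTTY_VFIO_PCI_OFFSET_SHIFT  40  /* assumed encoding */
    #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) \
            ((off) >> MTTY_VFIO_PCI_OFFSET_SHIFT)
    #define MTTY_VFIO_PCI_OFFSET_MASK \
            (((u64)1 << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)

    index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
    offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;

    switch (index) {
    case VFIO_PCI_CONFIG_REGION_INDEX:
        if (is_write)
            handle_pci_cfg_write(mdev_state, offset, buf, count);   /* 699 */
        else
            memcpy(buf, mdev_state->vconfig + offset, count);       /* 701 */
        break;
    case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
        if (!mdev_state->region_info[index].start)
            mdev_read_base(mdev_state);     /* lazy BAR decode, 708-709 */
        if (is_write)
            handle_bar_write(index, mdev_state, offset, buf, count);
        else
            handle_bar_read(index, mdev_state, offset, buf, count);
        break;
    }
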
748 struct mdev_state *mdev_state =
749 container_of(vdev, struct mdev_state, vdev);
763 mdev_state->nr_ports = type->nr_ports;
764 mdev_state->irq_index = -1;
765 mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
766 mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
767 mutex_init(&mdev_state->rxtx_lock);
769 mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
770 if (!mdev_state->vconfig) {
775 mutex_init(&mdev_state->ops_lock);
776 mdev_state->mdev = mdev;
777 mtty_create_config_space(mdev_state);
787 struct mdev_state *mdev_state;
790 mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
792 if (IS_ERR(mdev_state))
793 return PTR_ERR(mdev_state);
795 ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
798 dev_set_drvdata(&mdev->dev, mdev_state);
802 vfio_put_device(&mdev_state->vdev);
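
Lines 787-802 show the standard two-phase VFIO lifecycle: vfio_alloc_device() allocates the structure around the embedded vfio_device and runs the init callback (mtty_init_dev(), lines 748-777, which sets defaults, allocates vconfig, and builds the config space), then vfio_register_emulated_iommu_dev() publishes the device. Assembled from the fragments, with the ops name and error label assumed:

    static int mtty_probe(struct mdev_device *mdev)
    {
        struct mdev_state *mdev_state;
        int ret;

        mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
                                       &mtty_dev_ops);  /* ops name assumed */
        if (IS_ERR(mdev_state))
            return PTR_ERR(mdev_state);

        ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
        if (ret)
            goto err_put;

        dev_set_drvdata(&mdev->dev, mdev_state);
        return 0;

    err_put:
        vfio_put_device(&mdev_state->vdev);  /* release callback frees vconfig */
        return ret;
    }

On teardown the same reference drop runs from mtty_remove() (lines 817-820), and the release callback (808-812) returns the ports to mdev_avail_ports and frees vconfig.
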
808 struct mdev_state *mdev_state =
809 container_of(vdev, struct mdev_state, vdev);
811 atomic_add(mdev_state->nr_ports, &mdev_avail_ports);
812 kfree(mdev_state->vconfig);
817 struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
819 vfio_unregister_group_dev(&mdev_state->vdev);
820 vfio_put_device(&mdev_state->vdev);
823 static int mtty_reset(struct mdev_state *mdev_state)
833 struct mdev_state *mdev_state =
834 container_of(vdev, struct mdev_state, vdev);
844 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
856 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
868 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
894 struct mdev_state *mdev_state =
895 container_of(vdev, struct mdev_state, vdev);
908 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
920 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
932 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
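
mtty_read() (lines 833-868) and mtty_write() (lines 894-932) both chop the user buffer into naturally aligned 4-, 2-, and 1-byte chunks and push each chunk through mdev_access(). A sketch of the write loop assembled around the three calls above; the bookkeeping variables, the error handling, and the trailing mdev_access() arguments (position and write flag, elided from the listing) are assumptions:

    static ssize_t mtty_write_sketch(struct mdev_state *mdev_state,
                                     const char __user *buf, size_t count,
                                     loff_t *ppos)
    {
        ssize_t done = 0, ret;

        while (count) {
            size_t filled;

            if (count >= 4 && !(*ppos % 4)) {
                u32 val;

                if (copy_from_user(&val, buf, sizeof(val)))
                    return -EFAULT;
                ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
                                  *ppos, true);             /* line 908 */
                if (ret <= 0)
                    return -EINVAL;
                filled = 4;
            } else if (count >= 2 && !(*ppos % 2)) {
                u16 val;

                if (copy_from_user(&val, buf, sizeof(val)))
                    return -EFAULT;
                ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
                                  *ppos, true);             /* line 920 */
                if (ret <= 0)
                    return -EINVAL;
                filled = 2;
            } else {
                u8 val;

                if (copy_from_user(&val, buf, sizeof(val)))
                    return -EFAULT;
                ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
                                  *ppos, true);             /* line 932 */
                if (ret <= 0)
                    return -EINVAL;
                filled = 1;
            }

            count -= filled;
            done += filled;
            *ppos += filled;
            buf += filled;
        }

        return done;
    }

The read side is the mirror image: mdev_access(..., false) fills val, and each chunk is copied out with copy_to_user().
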
950 static void mtty_disable_intx(struct mdev_state *mdev_state)
952 if (mdev_state->intx_evtfd) {
953 eventfd_ctx_put(mdev_state->intx_evtfd);
954 mdev_state->intx_evtfd = NULL;
955 mdev_state->intx_mask = false;
956 mdev_state->irq_index = -1;
960 static void mtty_disable_msi(struct mdev_state *mdev_state)
962 if (mdev_state->msi_evtfd) {
963 eventfd_ctx_put(mdev_state->msi_evtfd);
964 mdev_state->msi_evtfd = NULL;
965 mdev_state->irq_index = -1;
969 static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
975 mutex_lock(&mdev_state->ops_lock);
980 if (!is_intx(mdev_state) || start != 0 || count != 1) {
986 mdev_state->intx_mask = true;
991 mdev_state->intx_mask = true;
997 if (!is_intx(mdev_state) || start != 0 || count != 1) {
1003 mdev_state->intx_mask = false;
1008 mdev_state->intx_mask = false;
1014 if (is_intx(mdev_state) && !count &&
1016 mtty_disable_intx(mdev_state);
1020 if (!(is_intx(mdev_state) || is_noirq(mdev_state)) ||
1030 mtty_disable_intx(mdev_state);
1040 mdev_state->intx_evtfd = evt;
1041 mdev_state->irq_index = index;
1045 if (!is_intx(mdev_state)) {
1051 mtty_trigger_interrupt(mdev_state);
1056 mtty_trigger_interrupt(mdev_state);
1068 if (is_msi(mdev_state) && !count &&
1070 mtty_disable_msi(mdev_state);
1074 if (!(is_msi(mdev_state) || is_noirq(mdev_state)) ||
1084 mtty_disable_msi(mdev_state);
1094 mdev_state->msi_evtfd = evt;
1095 mdev_state->irq_index = index;
1099 if (!is_msi(mdev_state)) {
1105 mtty_trigger_interrupt(mdev_state);
1110 mtty_trigger_interrupt(mdev_state);
1116 dev_dbg(mdev_state->vdev.dev, "%s: MSIX_IRQ\n", __func__);
1120 dev_dbg(mdev_state->vdev.dev, "%s: ERR_IRQ\n", __func__);
1124 dev_dbg(mdev_state->vdev.dev, "%s: REQ_IRQ\n", __func__);
1129 mutex_unlock(&mdev_state->ops_lock);
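
mtty_set_irqs() (lines 969-1129) decodes the VFIO_DEVICE_SET_IRQS flag matrix under ops_lock: MASK/UNMASK only make sense for INTx (980-1008), while ACTION_TRIGGER installs or fires the interrupt. On the trigger path, VFIO_IRQ_SET_DATA_EVENTFD installs the eventfd (an fd of -1, or count == 0 with DATA_NONE per lines 1014-1016, tears it down), DATA_NONE fires immediately, and DATA_BOOL fires if the u8 payload is nonzero. A sketch of the INTx half; the helper name and locals are invented, and MSI (1068-1110) is symmetric:

    static int mtty_intx_trigger_sketch(struct mdev_state *mdev_state,
                                        u32 flags, unsigned int index,
                                        void *data)
    {
        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
            int fd = *(int *)data;
            struct eventfd_ctx *evt;

            mtty_disable_intx(mdev_state);  /* line 1030: drop old ctx */

            if (fd < 0)                     /* fd == -1 just disables */
                return 0;

            evt = eventfd_ctx_fdget(fd);
            if (IS_ERR(evt))
                return PTR_ERR(evt);

            mdev_state->intx_evtfd = evt;   /* line 1040 */
            mdev_state->irq_index = index;  /* line 1041 */
            return 0;
        }

        if (!is_intx(mdev_state))           /* line 1045 */
            return -EINVAL;

        if (flags & VFIO_IRQ_SET_DATA_NONE) {
            mtty_trigger_interrupt(mdev_state);         /* line 1051 */
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
            if (*(u8 *)data)
                mtty_trigger_interrupt(mdev_state);     /* line 1056 */
        }
        return 0;
    }
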
1133 static int mtty_get_region_info(struct mdev_state *mdev_state,
1144 mutex_lock(&mdev_state->ops_lock);
1154 if (mdev_state->nr_ports == 2)
1162 mdev_state->region_info[bar_index].size = size;
1163 mdev_state->region_info[bar_index].vfio_offset =
1170 mutex_unlock(&mdev_state->ops_lock);
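
mtty_get_region_info() (lines 1133-1170) sizes the regions: BAR0 always reports MTTY_IO_BAR_SIZE, BAR1 only does so for a two-port device (line 1154), and each region's file offset is derived from its index. A sketch of the computation, reusing the offset encoding assumed for mdev_access() above; the config-region case is an assumption:

    #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
            ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)

    u32 size = 0;

    switch (bar_index) {
    case VFIO_PCI_CONFIG_REGION_INDEX:
        size = MTTY_CONFIG_SPACE_SIZE;      /* assumed, mirrors init */
        break;
    case VFIO_PCI_BAR0_REGION_INDEX:
        size = MTTY_IO_BAR_SIZE;
        break;
    case VFIO_PCI_BAR1_REGION_INDEX:
        if (mdev_state->nr_ports == 2)
            size = MTTY_IO_BAR_SIZE;        /* line 1154 */
        break;
    default:
        break;                              /* unimplemented: size stays 0 */
    }

    mdev_state->region_info[bar_index].size = size;     /* line 1162 */
    mdev_state->region_info[bar_index].vfio_offset =    /* line 1163 */
            MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
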
1204 struct mdev_state *mdev_state =
1205 container_of(vdev, struct mdev_state, vdev);
1226 memcpy(&mdev_state->dev_info, &info, sizeof(info));
1247 ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
1268 (info.index >= mdev_state->dev_info.num_irqs))
1292 mdev_state->dev_info.num_irqs,
1305 ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
1312 return mtty_reset(mdev_state);
1350 struct mdev_state *mdev_state =
1351 container_of(vdev, struct mdev_state, vdev);
1353 mtty_disable_intx(mdev_state);
1354 mtty_disable_msi(mdev_state);