18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-only 28c2ecf20Sopenharmony_ci/* 38c2ecf20Sopenharmony_ci * VMware VMCI Driver 48c2ecf20Sopenharmony_ci * 58c2ecf20Sopenharmony_ci * Copyright (C) 2012 VMware, Inc. All rights reserved. 68c2ecf20Sopenharmony_ci */ 78c2ecf20Sopenharmony_ci 88c2ecf20Sopenharmony_ci#include <linux/vmw_vmci_defs.h> 98c2ecf20Sopenharmony_ci#include <linux/vmw_vmci_api.h> 108c2ecf20Sopenharmony_ci#include <linux/moduleparam.h> 118c2ecf20Sopenharmony_ci#include <linux/interrupt.h> 128c2ecf20Sopenharmony_ci#include <linux/highmem.h> 138c2ecf20Sopenharmony_ci#include <linux/kernel.h> 148c2ecf20Sopenharmony_ci#include <linux/mm.h> 158c2ecf20Sopenharmony_ci#include <linux/module.h> 168c2ecf20Sopenharmony_ci#include <linux/sched.h> 178c2ecf20Sopenharmony_ci#include <linux/slab.h> 188c2ecf20Sopenharmony_ci#include <linux/init.h> 198c2ecf20Sopenharmony_ci#include <linux/pci.h> 208c2ecf20Sopenharmony_ci#include <linux/smp.h> 218c2ecf20Sopenharmony_ci#include <linux/io.h> 228c2ecf20Sopenharmony_ci#include <linux/vmalloc.h> 238c2ecf20Sopenharmony_ci 248c2ecf20Sopenharmony_ci#include "vmci_datagram.h" 258c2ecf20Sopenharmony_ci#include "vmci_doorbell.h" 268c2ecf20Sopenharmony_ci#include "vmci_context.h" 278c2ecf20Sopenharmony_ci#include "vmci_driver.h" 288c2ecf20Sopenharmony_ci#include "vmci_event.h" 298c2ecf20Sopenharmony_ci 308c2ecf20Sopenharmony_ci#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 318c2ecf20Sopenharmony_ci 328c2ecf20Sopenharmony_ci#define VMCI_UTIL_NUM_RESOURCES 1 338c2ecf20Sopenharmony_ci 348c2ecf20Sopenharmony_cistatic bool vmci_disable_msi; 358c2ecf20Sopenharmony_cimodule_param_named(disable_msi, vmci_disable_msi, bool, 0); 368c2ecf20Sopenharmony_ciMODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); 378c2ecf20Sopenharmony_ci 388c2ecf20Sopenharmony_cistatic bool vmci_disable_msix; 398c2ecf20Sopenharmony_cimodule_param_named(disable_msix, vmci_disable_msix, bool, 0); 
408c2ecf20Sopenharmony_ciMODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); 418c2ecf20Sopenharmony_ci 428c2ecf20Sopenharmony_cistatic u32 ctx_update_sub_id = VMCI_INVALID_ID; 438c2ecf20Sopenharmony_cistatic u32 vm_context_id = VMCI_INVALID_ID; 448c2ecf20Sopenharmony_ci 458c2ecf20Sopenharmony_cistruct vmci_guest_device { 468c2ecf20Sopenharmony_ci struct device *dev; /* PCI device we are attached to */ 478c2ecf20Sopenharmony_ci void __iomem *iobase; 488c2ecf20Sopenharmony_ci 498c2ecf20Sopenharmony_ci bool exclusive_vectors; 508c2ecf20Sopenharmony_ci 518c2ecf20Sopenharmony_ci struct tasklet_struct datagram_tasklet; 528c2ecf20Sopenharmony_ci struct tasklet_struct bm_tasklet; 538c2ecf20Sopenharmony_ci 548c2ecf20Sopenharmony_ci void *data_buffer; 558c2ecf20Sopenharmony_ci void *notification_bitmap; 568c2ecf20Sopenharmony_ci dma_addr_t notification_base; 578c2ecf20Sopenharmony_ci}; 588c2ecf20Sopenharmony_ci 598c2ecf20Sopenharmony_cistatic bool use_ppn64; 608c2ecf20Sopenharmony_ci 618c2ecf20Sopenharmony_cibool vmci_use_ppn64(void) 628c2ecf20Sopenharmony_ci{ 638c2ecf20Sopenharmony_ci return use_ppn64; 648c2ecf20Sopenharmony_ci} 658c2ecf20Sopenharmony_ci 668c2ecf20Sopenharmony_ci/* vmci_dev singleton device and supporting data*/ 678c2ecf20Sopenharmony_cistruct pci_dev *vmci_pdev; 688c2ecf20Sopenharmony_cistatic struct vmci_guest_device *vmci_dev_g; 698c2ecf20Sopenharmony_cistatic DEFINE_SPINLOCK(vmci_dev_spinlock); 708c2ecf20Sopenharmony_ci 718c2ecf20Sopenharmony_cistatic atomic_t vmci_num_guest_devices = ATOMIC_INIT(0); 728c2ecf20Sopenharmony_ci 738c2ecf20Sopenharmony_cibool vmci_guest_code_active(void) 748c2ecf20Sopenharmony_ci{ 758c2ecf20Sopenharmony_ci return atomic_read(&vmci_num_guest_devices) != 0; 768c2ecf20Sopenharmony_ci} 778c2ecf20Sopenharmony_ci 788c2ecf20Sopenharmony_ciu32 vmci_get_vm_context_id(void) 798c2ecf20Sopenharmony_ci{ 808c2ecf20Sopenharmony_ci if (vm_context_id == VMCI_INVALID_ID) { 818c2ecf20Sopenharmony_ci struct 
vmci_datagram get_cid_msg; 828c2ecf20Sopenharmony_ci get_cid_msg.dst = 838c2ecf20Sopenharmony_ci vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 848c2ecf20Sopenharmony_ci VMCI_GET_CONTEXT_ID); 858c2ecf20Sopenharmony_ci get_cid_msg.src = VMCI_ANON_SRC_HANDLE; 868c2ecf20Sopenharmony_ci get_cid_msg.payload_size = 0; 878c2ecf20Sopenharmony_ci vm_context_id = vmci_send_datagram(&get_cid_msg); 888c2ecf20Sopenharmony_ci } 898c2ecf20Sopenharmony_ci return vm_context_id; 908c2ecf20Sopenharmony_ci} 918c2ecf20Sopenharmony_ci 928c2ecf20Sopenharmony_ci/* 938c2ecf20Sopenharmony_ci * VM to hypervisor call mechanism. We use the standard VMware naming 948c2ecf20Sopenharmony_ci * convention since shared code is calling this function as well. 958c2ecf20Sopenharmony_ci */ 968c2ecf20Sopenharmony_ciint vmci_send_datagram(struct vmci_datagram *dg) 978c2ecf20Sopenharmony_ci{ 988c2ecf20Sopenharmony_ci unsigned long flags; 998c2ecf20Sopenharmony_ci int result; 1008c2ecf20Sopenharmony_ci 1018c2ecf20Sopenharmony_ci /* Check args. */ 1028c2ecf20Sopenharmony_ci if (dg == NULL) 1038c2ecf20Sopenharmony_ci return VMCI_ERROR_INVALID_ARGS; 1048c2ecf20Sopenharmony_ci 1058c2ecf20Sopenharmony_ci /* 1068c2ecf20Sopenharmony_ci * Need to acquire spinlock on the device because the datagram 1078c2ecf20Sopenharmony_ci * data may be spread over multiple pages and the monitor may 1088c2ecf20Sopenharmony_ci * interleave device user rpc calls from multiple 1098c2ecf20Sopenharmony_ci * VCPUs. Acquiring the spinlock precludes that 1108c2ecf20Sopenharmony_ci * possibility. Disabling interrupts to avoid incoming 1118c2ecf20Sopenharmony_ci * datagrams during a "rep out" and possibly landing up in 1128c2ecf20Sopenharmony_ci * this function. 
1138c2ecf20Sopenharmony_ci */ 1148c2ecf20Sopenharmony_ci spin_lock_irqsave(&vmci_dev_spinlock, flags); 1158c2ecf20Sopenharmony_ci 1168c2ecf20Sopenharmony_ci if (vmci_dev_g) { 1178c2ecf20Sopenharmony_ci iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR, 1188c2ecf20Sopenharmony_ci dg, VMCI_DG_SIZE(dg)); 1198c2ecf20Sopenharmony_ci result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR); 1208c2ecf20Sopenharmony_ci } else { 1218c2ecf20Sopenharmony_ci result = VMCI_ERROR_UNAVAILABLE; 1228c2ecf20Sopenharmony_ci } 1238c2ecf20Sopenharmony_ci 1248c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&vmci_dev_spinlock, flags); 1258c2ecf20Sopenharmony_ci 1268c2ecf20Sopenharmony_ci return result; 1278c2ecf20Sopenharmony_ci} 1288c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(vmci_send_datagram); 1298c2ecf20Sopenharmony_ci 1308c2ecf20Sopenharmony_ci/* 1318c2ecf20Sopenharmony_ci * Gets called with the new context id if updated or resumed. 1328c2ecf20Sopenharmony_ci * Context id. 1338c2ecf20Sopenharmony_ci */ 1348c2ecf20Sopenharmony_cistatic void vmci_guest_cid_update(u32 sub_id, 1358c2ecf20Sopenharmony_ci const struct vmci_event_data *event_data, 1368c2ecf20Sopenharmony_ci void *client_data) 1378c2ecf20Sopenharmony_ci{ 1388c2ecf20Sopenharmony_ci const struct vmci_event_payld_ctx *ev_payload = 1398c2ecf20Sopenharmony_ci vmci_event_data_const_payload(event_data); 1408c2ecf20Sopenharmony_ci 1418c2ecf20Sopenharmony_ci if (sub_id != ctx_update_sub_id) { 1428c2ecf20Sopenharmony_ci pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id); 1438c2ecf20Sopenharmony_ci return; 1448c2ecf20Sopenharmony_ci } 1458c2ecf20Sopenharmony_ci 1468c2ecf20Sopenharmony_ci if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) { 1478c2ecf20Sopenharmony_ci pr_devel("Invalid event data\n"); 1488c2ecf20Sopenharmony_ci return; 1498c2ecf20Sopenharmony_ci } 1508c2ecf20Sopenharmony_ci 1518c2ecf20Sopenharmony_ci pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n", 1528c2ecf20Sopenharmony_ci 
vm_context_id, ev_payload->context_id, event_data->event); 1538c2ecf20Sopenharmony_ci 1548c2ecf20Sopenharmony_ci vm_context_id = ev_payload->context_id; 1558c2ecf20Sopenharmony_ci} 1568c2ecf20Sopenharmony_ci 1578c2ecf20Sopenharmony_ci/* 1588c2ecf20Sopenharmony_ci * Verify that the host supports the hypercalls we need. If it does not, 1598c2ecf20Sopenharmony_ci * try to find fallback hypercalls and use those instead. Returns 1608c2ecf20Sopenharmony_ci * true if required hypercalls (or fallback hypercalls) are 1618c2ecf20Sopenharmony_ci * supported by the host, false otherwise. 1628c2ecf20Sopenharmony_ci */ 1638c2ecf20Sopenharmony_cistatic int vmci_check_host_caps(struct pci_dev *pdev) 1648c2ecf20Sopenharmony_ci{ 1658c2ecf20Sopenharmony_ci bool result; 1668c2ecf20Sopenharmony_ci struct vmci_resource_query_msg *msg; 1678c2ecf20Sopenharmony_ci u32 msg_size = sizeof(struct vmci_resource_query_hdr) + 1688c2ecf20Sopenharmony_ci VMCI_UTIL_NUM_RESOURCES * sizeof(u32); 1698c2ecf20Sopenharmony_ci struct vmci_datagram *check_msg; 1708c2ecf20Sopenharmony_ci 1718c2ecf20Sopenharmony_ci check_msg = kzalloc(msg_size, GFP_KERNEL); 1728c2ecf20Sopenharmony_ci if (!check_msg) { 1738c2ecf20Sopenharmony_ci dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); 1748c2ecf20Sopenharmony_ci return -ENOMEM; 1758c2ecf20Sopenharmony_ci } 1768c2ecf20Sopenharmony_ci 1778c2ecf20Sopenharmony_ci check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1788c2ecf20Sopenharmony_ci VMCI_RESOURCES_QUERY); 1798c2ecf20Sopenharmony_ci check_msg->src = VMCI_ANON_SRC_HANDLE; 1808c2ecf20Sopenharmony_ci check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE; 1818c2ecf20Sopenharmony_ci msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg); 1828c2ecf20Sopenharmony_ci 1838c2ecf20Sopenharmony_ci msg->num_resources = VMCI_UTIL_NUM_RESOURCES; 1848c2ecf20Sopenharmony_ci msg->resources[0] = VMCI_GET_CONTEXT_ID; 1858c2ecf20Sopenharmony_ci 1868c2ecf20Sopenharmony_ci /* Checks that hyper calls are 
supported */ 1878c2ecf20Sopenharmony_ci result = vmci_send_datagram(check_msg) == 0x01; 1888c2ecf20Sopenharmony_ci kfree(check_msg); 1898c2ecf20Sopenharmony_ci 1908c2ecf20Sopenharmony_ci dev_dbg(&pdev->dev, "%s: Host capability check: %s\n", 1918c2ecf20Sopenharmony_ci __func__, result ? "PASSED" : "FAILED"); 1928c2ecf20Sopenharmony_ci 1938c2ecf20Sopenharmony_ci /* We need the vector. There are no fallbacks. */ 1948c2ecf20Sopenharmony_ci return result ? 0 : -ENXIO; 1958c2ecf20Sopenharmony_ci} 1968c2ecf20Sopenharmony_ci 1978c2ecf20Sopenharmony_ci/* 1988c2ecf20Sopenharmony_ci * Reads datagrams from the data in port and dispatches them. We 1998c2ecf20Sopenharmony_ci * always start reading datagrams into only the first page of the 2008c2ecf20Sopenharmony_ci * datagram buffer. If the datagrams don't fit into one page, we 2018c2ecf20Sopenharmony_ci * use the maximum datagram buffer size for the remainder of the 2028c2ecf20Sopenharmony_ci * invocation. This is a simple heuristic for not penalizing 2038c2ecf20Sopenharmony_ci * small datagrams. 2048c2ecf20Sopenharmony_ci * 2058c2ecf20Sopenharmony_ci * This function assumes that it has exclusive access to the data 2068c2ecf20Sopenharmony_ci * in port for the duration of the call. 
2078c2ecf20Sopenharmony_ci */ 2088c2ecf20Sopenharmony_cistatic void vmci_dispatch_dgs(unsigned long data) 2098c2ecf20Sopenharmony_ci{ 2108c2ecf20Sopenharmony_ci struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data; 2118c2ecf20Sopenharmony_ci u8 *dg_in_buffer = vmci_dev->data_buffer; 2128c2ecf20Sopenharmony_ci struct vmci_datagram *dg; 2138c2ecf20Sopenharmony_ci size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE; 2148c2ecf20Sopenharmony_ci size_t current_dg_in_buffer_size = PAGE_SIZE; 2158c2ecf20Sopenharmony_ci size_t remaining_bytes; 2168c2ecf20Sopenharmony_ci 2178c2ecf20Sopenharmony_ci BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE); 2188c2ecf20Sopenharmony_ci 2198c2ecf20Sopenharmony_ci ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, 2208c2ecf20Sopenharmony_ci vmci_dev->data_buffer, current_dg_in_buffer_size); 2218c2ecf20Sopenharmony_ci dg = (struct vmci_datagram *)dg_in_buffer; 2228c2ecf20Sopenharmony_ci remaining_bytes = current_dg_in_buffer_size; 2238c2ecf20Sopenharmony_ci 2248c2ecf20Sopenharmony_ci while (dg->dst.resource != VMCI_INVALID_ID || 2258c2ecf20Sopenharmony_ci remaining_bytes > PAGE_SIZE) { 2268c2ecf20Sopenharmony_ci unsigned dg_in_size; 2278c2ecf20Sopenharmony_ci 2288c2ecf20Sopenharmony_ci /* 2298c2ecf20Sopenharmony_ci * When the input buffer spans multiple pages, a datagram can 2308c2ecf20Sopenharmony_ci * start on any page boundary in the buffer. 
2318c2ecf20Sopenharmony_ci */ 2328c2ecf20Sopenharmony_ci if (dg->dst.resource == VMCI_INVALID_ID) { 2338c2ecf20Sopenharmony_ci dg = (struct vmci_datagram *)roundup( 2348c2ecf20Sopenharmony_ci (uintptr_t)dg + 1, PAGE_SIZE); 2358c2ecf20Sopenharmony_ci remaining_bytes = 2368c2ecf20Sopenharmony_ci (size_t)(dg_in_buffer + 2378c2ecf20Sopenharmony_ci current_dg_in_buffer_size - 2388c2ecf20Sopenharmony_ci (u8 *)dg); 2398c2ecf20Sopenharmony_ci continue; 2408c2ecf20Sopenharmony_ci } 2418c2ecf20Sopenharmony_ci 2428c2ecf20Sopenharmony_ci dg_in_size = VMCI_DG_SIZE_ALIGNED(dg); 2438c2ecf20Sopenharmony_ci 2448c2ecf20Sopenharmony_ci if (dg_in_size <= dg_in_buffer_size) { 2458c2ecf20Sopenharmony_ci int result; 2468c2ecf20Sopenharmony_ci 2478c2ecf20Sopenharmony_ci /* 2488c2ecf20Sopenharmony_ci * If the remaining bytes in the datagram 2498c2ecf20Sopenharmony_ci * buffer doesn't contain the complete 2508c2ecf20Sopenharmony_ci * datagram, we first make sure we have enough 2518c2ecf20Sopenharmony_ci * room for it and then we read the reminder 2528c2ecf20Sopenharmony_ci * of the datagram and possibly any following 2538c2ecf20Sopenharmony_ci * datagrams. 2548c2ecf20Sopenharmony_ci */ 2558c2ecf20Sopenharmony_ci if (dg_in_size > remaining_bytes) { 2568c2ecf20Sopenharmony_ci if (remaining_bytes != 2578c2ecf20Sopenharmony_ci current_dg_in_buffer_size) { 2588c2ecf20Sopenharmony_ci 2598c2ecf20Sopenharmony_ci /* 2608c2ecf20Sopenharmony_ci * We move the partial 2618c2ecf20Sopenharmony_ci * datagram to the front and 2628c2ecf20Sopenharmony_ci * read the reminder of the 2638c2ecf20Sopenharmony_ci * datagram and possibly 2648c2ecf20Sopenharmony_ci * following calls into the 2658c2ecf20Sopenharmony_ci * following bytes. 
2668c2ecf20Sopenharmony_ci */ 2678c2ecf20Sopenharmony_ci memmove(dg_in_buffer, dg_in_buffer + 2688c2ecf20Sopenharmony_ci current_dg_in_buffer_size - 2698c2ecf20Sopenharmony_ci remaining_bytes, 2708c2ecf20Sopenharmony_ci remaining_bytes); 2718c2ecf20Sopenharmony_ci dg = (struct vmci_datagram *) 2728c2ecf20Sopenharmony_ci dg_in_buffer; 2738c2ecf20Sopenharmony_ci } 2748c2ecf20Sopenharmony_ci 2758c2ecf20Sopenharmony_ci if (current_dg_in_buffer_size != 2768c2ecf20Sopenharmony_ci dg_in_buffer_size) 2778c2ecf20Sopenharmony_ci current_dg_in_buffer_size = 2788c2ecf20Sopenharmony_ci dg_in_buffer_size; 2798c2ecf20Sopenharmony_ci 2808c2ecf20Sopenharmony_ci ioread8_rep(vmci_dev->iobase + 2818c2ecf20Sopenharmony_ci VMCI_DATA_IN_ADDR, 2828c2ecf20Sopenharmony_ci vmci_dev->data_buffer + 2838c2ecf20Sopenharmony_ci remaining_bytes, 2848c2ecf20Sopenharmony_ci current_dg_in_buffer_size - 2858c2ecf20Sopenharmony_ci remaining_bytes); 2868c2ecf20Sopenharmony_ci } 2878c2ecf20Sopenharmony_ci 2888c2ecf20Sopenharmony_ci /* 2898c2ecf20Sopenharmony_ci * We special case event datagrams from the 2908c2ecf20Sopenharmony_ci * hypervisor. 2918c2ecf20Sopenharmony_ci */ 2928c2ecf20Sopenharmony_ci if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && 2938c2ecf20Sopenharmony_ci dg->dst.resource == VMCI_EVENT_HANDLER) { 2948c2ecf20Sopenharmony_ci result = vmci_event_dispatch(dg); 2958c2ecf20Sopenharmony_ci } else { 2968c2ecf20Sopenharmony_ci result = vmci_datagram_invoke_guest_handler(dg); 2978c2ecf20Sopenharmony_ci } 2988c2ecf20Sopenharmony_ci if (result < VMCI_SUCCESS) 2998c2ecf20Sopenharmony_ci dev_dbg(vmci_dev->dev, 3008c2ecf20Sopenharmony_ci "Datagram with resource (ID=0x%x) failed (err=%d)\n", 3018c2ecf20Sopenharmony_ci dg->dst.resource, result); 3028c2ecf20Sopenharmony_ci 3038c2ecf20Sopenharmony_ci /* On to the next datagram. 
*/ 3048c2ecf20Sopenharmony_ci dg = (struct vmci_datagram *)((u8 *)dg + 3058c2ecf20Sopenharmony_ci dg_in_size); 3068c2ecf20Sopenharmony_ci } else { 3078c2ecf20Sopenharmony_ci size_t bytes_to_skip; 3088c2ecf20Sopenharmony_ci 3098c2ecf20Sopenharmony_ci /* 3108c2ecf20Sopenharmony_ci * Datagram doesn't fit in datagram buffer of maximal 3118c2ecf20Sopenharmony_ci * size. We drop it. 3128c2ecf20Sopenharmony_ci */ 3138c2ecf20Sopenharmony_ci dev_dbg(vmci_dev->dev, 3148c2ecf20Sopenharmony_ci "Failed to receive datagram (size=%u bytes)\n", 3158c2ecf20Sopenharmony_ci dg_in_size); 3168c2ecf20Sopenharmony_ci 3178c2ecf20Sopenharmony_ci bytes_to_skip = dg_in_size - remaining_bytes; 3188c2ecf20Sopenharmony_ci if (current_dg_in_buffer_size != dg_in_buffer_size) 3198c2ecf20Sopenharmony_ci current_dg_in_buffer_size = dg_in_buffer_size; 3208c2ecf20Sopenharmony_ci 3218c2ecf20Sopenharmony_ci for (;;) { 3228c2ecf20Sopenharmony_ci ioread8_rep(vmci_dev->iobase + 3238c2ecf20Sopenharmony_ci VMCI_DATA_IN_ADDR, 3248c2ecf20Sopenharmony_ci vmci_dev->data_buffer, 3258c2ecf20Sopenharmony_ci current_dg_in_buffer_size); 3268c2ecf20Sopenharmony_ci if (bytes_to_skip <= current_dg_in_buffer_size) 3278c2ecf20Sopenharmony_ci break; 3288c2ecf20Sopenharmony_ci 3298c2ecf20Sopenharmony_ci bytes_to_skip -= current_dg_in_buffer_size; 3308c2ecf20Sopenharmony_ci } 3318c2ecf20Sopenharmony_ci dg = (struct vmci_datagram *)(dg_in_buffer + 3328c2ecf20Sopenharmony_ci bytes_to_skip); 3338c2ecf20Sopenharmony_ci } 3348c2ecf20Sopenharmony_ci 3358c2ecf20Sopenharmony_ci remaining_bytes = 3368c2ecf20Sopenharmony_ci (size_t) (dg_in_buffer + current_dg_in_buffer_size - 3378c2ecf20Sopenharmony_ci (u8 *)dg); 3388c2ecf20Sopenharmony_ci 3398c2ecf20Sopenharmony_ci if (remaining_bytes < VMCI_DG_HEADERSIZE) { 3408c2ecf20Sopenharmony_ci /* Get the next batch of datagrams. 
*/ 3418c2ecf20Sopenharmony_ci 3428c2ecf20Sopenharmony_ci ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, 3438c2ecf20Sopenharmony_ci vmci_dev->data_buffer, 3448c2ecf20Sopenharmony_ci current_dg_in_buffer_size); 3458c2ecf20Sopenharmony_ci dg = (struct vmci_datagram *)dg_in_buffer; 3468c2ecf20Sopenharmony_ci remaining_bytes = current_dg_in_buffer_size; 3478c2ecf20Sopenharmony_ci } 3488c2ecf20Sopenharmony_ci } 3498c2ecf20Sopenharmony_ci} 3508c2ecf20Sopenharmony_ci 3518c2ecf20Sopenharmony_ci/* 3528c2ecf20Sopenharmony_ci * Scans the notification bitmap for raised flags, clears them 3538c2ecf20Sopenharmony_ci * and handles the notifications. 3548c2ecf20Sopenharmony_ci */ 3558c2ecf20Sopenharmony_cistatic void vmci_process_bitmap(unsigned long data) 3568c2ecf20Sopenharmony_ci{ 3578c2ecf20Sopenharmony_ci struct vmci_guest_device *dev = (struct vmci_guest_device *)data; 3588c2ecf20Sopenharmony_ci 3598c2ecf20Sopenharmony_ci if (!dev->notification_bitmap) { 3608c2ecf20Sopenharmony_ci dev_dbg(dev->dev, "No bitmap present in %s\n", __func__); 3618c2ecf20Sopenharmony_ci return; 3628c2ecf20Sopenharmony_ci } 3638c2ecf20Sopenharmony_ci 3648c2ecf20Sopenharmony_ci vmci_dbell_scan_notification_entries(dev->notification_bitmap); 3658c2ecf20Sopenharmony_ci} 3668c2ecf20Sopenharmony_ci 3678c2ecf20Sopenharmony_ci/* 3688c2ecf20Sopenharmony_ci * Interrupt handler for legacy or MSI interrupt, or for first MSI-X 3698c2ecf20Sopenharmony_ci * interrupt (vector VMCI_INTR_DATAGRAM). 3708c2ecf20Sopenharmony_ci */ 3718c2ecf20Sopenharmony_cistatic irqreturn_t vmci_interrupt(int irq, void *_dev) 3728c2ecf20Sopenharmony_ci{ 3738c2ecf20Sopenharmony_ci struct vmci_guest_device *dev = _dev; 3748c2ecf20Sopenharmony_ci 3758c2ecf20Sopenharmony_ci /* 3768c2ecf20Sopenharmony_ci * If we are using MSI-X with exclusive vectors then we simply schedule 3778c2ecf20Sopenharmony_ci * the datagram tasklet, since we know the interrupt was meant for us. 
3788c2ecf20Sopenharmony_ci * Otherwise we must read the ICR to determine what to do. 3798c2ecf20Sopenharmony_ci */ 3808c2ecf20Sopenharmony_ci 3818c2ecf20Sopenharmony_ci if (dev->exclusive_vectors) { 3828c2ecf20Sopenharmony_ci tasklet_schedule(&dev->datagram_tasklet); 3838c2ecf20Sopenharmony_ci } else { 3848c2ecf20Sopenharmony_ci unsigned int icr; 3858c2ecf20Sopenharmony_ci 3868c2ecf20Sopenharmony_ci /* Acknowledge interrupt and determine what needs doing. */ 3878c2ecf20Sopenharmony_ci icr = ioread32(dev->iobase + VMCI_ICR_ADDR); 3888c2ecf20Sopenharmony_ci if (icr == 0 || icr == ~0) 3898c2ecf20Sopenharmony_ci return IRQ_NONE; 3908c2ecf20Sopenharmony_ci 3918c2ecf20Sopenharmony_ci if (icr & VMCI_ICR_DATAGRAM) { 3928c2ecf20Sopenharmony_ci tasklet_schedule(&dev->datagram_tasklet); 3938c2ecf20Sopenharmony_ci icr &= ~VMCI_ICR_DATAGRAM; 3948c2ecf20Sopenharmony_ci } 3958c2ecf20Sopenharmony_ci 3968c2ecf20Sopenharmony_ci if (icr & VMCI_ICR_NOTIFICATION) { 3978c2ecf20Sopenharmony_ci tasklet_schedule(&dev->bm_tasklet); 3988c2ecf20Sopenharmony_ci icr &= ~VMCI_ICR_NOTIFICATION; 3998c2ecf20Sopenharmony_ci } 4008c2ecf20Sopenharmony_ci 4018c2ecf20Sopenharmony_ci if (icr != 0) 4028c2ecf20Sopenharmony_ci dev_warn(dev->dev, 4038c2ecf20Sopenharmony_ci "Ignoring unknown interrupt cause (%d)\n", 4048c2ecf20Sopenharmony_ci icr); 4058c2ecf20Sopenharmony_ci } 4068c2ecf20Sopenharmony_ci 4078c2ecf20Sopenharmony_ci return IRQ_HANDLED; 4088c2ecf20Sopenharmony_ci} 4098c2ecf20Sopenharmony_ci 4108c2ecf20Sopenharmony_ci/* 4118c2ecf20Sopenharmony_ci * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION, 4128c2ecf20Sopenharmony_ci * which is for the notification bitmap. Will only get called if we are 4138c2ecf20Sopenharmony_ci * using MSI-X with exclusive vectors. 
4148c2ecf20Sopenharmony_ci */ 4158c2ecf20Sopenharmony_cistatic irqreturn_t vmci_interrupt_bm(int irq, void *_dev) 4168c2ecf20Sopenharmony_ci{ 4178c2ecf20Sopenharmony_ci struct vmci_guest_device *dev = _dev; 4188c2ecf20Sopenharmony_ci 4198c2ecf20Sopenharmony_ci /* For MSI-X we can just assume it was meant for us. */ 4208c2ecf20Sopenharmony_ci tasklet_schedule(&dev->bm_tasklet); 4218c2ecf20Sopenharmony_ci 4228c2ecf20Sopenharmony_ci return IRQ_HANDLED; 4238c2ecf20Sopenharmony_ci} 4248c2ecf20Sopenharmony_ci 4258c2ecf20Sopenharmony_ci/* 4268c2ecf20Sopenharmony_ci * Most of the initialization at module load time is done here. 4278c2ecf20Sopenharmony_ci */ 4288c2ecf20Sopenharmony_cistatic int vmci_guest_probe_device(struct pci_dev *pdev, 4298c2ecf20Sopenharmony_ci const struct pci_device_id *id) 4308c2ecf20Sopenharmony_ci{ 4318c2ecf20Sopenharmony_ci struct vmci_guest_device *vmci_dev; 4328c2ecf20Sopenharmony_ci void __iomem *iobase; 4338c2ecf20Sopenharmony_ci unsigned int capabilities; 4348c2ecf20Sopenharmony_ci unsigned int caps_in_use; 4358c2ecf20Sopenharmony_ci unsigned long cmd; 4368c2ecf20Sopenharmony_ci int vmci_err; 4378c2ecf20Sopenharmony_ci int error; 4388c2ecf20Sopenharmony_ci 4398c2ecf20Sopenharmony_ci dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n"); 4408c2ecf20Sopenharmony_ci 4418c2ecf20Sopenharmony_ci error = pcim_enable_device(pdev); 4428c2ecf20Sopenharmony_ci if (error) { 4438c2ecf20Sopenharmony_ci dev_err(&pdev->dev, 4448c2ecf20Sopenharmony_ci "Failed to enable VMCI device: %d\n", error); 4458c2ecf20Sopenharmony_ci return error; 4468c2ecf20Sopenharmony_ci } 4478c2ecf20Sopenharmony_ci 4488c2ecf20Sopenharmony_ci error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME); 4498c2ecf20Sopenharmony_ci if (error) { 4508c2ecf20Sopenharmony_ci dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); 4518c2ecf20Sopenharmony_ci return error; 4528c2ecf20Sopenharmony_ci } 4538c2ecf20Sopenharmony_ci 4548c2ecf20Sopenharmony_ci iobase = 
pcim_iomap_table(pdev)[0]; 4558c2ecf20Sopenharmony_ci 4568c2ecf20Sopenharmony_ci dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n", 4578c2ecf20Sopenharmony_ci (unsigned long)iobase, pdev->irq); 4588c2ecf20Sopenharmony_ci 4598c2ecf20Sopenharmony_ci vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL); 4608c2ecf20Sopenharmony_ci if (!vmci_dev) { 4618c2ecf20Sopenharmony_ci dev_err(&pdev->dev, 4628c2ecf20Sopenharmony_ci "Can't allocate memory for VMCI device\n"); 4638c2ecf20Sopenharmony_ci return -ENOMEM; 4648c2ecf20Sopenharmony_ci } 4658c2ecf20Sopenharmony_ci 4668c2ecf20Sopenharmony_ci vmci_dev->dev = &pdev->dev; 4678c2ecf20Sopenharmony_ci vmci_dev->exclusive_vectors = false; 4688c2ecf20Sopenharmony_ci vmci_dev->iobase = iobase; 4698c2ecf20Sopenharmony_ci 4708c2ecf20Sopenharmony_ci tasklet_init(&vmci_dev->datagram_tasklet, 4718c2ecf20Sopenharmony_ci vmci_dispatch_dgs, (unsigned long)vmci_dev); 4728c2ecf20Sopenharmony_ci tasklet_init(&vmci_dev->bm_tasklet, 4738c2ecf20Sopenharmony_ci vmci_process_bitmap, (unsigned long)vmci_dev); 4748c2ecf20Sopenharmony_ci 4758c2ecf20Sopenharmony_ci vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); 4768c2ecf20Sopenharmony_ci if (!vmci_dev->data_buffer) { 4778c2ecf20Sopenharmony_ci dev_err(&pdev->dev, 4788c2ecf20Sopenharmony_ci "Can't allocate memory for datagram buffer\n"); 4798c2ecf20Sopenharmony_ci return -ENOMEM; 4808c2ecf20Sopenharmony_ci } 4818c2ecf20Sopenharmony_ci 4828c2ecf20Sopenharmony_ci pci_set_master(pdev); /* To enable queue_pair functionality. */ 4838c2ecf20Sopenharmony_ci 4848c2ecf20Sopenharmony_ci /* 4858c2ecf20Sopenharmony_ci * Verify that the VMCI Device supports the capabilities that 4868c2ecf20Sopenharmony_ci * we need. If the device is missing capabilities that we would 4878c2ecf20Sopenharmony_ci * like to use, check for fallback capabilities and use those 4888c2ecf20Sopenharmony_ci * instead (so we can run a new VM on old hosts). 
Fail the load if 4898c2ecf20Sopenharmony_ci * a required capability is missing and there is no fallback. 4908c2ecf20Sopenharmony_ci * 4918c2ecf20Sopenharmony_ci * Right now, we need datagrams. There are no fallbacks. 4928c2ecf20Sopenharmony_ci */ 4938c2ecf20Sopenharmony_ci capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR); 4948c2ecf20Sopenharmony_ci if (!(capabilities & VMCI_CAPS_DATAGRAM)) { 4958c2ecf20Sopenharmony_ci dev_err(&pdev->dev, "Device does not support datagrams\n"); 4968c2ecf20Sopenharmony_ci error = -ENXIO; 4978c2ecf20Sopenharmony_ci goto err_free_data_buffer; 4988c2ecf20Sopenharmony_ci } 4998c2ecf20Sopenharmony_ci caps_in_use = VMCI_CAPS_DATAGRAM; 5008c2ecf20Sopenharmony_ci 5018c2ecf20Sopenharmony_ci /* 5028c2ecf20Sopenharmony_ci * Use 64-bit PPNs if the device supports. 5038c2ecf20Sopenharmony_ci * 5048c2ecf20Sopenharmony_ci * There is no check for the return value of dma_set_mask_and_coherent 5058c2ecf20Sopenharmony_ci * since this driver can handle the default mask values if 5068c2ecf20Sopenharmony_ci * dma_set_mask_and_coherent fails. 5078c2ecf20Sopenharmony_ci */ 5088c2ecf20Sopenharmony_ci if (capabilities & VMCI_CAPS_PPN64) { 5098c2ecf20Sopenharmony_ci dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5108c2ecf20Sopenharmony_ci use_ppn64 = true; 5118c2ecf20Sopenharmony_ci caps_in_use |= VMCI_CAPS_PPN64; 5128c2ecf20Sopenharmony_ci } else { 5138c2ecf20Sopenharmony_ci dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); 5148c2ecf20Sopenharmony_ci use_ppn64 = false; 5158c2ecf20Sopenharmony_ci } 5168c2ecf20Sopenharmony_ci 5178c2ecf20Sopenharmony_ci /* 5188c2ecf20Sopenharmony_ci * If the hardware supports notifications, we will use that as 5198c2ecf20Sopenharmony_ci * well. 
5208c2ecf20Sopenharmony_ci */ 5218c2ecf20Sopenharmony_ci if (capabilities & VMCI_CAPS_NOTIFICATIONS) { 5228c2ecf20Sopenharmony_ci vmci_dev->notification_bitmap = dma_alloc_coherent( 5238c2ecf20Sopenharmony_ci &pdev->dev, PAGE_SIZE, &vmci_dev->notification_base, 5248c2ecf20Sopenharmony_ci GFP_KERNEL); 5258c2ecf20Sopenharmony_ci if (!vmci_dev->notification_bitmap) { 5268c2ecf20Sopenharmony_ci dev_warn(&pdev->dev, 5278c2ecf20Sopenharmony_ci "Unable to allocate notification bitmap\n"); 5288c2ecf20Sopenharmony_ci } else { 5298c2ecf20Sopenharmony_ci memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); 5308c2ecf20Sopenharmony_ci caps_in_use |= VMCI_CAPS_NOTIFICATIONS; 5318c2ecf20Sopenharmony_ci } 5328c2ecf20Sopenharmony_ci } 5338c2ecf20Sopenharmony_ci 5348c2ecf20Sopenharmony_ci dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use); 5358c2ecf20Sopenharmony_ci 5368c2ecf20Sopenharmony_ci /* Let the host know which capabilities we intend to use. */ 5378c2ecf20Sopenharmony_ci iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR); 5388c2ecf20Sopenharmony_ci 5398c2ecf20Sopenharmony_ci /* Set up global device so that we can start sending datagrams */ 5408c2ecf20Sopenharmony_ci spin_lock_irq(&vmci_dev_spinlock); 5418c2ecf20Sopenharmony_ci vmci_dev_g = vmci_dev; 5428c2ecf20Sopenharmony_ci vmci_pdev = pdev; 5438c2ecf20Sopenharmony_ci spin_unlock_irq(&vmci_dev_spinlock); 5448c2ecf20Sopenharmony_ci 5458c2ecf20Sopenharmony_ci /* 5468c2ecf20Sopenharmony_ci * Register notification bitmap with device if that capability is 5478c2ecf20Sopenharmony_ci * used. 
	 */
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
		/* PPN of the notification bitmap page handed to the device. */
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
				 bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_bitmap;

	/* Enable device. */

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

	/*
	 * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
	 * legacy interrupts.
	 */
	error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
			PCI_IRQ_MSIX);
	if (error < 0) {
		/*
		 * Could not get the full exclusive MSI-X set; fall back to a
		 * single shared vector of whatever type is available.
		 */
		error = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (error < 0)
			goto err_remove_bitmap;
	} else {
		/* One dedicated vector per interrupt source. */
		vmci_dev->exclusive_vectors = true;
	}

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
			    IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			pci_irq_vector(pdev, 0), error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine. This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(pci_irq_vector(pdev, 1),
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				pci_irq_vector(pdev, 1), error);
			goto err_free_irq;
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

	/* Enable interrupts. */
	iowrite32(VMCI_CONTROL_INT_ENABLE,
		  vmci_dev->iobase + VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);

	vmci_call_vsock_callback(false);
	return 0;

	/*
	 * Error unwind: each label below undoes the setup steps performed
	 * before the corresponding goto, in reverse order of acquisition.
	 */
err_free_irq:
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	pci_free_irq_vectors(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		/*
		 * Reset the device before freeing the DMA'd bitmap so the
		 * host stops using it first.
		 */
		iowrite32(VMCI_CONTROL_RESET,
			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_remove_vmci_dev_g:
	/* Unpublish the singleton device pointers set earlier in probe. */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}

/*
 * vmci_guest_remove_device - PCI .remove callback.
 *
 * Tears down the guest device in reverse order of probe: unsubscribe the
 * context-id update event, unpublish the singleton device pointers, reset
 * the device, release interrupts, stop deferred work, and free the
 * notification bitmap and datagram buffer.
 */
static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	/* Clear the singleton pointers so new lookups see no device. */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate. For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	if (vmci_dev->exclusive_vectors)
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	pci_free_irq_vectors(pdev);

	/*
	 * IRQs are gone at this point, so the tasklets cannot be
	 * re-scheduled while we kill them.
	 */
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */

		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
}

/* PCI match table: the single VMware VMCI PCI function. */
static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name = KBUILD_MODNAME,
	.id_table = vmci_ids,
	.probe = vmci_guest_probe_device,
	.remove = vmci_guest_remove_device,
};

/* Register the guest-side PCI driver; returns 0 or a negative errno. */
int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

/* Unregister the guest-side PCI driver (invokes .remove for bound devices). */
void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}