// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/processor.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

/*
 * Datagram buffers for DMA send/receive must accommodate at least
 * a maximum sized datagram and the header.
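 * The extra page holds the struct vmci_data_in_out_header that describes
 * each transfer; see vmci_read_data() and vmci_write_data().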
 */
#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

struct vmci_guest_device {
	struct device *dev;	/* PCI device we are attached to */
	void __iomem *iobase;
	void __iomem *mmio_base;

	bool exclusive_vectors;

	struct wait_queue_head inout_wq;

	void *data_buffer;
	dma_addr_t data_buffer_base;
	void *tx_buffer;
	dma_addr_t tx_buffer_base;
	void *notification_bitmap;
	dma_addr_t notification_base;
};

static bool use_ppn64;

bool vmci_use_ppn64(void)
{
	return use_ppn64;
}

/* vmci_dev singleton device and supporting data */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}

u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;
		get_cid_msg.dst =
		    vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
				     VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}

static unsigned int vmci_read_reg(struct vmci_guest_device *dev, u32 reg)
{
	if (dev->mmio_base != NULL)
		return readl(dev->mmio_base + reg);
	return ioread32(dev->iobase + reg);
}
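
/*
 * Mirror of vmci_read_reg(): write a device register through the MMIO BAR
 * when it was mapped, or through the legacy I/O port window otherwise.
 */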
static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
{
	if (dev->mmio_base != NULL)
		writel(val, dev->mmio_base + reg);
	else
		iowrite32(val, dev->iobase + reg);
}

static void vmci_read_data(struct vmci_guest_device *vmci_dev,
			   void *dest, size_t size)
{
	if (vmci_dev->mmio_base == NULL)
		ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
			    dest, size);
	else {
		/*
		 * For DMA datagrams, the data_buffer will contain the header on the
		 * first page, followed by the incoming datagram(s) on the following
		 * pages. The header uses an S/G element immediately following the
		 * header on the first page to point to the data area.
		 */
		struct vmci_data_in_out_header *buffer_header = vmci_dev->data_buffer;
		struct vmci_sg_elem *sg_array = (struct vmci_sg_elem *)(buffer_header + 1);
		size_t buffer_offset = dest - vmci_dev->data_buffer;

		buffer_header->opcode = 1;
		buffer_header->size = 1;
		buffer_header->busy = 0;
		sg_array[0].addr = vmci_dev->data_buffer_base + buffer_offset;
		sg_array[0].size = size;

		vmci_write_reg(vmci_dev, lower_32_bits(vmci_dev->data_buffer_base),
			       VMCI_DATA_IN_LOW_ADDR);

		wait_event(vmci_dev->inout_wq, buffer_header->busy == 1);
	}
}

static int vmci_write_data(struct vmci_guest_device *dev,
			   struct vmci_datagram *dg)
{
	int result;

	if (dev->mmio_base != NULL) {
		struct vmci_data_in_out_header *buffer_header = dev->tx_buffer;
		u8 *dg_out_buffer = (u8 *)(buffer_header + 1);

		if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE)
			return VMCI_ERROR_INVALID_ARGS;

		/*
		 * Initialize send buffer with outgoing datagram
		 * and set up header for inline data. Device will
		 * not access buffer asynchronously - only after
		 * the write to VMCI_DATA_OUT_LOW_ADDR.
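		 * The device clears the busy flag in the header once it has
		 * consumed the datagram; spin_until_cond() below polls for that.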
		 */
		memcpy(dg_out_buffer, dg, VMCI_DG_SIZE(dg));
		buffer_header->opcode = 0;
		buffer_header->size = VMCI_DG_SIZE(dg);
		buffer_header->busy = 1;

		vmci_write_reg(dev, lower_32_bits(dev->tx_buffer_base),
			       VMCI_DATA_OUT_LOW_ADDR);

		/* Caller holds a spinlock, so cannot block. */
		spin_until_cond(buffer_header->busy == 0);

		result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
		if (result == VMCI_SUCCESS)
			result = (int)buffer_header->result;
	} else {
		iowrite8_rep(dev->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
	}

	return result;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Need to acquire spinlock on the device because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple
	 * VCPUs. Acquiring the spinlock precludes that
	 * possibility. Disabling interrupts to avoid incoming
	 * datagrams during a "rep out" and possibly ending up in
	 * this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		vmci_write_data(vmci_dev_g, dg);
		result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);

/*
 * Gets called with the new context id when the context is updated or
 * resumed.
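 * The new id is cached in vm_context_id for vmci_get_vm_context_id().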
 */
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload =
				vmci_event_data_const_payload(event_data);

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns 0 if
 * required hypercalls (or fallback hypercalls) are supported by the host,
 * an error code otherwise.
 */
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kzalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/* Checks that hypercalls are supported */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need the vector. There are no fallbacks. */
	return result ? 0 : -ENXIO;
}

/*
 * Reads datagrams from the device and dispatches them. For IO port
 * based access to the device, we always start reading datagrams into
 * only the first page of the datagram buffer. If the datagrams don't
 * fit into one page, we use the maximum datagram buffer size for the
 * remainder of the invocation. This is a simple heuristic for not
 * penalizing small datagrams. For DMA-based datagrams, we always
 * use the maximum datagram buffer size, since there is no performance
 * penalty for doing so.
 *
 * This function assumes that it has exclusive access to the data
 * in register(s) for the duration of the call.
 */
static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
{
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size;
	size_t remaining_bytes;
	bool is_io_port = vmci_dev->mmio_base == NULL;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	if (!is_io_port) {
		/* For mmio, the first page is used for the header. */
		dg_in_buffer += PAGE_SIZE;

		/*
		 * For DMA-based datagram operations, there is no performance
		 * penalty for reading the maximum buffer size.
		 */
		current_dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	} else {
		current_dg_in_buffer_size = PAGE_SIZE;
	}
	vmci_read_data(vmci_dev, dg_in_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	/*
	 * Read through the buffer until an invalid datagram header is
	 * encountered. The exit condition for datagrams read through
	 * VMCI_DATA_IN_ADDR is a bit more complicated, since a datagram
	 * can start on any page boundary in the buffer.
	 */
	while (dg->dst.resource != VMCI_INVALID_ID ||
	       (is_io_port && remaining_bytes > PAGE_SIZE)) {
		unsigned dg_in_size;

		/*
		 * If using VMCI_DATA_IN_ADDR, skip to the next page
		 * as a datagram can start on any page boundary.
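		 * (An invalid destination resource in the middle of the buffer
		 * therefore marks page padding, not the end of the batch.)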
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram
			 * buffer don't contain the complete
			 * datagram, we first make sure we have enough
			 * room for it and then we read the remainder
			 * of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {

					/*
					 * We move the partial
					 * datagram to the front and
					 * read the remainder of the
					 * datagram and possibly
					 * following calls into the
					 * following bytes.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
						dg_in_buffer_size;

				vmci_read_data(vmci_dev,
					       dg_in_buffer +
					       remaining_bytes,
					       current_dg_in_buffer_size -
					       remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
			 */
			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				vmci_read_data(vmci_dev, dg_in_buffer,
					       current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
			(size_t) (dg_in_buffer + current_dg_in_buffer_size -
				  (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */

			vmci_read_data(vmci_dev, dg_in_buffer,
				       current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}

/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(struct vmci_guest_device *dev)
{
	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/*
	 * If we are using MSI-X with exclusive vectors then we simply call
	 * vmci_dispatch_dgs(), since we know the interrupt was meant for us.
	 * Otherwise we must read the ICR to determine what to do.
	 */

	if (dev->exclusive_vectors) {
		vmci_dispatch_dgs(dev);
	} else {
		unsigned int icr;

		/* Acknowledge interrupt and determine what needs doing. */
		icr = vmci_read_reg(dev, VMCI_ICR_ADDR);
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			vmci_dispatch_dgs(dev);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			vmci_process_bitmap(dev);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr & VMCI_ICR_DMA_DATAGRAM) {
			wake_up_all(&dev->inout_wq);
			icr &= ~VMCI_ICR_DMA_DATAGRAM;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/* For MSI-X we can just assume it was meant for us. */
	vmci_process_bitmap(dev);

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_DMA_DATAGRAM,
 * which is for the completion of a DMA datagram send or receive operation.
 * Will only get called if we are using MSI-X with exclusive vectors.
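 * Completion is signalled by waking any thread sleeping on inout_wq in
 * vmci_read_data().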
 */
static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	wake_up_all(&dev->inout_wq);

	return IRQ_HANDLED;
}

static void vmci_free_dg_buffers(struct vmci_guest_device *vmci_dev)
{
	if (vmci_dev->mmio_base != NULL) {
		if (vmci_dev->tx_buffer != NULL)
			dma_free_coherent(vmci_dev->dev,
					  VMCI_DMA_DG_BUFFER_SIZE,
					  vmci_dev->tx_buffer,
					  vmci_dev->tx_buffer_base);
		if (vmci_dev->data_buffer != NULL)
			dma_free_coherent(vmci_dev->dev,
					  VMCI_DMA_DG_BUFFER_SIZE,
					  vmci_dev->data_buffer,
					  vmci_dev->data_buffer_base);
	} else {
		vfree(vmci_dev->data_buffer);
	}
}

/*
 * Most of the initialization at module load time is done here.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase = NULL;
	void __iomem *mmio_base = NULL;
	unsigned int num_irq_vectors;
	unsigned int capabilities;
	unsigned int caps_in_use;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	/*
	 * The VMCI device with mmio access to registers requests 256KB
	 * for BAR1. If present, driver will use new VMCI device
	 * functionality for register access and datagram send/recv.
	 */

	if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) {
		dev_info(&pdev->dev, "MMIO register access is available\n");
		mmio_base = pci_iomap_range(pdev, 1, VMCI_MMIO_ACCESS_OFFSET,
					    VMCI_MMIO_ACCESS_SIZE);
		/*
		 * If the map fails, we fall back to IOIO access.
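		 * (Not possible on arm64, which has no port I/O; that case is
		 * rejected below.)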
		 */
		if (!mmio_base)
			dev_warn(&pdev->dev, "Failed to map MMIO register access\n");
	}

	if (!mmio_base) {
		if (IS_ENABLED(CONFIG_ARM64)) {
			dev_err(&pdev->dev, "MMIO base is invalid\n");
			return -ENXIO;
		}
		error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
		if (error) {
			dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
			return error;
		}
		iobase = pcim_iomap_table(pdev)[0];
	}

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;
	vmci_dev->mmio_base = mmio_base;

	init_waitqueue_head(&vmci_dev->inout_wq);

	if (mmio_base != NULL) {
		vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
							 &vmci_dev->tx_buffer_base,
							 GFP_KERNEL);
		if (!vmci_dev->tx_buffer) {
			dev_err(&pdev->dev,
				"Can't allocate memory for datagram tx buffer\n");
			return -ENOMEM;
		}

		vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
							   &vmci_dev->data_buffer_base,
							   GFP_KERNEL);
	} else {
		vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	}
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		error = -ENOMEM;
		goto err_free_data_buffers;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = vmci_read_reg(vmci_dev, VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffers;
	}
	caps_in_use = VMCI_CAPS_DATAGRAM;

	/*
	 * Use 64-bit PPNs if the device supports them.
	 *
	 * There is no check for the return value of dma_set_mask_and_coherent
	 * since this driver can handle the default mask values if
	 * dma_set_mask_and_coherent fails.
	 */
	if (capabilities & VMCI_CAPS_PPN64) {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		use_ppn64 = true;
		caps_in_use |= VMCI_CAPS_PPN64;
	} else {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		use_ppn64 = false;
	}

	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap)
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		else
			caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
	}

	if (mmio_base != NULL) {
		if (capabilities & VMCI_CAPS_DMA_DATAGRAM) {
			caps_in_use |= VMCI_CAPS_DMA_DATAGRAM;
		} else {
			dev_err(&pdev->dev,
				"Missing capability: VMCI_CAPS_DMA_DATAGRAM\n");
			error = -ENXIO;
			goto err_free_notification_bitmap;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);

	/* Let the host know which capabilities we intend to use. */
	vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR);

	if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
		/* Let the device know the size for pages passed down. */
		vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT);

		/*
		 * Configure the high order parts of the data in/out buffers.
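		 * The low order parts are programmed per transfer in
		 * vmci_read_data() and vmci_write_data().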
		 */
		vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->data_buffer_base),
			       VMCI_DATA_IN_HIGH_ADDR);
		vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->tx_buffer_base),
			       VMCI_DATA_OUT_HIGH_ADDR);
	}

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
				 bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_vmci_dev_g;

	/* Enable device. */

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

	/*
	 * Enable interrupts. Try MSI-X first, then MSI, and then fall back on
	 * legacy interrupts.
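	 * With MMIO register access we also request a vector for DMA datagram
	 * completions (VMCI_MAX_INTRS instead of VMCI_MAX_INTRS_NOTIFICATION).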
	 */
	if (vmci_dev->mmio_base != NULL)
		num_irq_vectors = VMCI_MAX_INTRS;
	else
		num_irq_vectors = VMCI_MAX_INTRS_NOTIFICATION;
	error = pci_alloc_irq_vectors(pdev, num_irq_vectors, num_irq_vectors,
				      PCI_IRQ_MSIX);
	if (error < 0) {
		error = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (error < 0)
			goto err_unsubscribe_event;
	} else {
		vmci_dev->exclusive_vectors = true;
	}

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
				     vmci_interrupt, IRQF_SHARED,
				     KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			pci_irq_vector(pdev, 0), error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine. This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
					     vmci_interrupt_bm, 0,
					     KBUILD_MODNAME, vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				pci_irq_vector(pdev, 1), error);
			goto err_free_irq;
		}
		if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
			error = request_threaded_irq(pci_irq_vector(pdev, 2),
						     NULL,
						     vmci_interrupt_dma_datagram,
						     0, KBUILD_MODNAME,
						     vmci_dev);
			if (error) {
				dev_err(&pdev->dev,
					"Failed to allocate irq %u: %d\n",
					pci_irq_vector(pdev, 2), error);
				goto err_free_bm_irq;
			}
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/*
	 * Enable specific interrupt bits.
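	 * Only unmask the interrupt causes that match the capabilities
	 * negotiated above.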
	 */
	cmd = VMCI_IMR_DATAGRAM;
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM)
		cmd |= VMCI_IMR_DMA_DATAGRAM;
	vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR);

	/* Enable interrupts. */
	vmci_write_reg(vmci_dev, VMCI_CONTROL_INT_ENABLE, VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);

	vmci_call_vsock_callback(false);
	return 0;

err_free_bm_irq:
	if (vmci_dev->exclusive_vectors)
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);

err_free_irq:
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);

err_disable_msi:
	pci_free_irq_vectors(pdev);

err_unsubscribe_event:
	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_notification_bitmap:
	if (vmci_dev->notification_bitmap) {
		vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_free_data_buffers:
	vmci_free_dg_buffers(vmci_dev);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}

static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate. For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	if (vmci_dev->exclusive_vectors) {
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);
		if (vmci_dev->mmio_base != NULL)
			free_irq(pci_irq_vector(pdev, 2), vmci_dev);
	}
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	pci_free_irq_vectors(pdev);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vmci_free_dg_buffers(vmci_dev);

	if (vmci_dev->mmio_base != NULL)
		pci_iounmap(pdev, vmci_dev->mmio_base);

	/* The rest are managed resources and will be freed by PCI core */
}

static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};

int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}