// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_dmem_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2012 Damian Hobson-Garcia
 *
 * Based on uio_pdrv_genirq.c by Magnus Damm
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_dmem_genirq"
#define DMEM_MAP_ERROR (~0)

struct uio_dmem_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
	unsigned int dmem_region_start;
	unsigned int num_dmem_regions;
	void *dmem_region_vaddr[MAX_UIO_MAPS];
	struct mutex alloc_lock;
	unsigned int refcnt;
};

static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		void *addr;
		dma_addr_t dma_addr;

		if (!uiomem->size)
			break;

		/*
		 * Allocate through a local dma_addr_t: uiomem->addr is a
		 * phys_addr_t and may be wider than dma_addr_t, so writing
		 * through a casted pointer could fill only part of it.
		 */
		addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
					  &dma_addr, GFP_KERNEL);
		uiomem->addr = addr ? dma_addr : DMEM_MAP_ERROR;
		priv->dmem_region_vaddr[dmem_region++] = addr;
		++uiomem;
	}
	priv->refcnt++;

	mutex_unlock(&priv->alloc_lock);
	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}

static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);

	priv->refcnt--;
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		if (!uiomem->size)
			break;
		if (priv->dmem_region_vaddr[dmem_region]) {
			dma_free_coherent(&priv->pdev->dev, uiomem->size,
					  priv->dmem_region_vaddr[dmem_region],
					  uiomem->addr);
		}
		uiomem->addr = DMEM_MAP_ERROR;
		++dmem_region;
		++uiomem;
	}

	mutex_unlock(&priv->alloc_lock);
	return 0;
}
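/*
 * Userspace usage sketch (illustrative only, not part of the driver):
 * the UIO core exposes each populated entry of uioinfo->mem[] as a map
 * that can be mmap()ed through the character device, where map N is
 * selected by passing an offset of N * page_size. The device path,
 * map index, and region size below are assumptions for the example:
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	size_t sz = region_size;	// hypothetical region size
 *	// map_index counts the fixed regions first, then dynamic ones
 *	void *buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map_index * sysconf(_SC_PAGESIZE));
 *	// ... use the DMA-coherent buffer ...
 *	munmap(buf, sz);
 *	close(fd);	// last close frees it in uio_dmem_genirq_release()
 */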
static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	spin_lock(&priv->lock);
	if (!test_and_set_bit(0, &priv->flags))
		disable_irq_nosync(irq);
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks and concurrency
	 * with the irq handler on SMP systems.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (test_and_clear_bit(0, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!test_and_set_bit(0, &priv->flags))
			disable_irq_nosync(dev_info->irq);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
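/*
 * Interrupt handling sketch from userspace (illustrative only): a
 * blocking read() on the UIO device returns the interrupt count once
 * uio_dmem_genirq_handler() has fired and masked the irq; writing a
 * nonzero 32-bit value re-enables it via uio_dmem_genirq_irqcontrol().
 * The fd is assumed to come from an open("/dev/uioX") as sketched above:
 *
 *	uint32_t count;
 *	int32_t one = 1;
 *
 *	while (read(fd, &count, sizeof(count)) == sizeof(count)) {
 *		// ... device-specific acknowledge of the interrupt ...
 *		write(fd, &one, sizeof(one));	// irq_on != 0: unmask
 *	}
 */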
static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
	struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct uio_info *uioinfo = pdata ? &pdata->uioinfo : NULL;
	struct uio_dmem_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		/* alloc uioinfo for one device */
		uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
		if (!uioinfo) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "unable to kmalloc\n");
			goto bad2;
		}
		uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
					       pdev->dev.of_node);
		uioinfo->version = "devicetree";
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		goto bad0;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		goto bad0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "unable to kmalloc\n");
		goto bad0;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "unable to set DMA mask\n");
		goto bad1;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;
	mutex_init(&priv->alloc_lock);

	if (!uioinfo->irq) {
		/* Multiple IRQs are not supported */
		ret = platform_get_irq(pdev, 0);
		if (ret == -ENXIO && pdev->dev.of_node)
			ret = UIO_IRQ_NONE;
		else if (ret < 0)
			goto bad1;
		uioinfo->irq = ret;
	}

	if (uioinfo->irq) {
		struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

		/*
		 * If this is a level interrupt, don't do lazy disable.
		 * Otherwise the irq will fire again immediately, since
		 * clearing the actual cause at the device level is done
		 * in userspace. irqd_is_level_type() isn't used because
		 * the trigger type isn't valid until the irq has been
		 * configured.
		 */
		if (irq_data &&
		    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
			dev_dbg(&pdev->dev, "disable lazy unmask\n");
			irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
		}
	}

	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
				 __stringify(MAX_UIO_MAPS)
				 " I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		++uiomem;
	}

	priv->dmem_region_start = uiomem - &uioinfo->mem[0];
	/* Without platform data (e.g. a pure device tree probe) there
	 * are no dynamic regions to describe.
	 */
	priv->num_dmem_regions = pdata ? pdata->num_dynamic_regions : 0;

	for (i = 0; i < priv->num_dmem_regions; ++i) {
		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
				 __stringify(MAX_UIO_MAPS)
				 " dynamic and fixed memory regions.\n");
			break;
		}
		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = DMEM_MAP_ERROR;
		uiomem->size = pdata->dynamic_region_sizes[i];
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_dmem_genirq_handler;
	uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
	uioinfo->open = uio_dmem_genirq_open;
	uioinfo->release = uio_dmem_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		pm_runtime_disable(&pdev->dev);
		goto bad1;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
 bad1:
	kfree(priv);
 bad0:
	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(uioinfo);
 bad2:
	return ret;
}

static int uio_dmem_genirq_remove(struct platform_device *pdev)
{
	struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev);

	uio_unregister_device(priv->uioinfo);
	pm_runtime_disable(&pdev->dev);

	priv->uioinfo->handler = NULL;
	priv->uioinfo->irqcontrol = NULL;

	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(priv->uioinfo);

	kfree(priv);
	return 0;
}

static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, i.e. before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
	.runtime_suspend = uio_dmem_genirq_runtime_nop,
	.runtime_resume = uio_dmem_genirq_runtime_nop,
};
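/*
 * Platform data sketch (hypothetical board code, not part of this
 * file): probe() above takes the dynamic region sizes from a struct
 * uio_dmem_genirq_pdata. The device name and sizes here are made up:
 *
 *	static unsigned int example_region_sizes[] = { SZ_1M, SZ_64K };
 *
 *	static struct uio_dmem_genirq_pdata example_pdata = {
 *		.uioinfo = {
 *			.name = "example",
 *			.version = "0",
 *		},
 *		.dynamic_region_sizes = example_region_sizes,
 *		.num_dynamic_regions = ARRAY_SIZE(example_region_sizes),
 *	};
 *
 * attached to a "uio_dmem_genirq" platform device together with its
 * fixed IORESOURCE_MEM/IORESOURCE_IRQ resources, e.g. via
 * platform_device_register_resndata().
 */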
#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
	{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif

static struct platform_driver uio_dmem_genirq = {
	.probe = uio_dmem_genirq_probe,
	.remove = uio_dmem_genirq_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_dmem_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_dmem_genirq);

MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
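/*
 * Device tree sketch (hypothetical): uio_of_genirq_match is empty, so
 * nothing binds through the OF table as-is. A platform would add its
 * own compatible string, e.g.:
 *
 *	static const struct of_device_id uio_of_genirq_match[] = {
 *		{ .compatible = "vendor,example-uio" },
 *		{ },
 *	};
 *
 * and describe the device with a node such as:
 *
 *	uio@f8000000 {
 *		compatible = "vendor,example-uio";
 *		reg = <0xf8000000 0x1000>;
 *		interrupts = <0 29 4>;
 *	};
 *
 * Note that the dynamic regions are still described by platform data;
 * a pure device tree probe only gets the fixed "reg" mappings.
 */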