// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic implementation of a polled input device
 *
 * Copyright (c) 2007 Dmitry Torokhov
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/input-polldev.h>

MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Generic implementation of a polled input device");
MODULE_LICENSE("GPL v2");

static void input_polldev_queue_work(struct input_polled_dev *dev)
{
	unsigned long delay;

	delay = msecs_to_jiffies(dev->poll_interval);
	if (delay >= HZ)
		delay = round_jiffies_relative(delay);

	queue_delayed_work(system_freezable_wq, &dev->work, delay);
}

static void input_polled_device_work(struct work_struct *work)
{
	struct input_polled_dev *dev =
		container_of(work, struct input_polled_dev, work.work);

	dev->poll(dev);
	input_polldev_queue_work(dev);
}

static int input_open_polled_device(struct input_dev *input)
{
	struct input_polled_dev *dev = input_get_drvdata(input);

	if (dev->open)
		dev->open(dev);

	/* Only start polling if polling is enabled */
	if (dev->poll_interval > 0) {
		dev->poll(dev);
		input_polldev_queue_work(dev);
	}

	return 0;
}

static void input_close_polled_device(struct input_dev *input)
{
	struct input_polled_dev *dev = input_get_drvdata(input);

	cancel_delayed_work_sync(&dev->work);

	if (dev->close)
		dev->close(dev);
}
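/*
 * Example (illustrative sketch only, not part of this file): the poll()
 * callback supplied by a driver is invoked from the work item above and
 * from input_open_polled_device().  It typically samples the hardware and
 * reports the result through the associated input device.  The names
 * foo_poll() and foo_read_button() below are hypothetical.
 *
 *	static void foo_poll(struct input_polled_dev *polldev)
 *	{
 *		struct input_dev *input = polldev->input;
 *
 *		input_report_key(input, KEY_POWER, foo_read_button());
 *		input_sync(input);
 *	}
 */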
/* SYSFS interface */

static ssize_t input_polldev_get_poll(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval);
}

static ssize_t input_polldev_set_poll(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);
	struct input_dev *input = polldev->input;
	unsigned int interval;
	int err;

	err = kstrtouint(buf, 0, &interval);
	if (err)
		return err;

	if (interval < polldev->poll_interval_min)
		return -EINVAL;

	if (interval > polldev->poll_interval_max)
		return -EINVAL;

	mutex_lock(&input->mutex);

	polldev->poll_interval = interval;

	if (input->users) {
		cancel_delayed_work_sync(&polldev->work);
		if (polldev->poll_interval > 0)
			input_polldev_queue_work(polldev);
	}

	mutex_unlock(&input->mutex);

	return count;
}

static DEVICE_ATTR(poll, S_IRUGO | S_IWUSR, input_polldev_get_poll,
					    input_polldev_set_poll);

static ssize_t input_polldev_get_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval_max);
}

static DEVICE_ATTR(max, S_IRUGO, input_polldev_get_max, NULL);

static ssize_t input_polldev_get_min(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval_min);
}

static DEVICE_ATTR(min, S_IRUGO, input_polldev_get_min, NULL);

static struct attribute *sysfs_attrs[] = {
	&dev_attr_poll.attr,
	&dev_attr_max.attr,
	&dev_attr_min.attr,
	NULL
};

static struct attribute_group input_polldev_attribute_group = {
	.attrs = sysfs_attrs
};

static const struct attribute_group *input_polldev_attribute_groups[] = {
	&input_polldev_attribute_group,
	NULL
};

/**
 * input_allocate_polled_device - allocate memory for polled device
 *
 * The function allocates memory for a polled device and also
 * for an input device associated with this polled device.
 */
struct input_polled_dev *input_allocate_polled_device(void)
{
	struct input_polled_dev *dev;

	dev = kzalloc(sizeof(struct input_polled_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->input = input_allocate_device();
	if (!dev->input) {
		kfree(dev);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL(input_allocate_polled_device);

struct input_polled_devres {
	struct input_polled_dev *polldev;
};

static int devm_input_polldev_match(struct device *dev, void *res, void *data)
{
	struct input_polled_devres *devres = res;

	return devres->polldev == data;
}

static void devm_input_polldev_release(struct device *dev, void *res)
{
	struct input_polled_devres *devres = res;
	struct input_polled_dev *polldev = devres->polldev;

	dev_dbg(dev, "%s: dropping reference/freeing %s\n",
		__func__, dev_name(&polldev->input->dev));

	input_put_device(polldev->input);
	kfree(polldev);
}

static void devm_input_polldev_unregister(struct device *dev, void *res)
{
	struct input_polled_devres *devres = res;
	struct input_polled_dev *polldev = devres->polldev;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&polldev->input->dev));
	input_unregister_device(polldev->input);

	/*
	 * Note that we are still holding extra reference to the input
	 * device so it will stick around until devm_input_polldev_release()
	 * is called.
	 */
}

/**
 * devm_input_allocate_polled_device - allocate managed polled device
 * @dev: device owning the polled device being created
 *
 * Returns prepared &struct input_polled_dev or %NULL.
 *
 * Managed polled input devices do not need to be explicitly unregistered
 * or freed as it will be done automatically when owner device unbinds
 * from its driver (or binding fails). Once such managed polled device
 * is allocated, it is ready to be set up and registered in the same
 * fashion as regular polled input devices (using
 * input_register_polled_device() function).
 *
 * If you want to manually unregister and free such managed polled devices,
 * it can be still done by calling input_unregister_polled_device() and
 * input_free_polled_device(), although it is rarely needed.
 *
 * NOTE: the owner device is set up as parent of input device and users
 * should not override it.
 */
struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev)
{
	struct input_polled_dev *polldev;
	struct input_polled_devres *devres;

	devres = devres_alloc(devm_input_polldev_release, sizeof(*devres),
			      GFP_KERNEL);
	if (!devres)
		return NULL;

	polldev = input_allocate_polled_device();
	if (!polldev) {
		devres_free(devres);
		return NULL;
	}

	polldev->input->dev.parent = dev;
	polldev->devres_managed = true;

	devres->polldev = polldev;
	devres_add(dev, devres);

	return polldev;
}
EXPORT_SYMBOL(devm_input_allocate_polled_device);
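/*
 * Example (illustrative sketch only, not part of this file): a probe()
 * routine using the managed allocator above.  No explicit unregister or
 * free is needed on the error or remove paths; devres tears the device
 * down when the owner device unbinds.  foo_probe(), foo_poll() and the
 * "Foo Polled Keys" name are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct input_polled_dev *polldev;
 *
 *		polldev = devm_input_allocate_polled_device(&pdev->dev);
 *		if (!polldev)
 *			return -ENOMEM;
 *
 *		polldev->poll = foo_poll;
 *		polldev->poll_interval = 100;
 *
 *		polldev->input->name = "Foo Polled Keys";
 *		input_set_capability(polldev->input, EV_KEY, KEY_POWER);
 *
 *		return input_register_polled_device(polldev);
 *	}
 *
 * poll_interval is expressed in milliseconds; it is converted with
 * msecs_to_jiffies() when the delayed work is queued.
 */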
/**
 * input_free_polled_device - free memory allocated for polled device
 * @dev: device to free
 *
 * The function frees memory allocated for polling device and drops
 * reference to the associated input device.
 */
void input_free_polled_device(struct input_polled_dev *dev)
{
	if (dev) {
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->input->dev.parent,
						devm_input_polldev_release,
						devm_input_polldev_match,
						dev));
		input_put_device(dev->input);
		kfree(dev);
	}
}
EXPORT_SYMBOL(input_free_polled_device);

/**
 * input_register_polled_device - register polled device
 * @dev: device to register
 *
 * The function registers previously initialized polled input device
 * with input layer. The device should be allocated with call to
 * input_allocate_polled_device(). Callers should also set up poll()
 * method and set up capabilities (id, name, phys, bits) of the
 * corresponding input_dev structure.
 */
int input_register_polled_device(struct input_polled_dev *dev)
{
	struct input_polled_devres *devres = NULL;
	struct input_dev *input = dev->input;
	int error;

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_polldev_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->polldev = dev;
	}

	input_set_drvdata(input, dev);
	INIT_DELAYED_WORK(&dev->work, input_polled_device_work);

	if (!dev->poll_interval)
		dev->poll_interval = 500;
	if (!dev->poll_interval_max)
		dev->poll_interval_max = dev->poll_interval;

	input->open = input_open_polled_device;
	input->close = input_close_polled_device;

	input->dev.groups = input_polldev_attribute_groups;

	error = input_register_device(input);
	if (error) {
		devres_free(devres);
		return error;
	}

	/*
	 * Take extra reference to the underlying input device so
	 * that it survives call to input_unregister_polled_device()
	 * and is deleted only after input_free_polled_device()
	 * has been invoked. This is needed to ease task of freeing
	 * sparse keymaps.
	 */
	input_get_device(input);

	if (dev->devres_managed) {
		dev_dbg(input->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&input->dev));
		devres_add(input->dev.parent, devres);
	}

	return 0;
}
EXPORT_SYMBOL(input_register_polled_device);
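/*
 * Example (illustrative sketch only, not part of this file): registering a
 * non-managed polled device.  Setting poll_interval_min/poll_interval_max
 * bounds the values userspace may later write to the "poll" sysfs
 * attribute; on registration failure the device must be released with
 * input_free_polled_device().  foo_setup() and foo_poll() are hypothetical.
 *
 *	static int foo_setup(void)
 *	{
 *		struct input_polled_dev *polldev;
 *		int error;
 *
 *		polldev = input_allocate_polled_device();
 *		if (!polldev)
 *			return -ENOMEM;
 *
 *		polldev->poll = foo_poll;
 *		polldev->poll_interval = 50;
 *		polldev->poll_interval_min = 10;
 *		polldev->poll_interval_max = 1000;
 *
 *		polldev->input->name = "Foo Polled Device";
 *		input_set_capability(polldev->input, EV_KEY, KEY_POWER);
 *
 *		error = input_register_polled_device(polldev);
 *		if (error) {
 *			input_free_polled_device(polldev);
 *			return error;
 *		}
 *
 *		return 0;
 *	}
 */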
/**
 * input_unregister_polled_device - unregister polled device
 * @dev: device to unregister
 *
 * The function unregisters previously registered polled input
 * device from input layer. Polling is stopped and device is
 * ready to be freed with call to input_free_polled_device().
 */
void input_unregister_polled_device(struct input_polled_dev *dev)
{
	if (dev->devres_managed)
		WARN_ON(devres_destroy(dev->input->dev.parent,
					devm_input_polldev_unregister,
					devm_input_polldev_match,
					dev));

	input_unregister_device(dev->input);
}
EXPORT_SYMBOL(input_unregister_polled_device);
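/*
 * Example (illustrative sketch only, not part of this file): tearing down a
 * non-managed polled device.  Unregistering stops polling; the extra
 * reference taken at registration time keeps the input device alive until
 * input_free_polled_device() drops it.  foo_teardown() is hypothetical.
 *
 *	static void foo_teardown(struct input_polled_dev *polldev)
 *	{
 *		input_unregister_polled_device(polldev);
 *		input_free_polled_device(polldev);
 *	}
 */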