// SPDX-License-Identifier: GPL-2.0-only
/*
 * Analog Devices Generic AXI ADC IP core
 * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
 *
 * Copyright 2012-2020 Analog Devices Inc.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dmaengine.h>

#include <linux/fpga/adi-axi-common.h>
#include <linux/iio/adc/adi-axi-adc.h>

/*
 * Register definitions:
 *   https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
 */

/* ADC controls */

#define ADI_AXI_REG_RSTN			0x0040
#define   ADI_AXI_REG_RSTN_CE_N			BIT(2)
#define   ADI_AXI_REG_RSTN_MMCM_RSTN		BIT(1)
#define   ADI_AXI_REG_RSTN_RSTN			BIT(0)

/* ADC Channel controls */

#define ADI_AXI_REG_CHAN_CTRL(c)		(0x0400 + (c) * 0x40)
#define   ADI_AXI_REG_CHAN_CTRL_LB_OWR		BIT(11)
#define   ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR	BIT(10)
#define   ADI_AXI_REG_CHAN_CTRL_IQCOR_EN	BIT(9)
#define   ADI_AXI_REG_CHAN_CTRL_DCFILT_EN	BIT(8)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT	BIT(6)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_TYPE	BIT(5)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_EN		BIT(4)
#define   ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR	BIT(1)
#define   ADI_AXI_REG_CHAN_CTRL_ENABLE		BIT(0)

#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS		\
	(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT |	\
	 ADI_AXI_REG_CHAN_CTRL_FMT_EN |		\
	 ADI_AXI_REG_CHAN_CTRL_ENABLE)

struct adi_axi_adc_core_info {
	unsigned int				version;
};

struct adi_axi_adc_state {
	struct mutex				lock;

	struct adi_axi_adc_client		*client;
	void __iomem				*regs;
};

struct adi_axi_adc_client {
	struct list_head			entry;
	struct adi_axi_adc_conv		conv;
	struct adi_axi_adc_state		*state;
	struct device				*dev;
	const struct adi_axi_adc_core_info	*info;
};

static LIST_HEAD(registered_clients);
static DEFINE_MUTEX(registered_clients_lock);

static struct adi_axi_adc_client *conv_to_client(struct adi_axi_adc_conv *conv)
{
	return container_of(conv, struct adi_axi_adc_client, conv);
}

void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
{
	struct adi_axi_adc_client *cl = conv_to_client(conv);

	return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
}
EXPORT_SYMBOL_GPL(adi_axi_adc_conv_priv);

static void adi_axi_adc_write(struct adi_axi_adc_state *st,
			      unsigned int reg,
			      unsigned int val)
{
	iowrite32(val, st->regs + reg);
}

static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
				     unsigned int reg)
{
	return ioread32(st->regs + reg);
}

static int adi_axi_adc_config_dma_buffer(struct device *dev,
					 struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	const char *dma_name;

	if (!device_property_present(dev, "dmas"))
		return 0;

	if (device_property_read_string(dev, "dma-names", &dma_name))
		dma_name = "rx";

	buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
						 dma_name);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
	iio_device_attach_buffer(indio_dev, buffer);

	return 0;
}

static int adi_axi_adc_read_raw(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				int *val, int *val2, long mask)
{
	struct adi_axi_adc_state *st = iio_priv(indio_dev);
	struct adi_axi_adc_conv *conv = &st->client->conv;

	if (!conv->read_raw)
		return -EOPNOTSUPP;

	return conv->read_raw(conv, chan, val, val2, mask);
}

static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 int val, int val2, long mask)
{
	struct adi_axi_adc_state *st = iio_priv(indio_dev);
	struct adi_axi_adc_conv *conv = &st->client->conv;

	if (!conv->write_raw)
		return -EOPNOTSUPP;

	return conv->write_raw(conv, chan, val, val2, mask);
}

static int adi_axi_adc_read_avail(struct iio_dev *indio_dev,
				  struct iio_chan_spec const *chan,
				  const int **vals, int *type, int *length,
				  long mask)
{
	struct adi_axi_adc_state *st = iio_priv(indio_dev);
	struct adi_axi_adc_conv *conv = &st->client->conv;

	if (!conv->read_avail)
		return -EOPNOTSUPP;

	return conv->read_avail(conv, chan, vals, type, length, mask);
}

static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
					const unsigned long *scan_mask)
{
	struct adi_axi_adc_state *st = iio_priv(indio_dev);
	struct adi_axi_adc_conv *conv = &st->client->conv;
	unsigned int i, ctrl;

	for (i = 0; i < conv->chip_info->num_channels; i++) {
		ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i));

		if (test_bit(i, scan_mask))
			ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE;
		else
			ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE;

		adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl);
	}

	return 0;
}

static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
							   size_t sizeof_priv)
{
	struct adi_axi_adc_client *cl;
	size_t alloc_size;

	alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
	if (sizeof_priv)
		alloc_size += ALIGN(sizeof_priv, IIO_ALIGN);

	cl = kzalloc(alloc_size, GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&registered_clients_lock);

	cl->dev = get_device(dev);

	list_add_tail(&cl->entry, &registered_clients);

	mutex_unlock(&registered_clients_lock);

	return &cl->conv;
}

static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
{
	struct adi_axi_adc_client *cl = conv_to_client(conv);

	mutex_lock(&registered_clients_lock);

	list_del(&cl->entry);
	put_device(cl->dev);

	mutex_unlock(&registered_clients_lock);

	kfree(cl);
}

static void devm_adi_axi_adc_conv_release(struct device *dev, void *res)
{
	adi_axi_adc_conv_unregister(*(struct adi_axi_adc_conv **)res);
}

struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
							size_t sizeof_priv)
{
	struct adi_axi_adc_conv **ptr, *conv;

	ptr = devres_alloc(devm_adi_axi_adc_conv_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	conv = adi_axi_adc_conv_register(dev, sizeof_priv);
	if (IS_ERR(conv)) {
		devres_free(ptr);
		return ERR_CAST(conv);
	}

	*ptr = conv;
	devres_add(dev, ptr);

	return conv;
}
EXPORT_SYMBOL_GPL(devm_adi_axi_adc_conv_register);

static const struct iio_info adi_axi_adc_info = {
	.read_raw = &adi_axi_adc_read_raw,
	.write_raw = &adi_axi_adc_write_raw,
	.update_scan_mode = &adi_axi_adc_update_scan_mode,
	.read_avail = &adi_axi_adc_read_avail,
};

static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
	.version = ADI_AXI_PCORE_VER(10, 0, 'a'),
};

static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
{
	const struct adi_axi_adc_core_info *info;
	struct adi_axi_adc_client *cl;
	struct device_node *cln;

	info = of_device_get_match_data(dev);
	if (!info)
		return ERR_PTR(-ENODEV);

	cln = of_parse_phandle(dev->of_node, "adi,adc-dev", 0);
	if (!cln) {
		dev_err(dev, "No 'adi,adc-dev' node defined\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&registered_clients_lock);

	list_for_each_entry(cl, &registered_clients, entry) {
		if (!cl->dev)
			continue;

		if (cl->dev->of_node != cln)
			continue;

		if (!try_module_get(cl->dev->driver->owner)) {
			mutex_unlock(&registered_clients_lock);
			of_node_put(cln);
			return ERR_PTR(-ENODEV);
		}

		get_device(cl->dev);
		cl->info = info;
		mutex_unlock(&registered_clients_lock);
		of_node_put(cln);
		return cl;
	}

	mutex_unlock(&registered_clients_lock);
	of_node_put(cln);

	return ERR_PTR(-EPROBE_DEFER);
}

static int adi_axi_adc_setup_channels(struct device *dev,
				      struct adi_axi_adc_state *st)
{
	struct adi_axi_adc_conv *conv = &st->client->conv;
	int i, ret;

	if (conv->preenable_setup) {
		ret = conv->preenable_setup(conv);
		if (ret)
			return ret;
	}

	for (i = 0; i < conv->chip_info->num_channels; i++) {
		adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i),
				  ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
	}

	return 0;
}

static void axi_adc_reset(struct adi_axi_adc_state *st)
{
	adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0);
	mdelay(10);
	adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN);
	mdelay(10);
	adi_axi_adc_write(st, ADI_AXI_REG_RSTN,
			  ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}

static void adi_axi_adc_cleanup(void *data)
{
	struct adi_axi_adc_client *cl = data;

	put_device(cl->dev);
	module_put(cl->dev->driver->owner);
}

static int adi_axi_adc_probe(struct platform_device *pdev)
{
	struct adi_axi_adc_conv *conv;
	struct iio_dev *indio_dev;
	struct adi_axi_adc_client *cl;
	struct adi_axi_adc_state *st;
	unsigned int ver;
	int ret;

	cl = adi_axi_adc_attach_client(&pdev->dev);
	if (IS_ERR(cl))
		return PTR_ERR(cl);

	ret = devm_add_action_or_reset(&pdev->dev, adi_axi_adc_cleanup, cl);
	if (ret)
		return ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->client = cl;
	cl->state = st;
	mutex_init(&st->lock);

	st->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(st->regs))
		return PTR_ERR(st->regs);

	conv = &st->client->conv;

	axi_adc_reset(st);

	ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION);

	if (cl->info->version > ver) {
		dev_err(&pdev->dev,
			"IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
			ADI_AXI_PCORE_VER_MAJOR(cl->info->version),
			ADI_AXI_PCORE_VER_MINOR(cl->info->version),
			ADI_AXI_PCORE_VER_PATCH(cl->info->version),
			ADI_AXI_PCORE_VER_MAJOR(ver),
			ADI_AXI_PCORE_VER_MINOR(ver),
			ADI_AXI_PCORE_VER_PATCH(ver));
		return -ENODEV;
	}

	indio_dev->info = &adi_axi_adc_info;
	indio_dev->name = "adi-axi-adc";
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->num_channels = conv->chip_info->num_channels;
	indio_dev->channels = conv->chip_info->channels;

	ret = adi_axi_adc_config_dma_buffer(&pdev->dev, indio_dev);
	if (ret)
		return ret;

	ret = adi_axi_adc_setup_channels(&pdev->dev, st);
	if (ret)
		return ret;

	ret = devm_iio_device_register(&pdev->dev, indio_dev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
		 ADI_AXI_PCORE_VER_MAJOR(ver),
		 ADI_AXI_PCORE_VER_MINOR(ver),
		 ADI_AXI_PCORE_VER_PATCH(ver));

	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
	{ .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
	{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);

static struct platform_driver adi_axi_adc_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = adi_axi_adc_of_match,
	},
	.probe = adi_axi_adc_probe,
};
module_platform_driver(adi_axi_adc_driver);

MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
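
/*
 * Usage sketch (illustration only, not part of this driver): a converter
 * front-end driver pairs with this core by registering an adi_axi_adc_conv
 * from its own probe and filling in the callbacks this file dispatches to
 * (read_raw, write_raw, read_avail, preenable_setup) together with chip_info.
 * The names below (mydev_*, the SPI attachment) are hypothetical placeholders
 * and the snippet is kept inside a comment so it does not become dead code in
 * this module:
 *
 *	struct mydev {
 *		struct spi_device *spi;
 *	};
 *
 *	static int mydev_probe(struct spi_device *spi)
 *	{
 *		struct adi_axi_adc_conv *conv;
 *		struct mydev *st;
 *
 *		conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
 *		if (IS_ERR(conv))
 *			return PTR_ERR(conv);
 *
 *		st = adi_axi_adc_conv_priv(conv);
 *		st->spi = spi;
 *
 *		conv->chip_info = &mydev_chip_info;
 *		conv->read_raw = mydev_read_raw;
 *		conv->write_raw = mydev_write_raw;
 *
 *		return 0;
 *	}
 *
 * The AXI ADC core instance then points at that front-end device through the
 * "adi,adc-dev" phandle handled in adi_axi_adc_attach_client() above.
 */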