// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2020 Intel Corporation. All rights reserved.
//
// Author: Cezary Rojewski <cezary.rojewski@intel.com>
//

#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/pxa2xx_ssp.h>
#include "core.h"
#include "messages.h"
#include "registers.h"

/* Accept only channels whose provider is the device passed in as @param. */
static bool catpt_dma_filter(struct dma_chan *chan, void *param)
{
	return param == chan->device->dev;
}

/*
 * Either engine 0 or 1 can be used for image loading.
 * Align with Windows driver equivalent and stick to engine 1.
 */
#define CATPT_DMA_DEVID	1
#define CATPT_DMA_DSP_ADDR_MASK	GENMASK(31, 20)

/*
 * catpt_dma_request_config_chan() - request and preconfigure a memcpy channel
 * @cdev: catpt device context
 *
 * Requests a DMA_MEMCPY-capable channel owned by the catpt device and
 * configures it for 4-byte bus width with a max burst of 16 in both
 * directions.
 *
 * Return: valid channel pointer on success, ERR_PTR() on failure.
 */
struct dma_chan *catpt_dma_request_config_chan(struct catpt_dev *cdev)
{
	struct dma_slave_config config;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, catpt_dma_filter, cdev->dev);
	if (!chan) {
		dev_err(cdev->dev, "request channel failed\n");
		return ERR_PTR(-ENODEV);
	}

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_maxburst = 16;
	config.dst_maxburst = 16;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(cdev->dev, "slave config failed: %d\n", ret);
		dma_release_channel(chan);
		return ERR_PTR(ret);
	}

	return chan;
}

/*
 * Synchronous DMA memcpy between host and DSP memory. HOST-memory access
 * in demand mode (HMDC.HDDA for this channel) is enabled for the duration
 * of the transfer and disabled again regardless of the outcome.
 *
 * Return: 0 on success, -EIO when descriptor preparation fails, submit
 * error code on submission failure, -EPROTO when the transfer does not
 * complete with DMA_COMPLETE status.
 */
static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan,
			    dma_addr_t dst_addr, dma_addr_t src_addr,
			    size_t size)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_status status;
	int ret;

	desc = dmaengine_prep_dma_memcpy(chan, dst_addr, src_addr, size,
					 DMA_CTRL_ACK);
	if (!desc) {
		dev_err(cdev->dev, "prep dma memcpy failed\n");
		return -EIO;
	}

	/* enable demand mode for dma channel */
	catpt_updatel_shim(cdev, HMDC,
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id),
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id));

	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dev_err(cdev->dev, "submit tx failed: %d\n", ret);
		goto clear_hdda;
	}

	/* block until the transfer settles; only DMA_COMPLETE is success */
	status = dma_wait_for_async_tx(desc);
	ret = (status == DMA_COMPLETE) ? 0 : -EPROTO;

clear_hdda:
	/* regardless of status, disable access to HOST memory in demand mode */
	catpt_updatel_shim(cdev, HMDC,
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), 0);

	return ret;
}

/*
 * Copy host memory to DSP memory. The DSP-side (destination) address is
 * tagged with CATPT_DMA_DSP_ADDR_MASK (bits 31:20) so the engine targets
 * the DSP memory window.
 */
int catpt_dma_memcpy_todsp(struct catpt_dev *cdev, struct dma_chan *chan,
			   dma_addr_t dst_addr, dma_addr_t src_addr,
			   size_t size)
{
	return catpt_dma_memcpy(cdev, chan, dst_addr | CATPT_DMA_DSP_ADDR_MASK,
				src_addr, size);
}

/*
 * Copy DSP memory to host memory. The DSP-side (source) address is tagged
 * with CATPT_DMA_DSP_ADDR_MASK, mirroring catpt_dma_memcpy_todsp().
 */
int catpt_dma_memcpy_fromdsp(struct catpt_dev *cdev, struct dma_chan *chan,
			     dma_addr_t dst_addr, dma_addr_t src_addr,
			     size_t size)
{
	return catpt_dma_memcpy(cdev, chan, dst_addr,
				src_addr | CATPT_DMA_DSP_ADDR_MASK, size);
}

/*
 * Probe the DesignWare DMA controller embedded in the ADSP, mapping its
 * registers at the spec-provided offset for engine CATPT_DMA_DEVID.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int catpt_dmac_probe(struct catpt_dev *cdev)
{
	struct dw_dma_chip *dmac;
	int ret;

	dmac = devm_kzalloc(cdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->regs = cdev->lpe_ba + cdev->spec->host_dma_offset[CATPT_DMA_DEVID];
	dmac->dev = cdev->dev;
	dmac->irq = cdev->irq;

	/*
	 * NOTE(review): 31-bit DMA mask — presumably an addressing limit of
	 * the embedded DW controller; confirm against platform documentation.
	 */
	ret = dma_coerce_mask_and_coherent(cdev->dev, DMA_BIT_MASK(31));
	if (ret)
		return ret;
	/*
	 * Caller is responsible for putting device in D0 to allow
	 * for I/O and memory access before probing DW.
	 */
	ret = dw_dma_probe(dmac);
	if (ret)
		return ret;

	cdev->dmac = dmac;
	return 0;
}

void catpt_dmac_remove(struct catpt_dev *cdev)
{
	/*
	 * As dw_dma_remove() juggles with pm_runtime_get_xxx() and
	 * pm_runtime_put_xxx() while both ADSP and DW 'devices' are part of
	 * the same module, caller makes sure pm_runtime_disable() is invoked
	 * before removing DW to prevent postmortem resume and suspend.
	 */
	dw_dma_remove(cdev->dmac);
}

/*
 * Apply a new SRAM power-gating configuration for @sram.
 * @mask: VDRTCTL0 bits covering this SRAM region's power-gate field.
 * @new: target bit values within @mask; a cleared bit means block enabled
 *       (ON=b0 convention, see catpt_dsp_update_srampge()).
 *
 * After the update propagates, every newly powered-up memory block is
 * touched with a dummy read to sanitize it (see comment below).
 */
static void catpt_dsp_set_srampge(struct catpt_dev *cdev, struct resource *sram,
				  unsigned long mask, unsigned long new)
{
	unsigned long old;
	u32 off = sram->start;
	u32 b = __ffs(mask);

	old = catpt_readl_pci(cdev, VDRTCTL0) & mask;
	dev_dbg(cdev->dev, "SRAMPGE [0x%08lx] 0x%08lx -> 0x%08lx",
		mask, old, new);

	/* nothing to do if the power-gate field already matches */
	if (old == new)
		return;

	catpt_updatel_pci(cdev, VDRTCTL0, mask, new);
	/* wait for SRAM power gating to propagate */
	udelay(60);

	/*
	 * Dummy read as the very first access after block enable
	 * to prevent byte loss in future operations.
	 */
	for_each_clear_bit_from(b, &new, fls_long(mask)) {
		u8 buf[4];

		/* newly enabled: new bit=0 while old bit=1 */
		if (test_bit(b, &old)) {
			dev_dbg(cdev->dev, "sanitize block %ld: off 0x%08x\n",
				b - __ffs(mask), off);
			memcpy_fromio(buf, cdev->lpe_ba + off, sizeof(buf));
		}
		off += CATPT_MEMBLOCK_SIZE;
	}
}

/*
 * Recompute and apply SRAM power gating for @sram: blocks overlapped by any
 * child resource (i.e. in use) stay powered, all others are gated off.
 * Core clock gating is disabled around the update and re-enabled after.
 */
void catpt_dsp_update_srampge(struct catpt_dev *cdev, struct resource *sram,
			      unsigned long mask)
{
	struct resource *res;
	unsigned long new = 0;

	/* flag all busy blocks */
	for (res = sram->child; res; res = res->sibling) {
		u32 h, l;

		h = (res->end - sram->start) / CATPT_MEMBLOCK_SIZE;
		l = (res->start - sram->start) / CATPT_MEMBLOCK_SIZE;
		new |= GENMASK(h, l);
	}

	/* offset value given mask's start and invert it as ON=b0 */
	new = ~(new << __ffs(mask)) & mask;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	catpt_dsp_set_srampge(cdev, sram, mask, new);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);
}

/*
 * Stall (true) or unstall (false) the DSP core via CS1.STALL and poll
 * until the register reflects the requested state.
 *
 * Return: 0 on success, poll-timeout error code otherwise.
 */
int catpt_dsp_stall(struct catpt_dev *cdev, bool stall)
{
	u32 reg, val;

	val = stall ? CATPT_CS_STALL : 0;
	catpt_updatel_shim(cdev, CS1, CATPT_CS_STALL, val);

	return catpt_readl_poll_shim(cdev, CS1,
				     reg, (reg & CATPT_CS_STALL) == val,
				     500, 10000);
}

/*
 * Assert (true) or deassert (false) DSP core reset via CS1.RST and poll
 * until the register reflects the requested state.
 */
static int catpt_dsp_reset(struct catpt_dev *cdev, bool reset)
{
	u32 reg, val;

	val = reset ? CATPT_CS_RST : 0;
	catpt_updatel_shim(cdev, CS1, CATPT_CS_RST, val);

	return catpt_readl_poll_shim(cdev, CS1,
				     reg, (reg & CATPT_CS_RST) == val,
				     500, 10000);
}

/* Lynx Point: audio PLL shutdown is controlled through VDRTCTL0.APLLSE. */
void lpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
{
	u32 val;

	val = enable ? LPT_VDRTCTL0_APLLSE : 0;
	catpt_updatel_pci(cdev, VDRTCTL0, LPT_VDRTCTL0_APLLSE, val);
}

/* Wildcat Point: audio PLL shutdown is controlled through VDRTCTL2.APLLSE. */
void wpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
{
	u32 val;

	val = enable ? WPT_VDRTCTL2_APLLSE : 0;
	catpt_updatel_pci(cdev, VDRTCTL2, WPT_VDRTCTL2_APLLSE, val);
}

/*
 * Switch between low-power (lp=true) and high (lp=false) audio fabric clock.
 * @waiti: when set, first wait for the DSP to signal WAIT state (ISD.DCPWM);
 *         on timeout, switching to low-power clock is refused while a switch
 *         to high clock is still carried out.
 *
 * Serialized by cdev->clk_mutex. Always returns 0; clock-change-in-progress
 * poll timeouts are only logged as warnings.
 */
static int catpt_dsp_select_lpclock(struct catpt_dev *cdev, bool lp, bool waiti)
{
	u32 mask, reg, val;
	int ret;

	mutex_lock(&cdev->clk_mutex);

	val = lp ? CATPT_CS_LPCS : 0;
	reg = catpt_readl_shim(cdev, CS1) & CATPT_CS_LPCS;
	dev_dbg(cdev->dev, "LPCS [0x%08lx] 0x%08x -> 0x%08x",
		CATPT_CS_LPCS, reg, val);

	/* requested clock already selected */
	if (reg == val) {
		mutex_unlock(&cdev->clk_mutex);
		return 0;
	}

	if (waiti) {
		/* wait for DSP to signal WAIT state */
		ret = catpt_readl_poll_shim(cdev, ISD,
					    reg, (reg & CATPT_ISD_DCPWM),
					    500, 10000);
		if (ret) {
			dev_warn(cdev->dev, "await WAITI timeout\n");
			/* no signal - only high clock selection allowed */
			if (lp) {
				mutex_unlock(&cdev->clk_mutex);
				return 0;
			}
		}
	}

	/* wait out any clock change already in flight */
	ret = catpt_readl_poll_shim(cdev, CLKCTL,
				    reg, !(reg & CATPT_CLKCTL_CFCIP),
				    500, 10000);
	if (ret)
		dev_warn(cdev->dev, "clock change still in progress\n");

	/* default to DSP core & audio fabric high clock */
	val |= CATPT_CS_DCS_HIGH;
	mask = CATPT_CS_LPCS | CATPT_CS_DCS;
	catpt_updatel_shim(cdev, CS1, mask, val);

	ret = catpt_readl_poll_shim(cdev, CLKCTL,
				    reg, !(reg & CATPT_CLKCTL_CFCIP),
				    500, 10000);
	if (ret)
		dev_warn(cdev->dev, "clock change still in progress\n");

	/* update PLL accordingly */
	cdev->spec->pll_shutdown(cdev, lp);

	mutex_unlock(&cdev->clk_mutex);
	return 0;
}

/*
 * Select the clock matching current stream activity: high clock if any
 * stream is prepared, low-power clock otherwise. Waits for DSP WAIT state
 * in both cases.
 */
int catpt_dsp_update_lpclock(struct catpt_dev *cdev)
{
	struct catpt_stream_runtime *stream;

	list_for_each_entry(stream, &cdev->stream_list, node)
		if (stream->prepared)
			return catpt_dsp_select_lpclock(cdev, false, true);

	return catpt_dsp_select_lpclock(cdev, true, true);
}

/* bring registers to their defaults as HW won't reset itself */
static void catpt_dsp_set_regs_defaults(struct catpt_dev *cdev)
{
	int i;

	catpt_writel_shim(cdev, CS1, CATPT_CS_DEFAULT);
	catpt_writel_shim(cdev, ISC, CATPT_ISC_DEFAULT);
	catpt_writel_shim(cdev, ISD, CATPT_ISD_DEFAULT);
	catpt_writel_shim(cdev, IMC, CATPT_IMC_DEFAULT);
	catpt_writel_shim(cdev, IMD, CATPT_IMD_DEFAULT);
	catpt_writel_shim(cdev, IPCC, CATPT_IPCC_DEFAULT);
	catpt_writel_shim(cdev, IPCD, CATPT_IPCD_DEFAULT);
	catpt_writel_shim(cdev, CLKCTL, CATPT_CLKCTL_DEFAULT);
	catpt_writel_shim(cdev, CS2, CATPT_CS2_DEFAULT);
	catpt_writel_shim(cdev, LTRC, CATPT_LTRC_DEFAULT);
	catpt_writel_shim(cdev, HMDC, CATPT_HMDC_DEFAULT);

	/* reset every SSP port's register file as well */
	for (i = 0; i < CATPT_SSP_COUNT; i++) {
		catpt_writel_ssp(cdev, i, SSCR0, CATPT_SSC0_DEFAULT);
		catpt_writel_ssp(cdev, i, SSCR1, CATPT_SSC1_DEFAULT);
		catpt_writel_ssp(cdev, i, SSSR, CATPT_SSS_DEFAULT);
		catpt_writel_ssp(cdev, i, SSITR, CATPT_SSIT_DEFAULT);
		catpt_writel_ssp(cdev, i, SSDR, CATPT_SSD_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTO, CATPT_SSTO_DEFAULT);
		catpt_writel_ssp(cdev, i, SSPSP, CATPT_SSPSP_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTSA, CATPT_SSTSA_DEFAULT);
		catpt_writel_ssp(cdev, i, SSRSA, CATPT_SSRSA_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTSS, CATPT_SSTSS_DEFAULT);
		catpt_writel_ssp(cdev, i, SSCR2, CATPT_SSCR2_DEFAULT);
		catpt_writel_ssp(cdev, i, SSPSP2, CATPT_SSPSP2_DEFAULT);
	}
}

/*
 * Lynx Point power-down sequence: reset DSP, drop to low-power clock,
 * gate all SRAM and enter D3hot. Always returns 0.
 */
int lpt_dsp_power_down(struct catpt_dev *cdev)
{
	catpt_dsp_reset(cdev, true);

	/* set 24MHz clock for both SSPs */
	catpt_updatel_shim(cdev, CS1, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	catpt_dsp_select_lpclock(cdev, true, false);

	/* DRAM power gating all */
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask,
			      cdev->spec->dram_mask);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask,
			      cdev->spec->iram_mask);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, PCI_D3hot);
	/* give hw time to drop off */
	udelay(50);

	return 0;
}

/*
 * Lynx Point power-up sequence: ungate all SRAM, enter D0, restore high
 * clock and release DSP from reset. Always returns 0.
 */
int lpt_dsp_power_up(struct catpt_dev *cdev)
{
	/* SRAM power gating none */
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask, 0);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask, 0);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, PCI_D0);
	/* give hw time to wake up */
	udelay(100);

	catpt_dsp_select_lpclock(cdev, false, false);
	catpt_updatel_shim(cdev, CS1,
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	/* stagger DSP reset after clock selection */
	udelay(50);

	catpt_dsp_reset(cdev, false);
	/* generate int deassert msg to fix inverted int logic */
	catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB | CATPT_IMC_IPCCD, 0);

	return 0;
}

/*
 * Wildcat Point power-down sequence; same idea as LPT but additionally
 * juggles VDRTCTL2 clock gating and D3 SRAM power-gate override bits.
 * Always returns 0.
 */
int wpt_dsp_power_down(struct catpt_dev *cdev)
{
	u32 mask, val;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	catpt_dsp_reset(cdev, true);
	/* set 24MHz clock for both SSPs */
	catpt_updatel_shim(cdev, CS1, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	catpt_dsp_select_lpclock(cdev, true, false);
	/* disable MCLK */
	catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, 0);

	catpt_dsp_set_regs_defaults(cdev);

	/* switch clock gating */
	mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
	val = mask & (~CATPT_VDRTCTL2_DTCGE);
	catpt_updatel_pci(cdev, VDRTCTL2, mask, val);
	/* enable DTCGE separately */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DTCGE,
			  CATPT_VDRTCTL2_DTCGE);

	/* SRAM power gating all */
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask,
			      cdev->spec->dram_mask);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask,
			      cdev->spec->iram_mask);
	mask = WPT_VDRTCTL0_D3SRAMPGD | WPT_VDRTCTL0_D3PGD;
	catpt_updatel_pci(cdev, VDRTCTL0, mask, WPT_VDRTCTL0_D3PGD);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, PCI_D3hot);
	/* give hw time to drop off */
	udelay(50);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);
	udelay(50);

	return 0;
}

/*
 * Wildcat Point power-up sequence, mirroring wpt_dsp_power_down() in
 * reverse order. Always returns 0.
 */
int wpt_dsp_power_up(struct catpt_dev *cdev)
{
	u32 mask, val;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	/* switch clock gating */
	mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
	val = mask & (~CATPT_VDRTCTL2_DTCGE);
	catpt_updatel_pci(cdev, VDRTCTL2, mask, val);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, PCI_D0);

	/* SRAM power gating none */
	mask = WPT_VDRTCTL0_D3SRAMPGD | WPT_VDRTCTL0_D3PGD;
	catpt_updatel_pci(cdev, VDRTCTL0, mask, mask);
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask, 0);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask, 0);

	catpt_dsp_set_regs_defaults(cdev);

	/* restore MCLK */
	catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, CATPT_CLKCTL_SMOS);
	catpt_dsp_select_lpclock(cdev, false, false);
	/* set 24MHz clock for both SSPs */
	catpt_updatel_shim(cdev, CS1, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	catpt_dsp_reset(cdev, false);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);

	/* generate int deassert msg to fix inverted int logic */
	catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB | CATPT_IMC_IPCCD, 0);

	return 0;
}

#define CATPT_DUMP_MAGIC		0xcd42
#define CATPT_DUMP_SECTION_ID_FILE	0x00
#define CATPT_DUMP_SECTION_ID_IRAM	0x01
#define CATPT_DUMP_SECTION_ID_DRAM	0x02
#define CATPT_DUMP_SECTION_ID_REGS	0x03
#define CATPT_DUMP_HASH_SIZE		20

/* Header preceding each section of a device coredump. */
struct catpt_dump_section_hdr {
	u16 magic;	/* CATPT_DUMP_MAGIC */
	u8 core_id;	/* cdev->spec->core_id */
	u8 section_id;	/* one of CATPT_DUMP_SECTION_ID_* */
	u32 size;	/* payload size in bytes, header excluded */
};

/*
 * catpt_coredump() - capture and submit a device coredump
 * @cdev: catpt device context
 *
 * Dump layout: FILE header + fw hash chunk, then IRAM, DRAM and register
 * (shim + SSPx + DMAx) sections, each preceded by its own header. The
 * buffer is handed off to dev_coredumpv(), which takes ownership of it.
 *
 * Return: 0 on success, -ENOMEM if the dump buffer cannot be allocated.
 */
int catpt_coredump(struct catpt_dev *cdev)
{
	struct catpt_dump_section_hdr *hdr;
	size_t dump_size, regs_size;
	u8 *dump, *pos;
	const char *eof;
	char *info;
	int i;

	regs_size = CATPT_SHIM_REGS_SIZE;
	regs_size += CATPT_DMA_COUNT * CATPT_DMA_REGS_SIZE;
	regs_size += CATPT_SSP_COUNT * CATPT_SSP_REGS_SIZE;
	dump_size = resource_size(&cdev->dram);
	dump_size += resource_size(&cdev->iram);
	dump_size += regs_size;
	/* account for header of each section and hash chunk */
	dump_size += 4 * sizeof(*hdr) + CATPT_DUMP_HASH_SIZE;

	dump = vzalloc(dump_size);
	if (!dump)
		return -ENOMEM;

	pos = dump;

	/* FILE section header describes the entire remainder of the dump */
	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_FILE;
	hdr->size = dump_size - sizeof(*hdr);
	pos += sizeof(*hdr);

	info = cdev->ipc.config.fw_info;
	eof = info + FW_INFO_SIZE_MAX;
	/* navigate to fifth info segment (fw hash) */
	for (i = 0; i < 4 && info < eof; i++, info++) {
		/* info segments are separated by space each */
		info = strnchr(info, eof - info, ' ');
		if (!info)
			break;
	}

	/* copy the hash only if all four separators were found */
	if (i == 4 && info)
		memcpy(pos, info, min_t(u32, eof - info, CATPT_DUMP_HASH_SIZE));
	pos += CATPT_DUMP_HASH_SIZE;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_IRAM;
	hdr->size = resource_size(&cdev->iram);
	pos += sizeof(*hdr);

	memcpy_fromio(pos, cdev->lpe_ba + cdev->iram.start, hdr->size);
	pos += hdr->size;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_DRAM;
	hdr->size = resource_size(&cdev->dram);
	pos += sizeof(*hdr);

	memcpy_fromio(pos, cdev->lpe_ba + cdev->dram.start, hdr->size);
	pos += hdr->size;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_REGS;
	hdr->size = regs_size;
	pos += sizeof(*hdr);

	memcpy_fromio(pos, catpt_shim_addr(cdev), CATPT_SHIM_REGS_SIZE);
	pos += CATPT_SHIM_REGS_SIZE;

	for (i = 0; i < CATPT_SSP_COUNT; i++) {
		memcpy_fromio(pos, catpt_ssp_addr(cdev, i),
			      CATPT_SSP_REGS_SIZE);
		pos += CATPT_SSP_REGS_SIZE;
	}
	for (i = 0; i < CATPT_DMA_COUNT; i++) {
		memcpy_fromio(pos, catpt_dma_addr(cdev, i),
			      CATPT_DMA_REGS_SIZE);
		pos += CATPT_DMA_REGS_SIZE;
	}

	/* dev_coredumpv() takes ownership of the vmalloc'ed buffer */
	dev_coredumpv(cdev->dev, dump, dump_size, GFP_KERNEL);

	return 0;
}