// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Digital Audio (PCM) abstract layer
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 */

#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/pm_qos.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>
#include <sound/minors.h>
#include <linux/uio.h>
#include <linux/delay.h>

#include "pcm_local.h"

#ifdef CONFIG_SND_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_param_trace.h"
#else
#define trace_hw_mask_param_enabled()		0
#define trace_hw_interval_param_enabled()	0
#define trace_hw_mask_param(substream, type, index, prev, curr)
#define trace_hw_interval_param(substream, type, index, prev, curr)
#endif

/*
 * Compatibility
 */

struct snd_pcm_hw_params_old {
	unsigned int flags;
	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
				      SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
	unsigned int rmask;
	unsigned int cmask;
	unsigned int info;
	unsigned int msbits;
	unsigned int rate_num;
	unsigned int rate_den;
	snd_pcm_uframes_t fifo_size;
	unsigned char reserved[64];
};

#ifdef CONFIG_SND_SUPPORT_OLD_API
#define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
#define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)

static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams);
static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams);
#endif
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);

/*
 *
 */

static DECLARE_RWSEM(snd_pcm_link_rwsem);

void snd_pcm_group_init(struct snd_pcm_group *group)
{
	spin_lock_init(&group->lock);
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->substreams);
	refcount_set(&group->refs, 1);
}

/* define group lock helpers */
#define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
{ \
	if (nonatomic) \
		mutex_ ## mutex_action(&group->mutex); \
	else \
		spin_ ## action(&group->lock); \
}

DEFINE_PCM_GROUP_LOCK(lock, lock);
DEFINE_PCM_GROUP_LOCK(unlock, unlock);
DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);

/**
 * snd_pcm_stream_lock - Lock the PCM stream
 * @substream: PCM substream
 *
 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
 * flag of the given substream.  This also takes the global link rw lock
 * (or rw sem), too, for avoiding the race with linked streams.
 */
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
	snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);

/**
 * snd_pcm_stream_unlock - Unlock the PCM stream
 * @substream: PCM substream
 *
 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
 */
void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
	snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);

/**
 * snd_pcm_stream_lock_irq - Lock the PCM stream
 * @substream: PCM substream
 *
 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
 * IRQ (only when nonatomic is false).  In the nonatomic case, this is identical
 * to snd_pcm_stream_lock().
 */
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
	snd_pcm_group_lock_irq(&substream->self_group,
			       substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);

static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
{
	struct snd_pcm_group *group = &substream->self_group;

	if (substream->pcm->nonatomic)
		mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
	else
		spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
}

/**
 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
 * @substream: PCM substream
 *
 * This is a counter-part of snd_pcm_stream_lock_irq().
 */
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
	snd_pcm_group_unlock_irq(&substream->self_group,
				 substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);

unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
	unsigned long flags = 0;
	if (substream->pcm->nonatomic)
		mutex_lock(&substream->self_group.mutex);
	else
		spin_lock_irqsave(&substream->self_group.lock, flags);
	return flags;
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);

/**
 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
 * @substream: PCM substream
 * @flags: irq flags
 *
 * This is a counter-part of snd_pcm_stream_lock_irqsave().
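 * The @flags argument must be the value returned from
 * snd_pcm_stream_lock_irqsave().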
 */
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
				      unsigned long flags)
{
	if (substream->pcm->nonatomic)
		mutex_unlock(&substream->self_group.mutex);
	else
		spin_unlock_irqrestore(&substream->self_group.lock, flags);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);

/* Run PCM ioctl ops */
static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
			     unsigned cmd, void *arg)
{
	if (substream->ops->ioctl)
		return substream->ops->ioctl(substream, cmd, arg);
	else
		return snd_pcm_lib_ioctl(substream, cmd, arg);
}

int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
	struct snd_pcm *pcm = substream->pcm;
	struct snd_pcm_str *pstr = substream->pstr;

	memset(info, 0, sizeof(*info));
	info->card = pcm->card->number;
	info->device = pcm->device;
	info->stream = substream->stream;
	info->subdevice = substream->number;
	strlcpy(info->id, pcm->id, sizeof(info->id));
	strlcpy(info->name, pcm->name, sizeof(info->name));
	info->dev_class = pcm->dev_class;
	info->dev_subclass = pcm->dev_subclass;
	info->subdevices_count = pstr->substream_count;
	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
	strlcpy(info->subname, substream->name, sizeof(info->subname));

	return 0;
}

int snd_pcm_info_user(struct snd_pcm_substream *substream,
		      struct snd_pcm_info __user * _info)
{
	struct snd_pcm_info *info;
	int err;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (! info)
		return -ENOMEM;
	err = snd_pcm_info(substream, info);
	if (err >= 0) {
		if (copy_to_user(_info, info, sizeof(*info)))
			err = -EFAULT;
	}
	kfree(info);
	return err;
}

/* macro for simplified cast */
#define PARAM_MASK_BIT(b)	(1U << (__force int)(b))

static bool hw_support_mmap(struct snd_pcm_substream *substream)
{
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
		return false;

	if (substream->ops->mmap || substream->ops->page)
		return true;

	switch (substream->dma_buffer.dev.type) {
	case SNDRV_DMA_TYPE_UNKNOWN:
		/* we can't know the device, so just assume that the driver does
		 * everything right
		 */
		return true;
	case SNDRV_DMA_TYPE_CONTINUOUS:
	case SNDRV_DMA_TYPE_VMALLOC:
		return true;
	default:
		return dma_can_mmap(substream->dma_buffer.dev.dev);
	}
}

static int constrain_mask_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	struct snd_mask *m;
	unsigned int k;
	struct snd_mask old_mask;
	int changed;

	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
		m = hw_param_mask(params, k);
		if (snd_mask_empty(m))
			return -EINVAL;

		/* This parameter is not requested to be changed by the caller. */
		if (!(params->rmask & PARAM_MASK_BIT(k)))
			continue;

		if (trace_hw_mask_param_enabled())
			old_mask = *m;

		changed = snd_mask_refine(m, constrs_mask(constrs, k));
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Set the corresponding flag so that the caller gets it. */
		trace_hw_mask_param(substream, k, 0, &old_mask, m);
		params->cmask |= PARAM_MASK_BIT(k);
	}

	return 0;
}

static int constrain_interval_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	struct snd_interval *i;
	unsigned int k;
	struct snd_interval old_interval;
	int changed;

	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
		i = hw_param_interval(params, k);
		if (snd_interval_empty(i))
			return -EINVAL;

		/* This parameter is not requested to be changed by the caller. */
		if (!(params->rmask & PARAM_MASK_BIT(k)))
			continue;

		if (trace_hw_interval_param_enabled())
			old_interval = *i;

		changed = snd_interval_refine(i, constrs_interval(constrs, k));
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Set the corresponding flag so that the caller gets it. */
		trace_hw_interval_param(substream, k, 0, &old_interval, i);
		params->cmask |= PARAM_MASK_BIT(k);
	}

	return 0;
}

static int constrain_params_by_rules(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	unsigned int k;
	unsigned int *rstamps;
	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
	unsigned int stamp;
	struct snd_pcm_hw_rule *r;
	unsigned int d;
	struct snd_mask old_mask;
	struct snd_interval old_interval;
	bool again;
	int changed, err = 0;

	/*
	 * Each application of a rule has its own sequence number.
	 *
	 * Each member of the 'rstamps' array represents the sequence number of
	 * the most recent application of the corresponding rule.
	 */
	rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
	if (!rstamps)
		return -ENOMEM;

	/*
	 * Each member of the 'vstamps' array represents the sequence number of
	 * the most recent rule application in which the corresponding parameter
	 * was changed.
	 *
	 * In the initial state, the elements corresponding to parameters
	 * requested by the caller are 1.  For unrequested parameters, the
	 * corresponding members are 0 so that those parameters are never
	 * changed.
	 */
	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;

	/* Due to the above design, the actual sequence number starts at 2. */
	stamp = 2;
retry:
	/* Apply all rules in order. */
	again = false;
	for (k = 0; k < constrs->rules_num; k++) {
		r = &constrs->rules[k];

		/*
		 * Check the condition bits of this rule.  When the rule has
		 * condition bits, it is applied only when the same bits are
		 * also set in params->flags.  SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
		 * is an example of such condition bits.
		 */
		if (r->cond && !(r->cond & params->flags))
			continue;

		/*
		 * The 'deps' array includes at most three dependencies
		 * on SNDRV_PCM_HW_PARAM_XXXs for this rule.  The fourth
		 * member of this array is a sentinel and should be a
		 * negative value.
		 *
		 * This rule should be processed this time if any of its
		 * dependent parameters were changed by earlier applications
		 * of the other rules.
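		 * (i.e. a rule is re-evaluated whenever one of its
		 * dependencies carries a newer sequence stamp than the
		 * rule's own last application)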
402 */ 403 for (d = 0; r->deps[d] >= 0; d++) { 404 if (vstamps[r->deps[d]] > rstamps[k]) 405 break; 406 } 407 if (r->deps[d] < 0) 408 continue; 409 410 if (trace_hw_mask_param_enabled()) { 411 if (hw_is_mask(r->var)) 412 old_mask = *hw_param_mask(params, r->var); 413 } 414 if (trace_hw_interval_param_enabled()) { 415 if (hw_is_interval(r->var)) 416 old_interval = *hw_param_interval(params, r->var); 417 } 418 419 changed = r->func(params, r); 420 if (changed < 0) { 421 err = changed; 422 goto out; 423 } 424 425 /* 426 * When the parameter is changed, notify it to the caller 427 * by corresponding returned bit, then preparing for next 428 * iteration. 429 */ 430 if (changed && r->var >= 0) { 431 if (hw_is_mask(r->var)) { 432 trace_hw_mask_param(substream, r->var, 433 k + 1, &old_mask, 434 hw_param_mask(params, r->var)); 435 } 436 if (hw_is_interval(r->var)) { 437 trace_hw_interval_param(substream, r->var, 438 k + 1, &old_interval, 439 hw_param_interval(params, r->var)); 440 } 441 442 params->cmask |= PARAM_MASK_BIT(r->var); 443 vstamps[r->var] = stamp; 444 again = true; 445 } 446 447 rstamps[k] = stamp++; 448 } 449 450 /* Iterate to evaluate all rules till no parameters are changed. */ 451 if (again) 452 goto retry; 453 454 out: 455 kfree(rstamps); 456 return err; 457} 458 459static int fixup_unreferenced_params(struct snd_pcm_substream *substream, 460 struct snd_pcm_hw_params *params) 461{ 462 const struct snd_interval *i; 463 const struct snd_mask *m; 464 int err; 465 466 if (!params->msbits) { 467 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); 468 if (snd_interval_single(i)) 469 params->msbits = snd_interval_value(i); 470 } 471 472 if (!params->rate_den) { 473 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); 474 if (snd_interval_single(i)) { 475 params->rate_num = snd_interval_value(i); 476 params->rate_den = 1; 477 } 478 } 479 480 if (!params->fifo_size) { 481 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT); 482 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS); 483 if (snd_mask_single(m) && snd_interval_single(i)) { 484 err = snd_pcm_ops_ioctl(substream, 485 SNDRV_PCM_IOCTL1_FIFO_SIZE, 486 params); 487 if (err < 0) 488 return err; 489 } 490 } 491 492 if (!params->info) { 493 params->info = substream->runtime->hw.info; 494 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES | 495 SNDRV_PCM_INFO_DRAIN_TRIGGER); 496 if (!hw_support_mmap(substream)) 497 params->info &= ~(SNDRV_PCM_INFO_MMAP | 498 SNDRV_PCM_INFO_MMAP_VALID); 499 } 500 501 return 0; 502} 503 504int snd_pcm_hw_refine(struct snd_pcm_substream *substream, 505 struct snd_pcm_hw_params *params) 506{ 507 int err; 508 509 params->info = 0; 510 params->fifo_size = 0; 511 if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_SAMPLE_BITS)) 512 params->msbits = 0; 513 if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)) { 514 params->rate_num = 0; 515 params->rate_den = 0; 516 } 517 518 err = constrain_mask_params(substream, params); 519 if (err < 0) 520 return err; 521 522 err = constrain_interval_params(substream, params); 523 if (err < 0) 524 return err; 525 526 err = constrain_params_by_rules(substream, params); 527 if (err < 0) 528 return err; 529 530 params->rmask = 0; 531 532 return 0; 533} 534EXPORT_SYMBOL(snd_pcm_hw_refine); 535 536static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream, 537 struct snd_pcm_hw_params __user * _params) 538{ 539 struct snd_pcm_hw_params *params; 540 int err; 541 542 params = memdup_user(_params, sizeof(*params)); 543 if 
		return PTR_ERR(params);

	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto end;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto end;

	if (copy_to_user(_params, params, sizeof(*params)))
		err = -EFAULT;
end:
	kfree(params);
	return err;
}

static int period_to_usecs(struct snd_pcm_runtime *runtime)
{
	int usecs;

	if (! runtime->rate)
		return -1; /* invalid */

	/* take 75% of the period time as the deadline */
	usecs = (750000 / runtime->rate) * runtime->period_size;
	usecs += ((750000 % runtime->rate) * runtime->period_size) /
		 runtime->rate;

	return usecs;
}

static void snd_pcm_set_state(struct snd_pcm_substream *substream,
			      snd_pcm_state_t state)
{
	snd_pcm_stream_lock_irq(substream);
	if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
		substream->runtime->status->state = state;
	snd_pcm_stream_unlock_irq(substream);
}

static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
					int event)
{
#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer)
		snd_timer_notify(substream->timer, event,
				 &substream->runtime->trigger_tstamp);
#endif
}

void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq)
{
	if (substream->runtime && substream->runtime->stop_operating) {
		substream->runtime->stop_operating = false;
		if (substream->ops && substream->ops->sync_stop)
			substream->ops->sync_stop(substream);
		else if (sync_irq && substream->pcm->card->sync_irq > 0)
			synchronize_irq(substream->pcm->card->sync_irq);
	}
}

/**
 * snd_pcm_hw_params_choose - choose a configuration defined by @params
 * @pcm: PCM instance
 * @params: the hw_params instance
 *
 * Choose one configuration from the configuration space defined by @params.
 * The configuration chosen is the one obtained by fixing, in this order:
 * first access, first format, first subformat, min channels,
 * min rate, min period time, max buffer size, min tick time.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
				    struct snd_pcm_hw_params *params)
{
	static const int vars[] = {
		SNDRV_PCM_HW_PARAM_ACCESS,
		SNDRV_PCM_HW_PARAM_FORMAT,
		SNDRV_PCM_HW_PARAM_SUBFORMAT,
		SNDRV_PCM_HW_PARAM_CHANNELS,
		SNDRV_PCM_HW_PARAM_RATE,
		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
		SNDRV_PCM_HW_PARAM_TICK_TIME,
		-1
	};
	const int *v;
	struct snd_mask old_mask;
	struct snd_interval old_interval;
	int changed;

	for (v = vars; *v != -1; v++) {
		/* Keep the old parameter to trace. */
		if (trace_hw_mask_param_enabled()) {
			if (hw_is_mask(*v))
				old_mask = *hw_param_mask(params, *v);
		}
		if (trace_hw_interval_param_enabled()) {
			if (hw_is_interval(*v))
				old_interval = *hw_param_interval(params, *v);
		}
		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
			changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
		else
			changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Trace the changed parameter. */
		if (hw_is_mask(*v)) {
			trace_hw_mask_param(pcm, *v, 0, &old_mask,
					    hw_param_mask(params, *v));
		}
		if (hw_is_interval(*v)) {
			trace_hw_interval_param(pcm, *v, 0, &old_interval,
						hw_param_interval(params, *v));
		}
	}

	return 0;
}

/* acquire buffer_mutex; if a r/w operation is in progress, return -EBUSY,
 * otherwise block further r/w operations
 */
static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
{
	if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
		return -EBUSY;
	mutex_lock(&runtime->buffer_mutex);
	return 0; /* keep buffer_mutex, unlocked by below */
}

/* release buffer_mutex and clear r/w access flag */
static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
{
	mutex_unlock(&runtime->buffer_mutex);
	atomic_inc(&runtime->buffer_accessing);
}

#if IS_ENABLED(CONFIG_SND_PCM_OSS)
#define is_oss_stream(substream)	((substream)->oss.oss)
#else
#define is_oss_stream(substream)	false
#endif

static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err, usecs;
	unsigned int bits;
	snd_pcm_uframes_t frames;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	err = snd_pcm_buffer_access_lock(runtime);
	if (err < 0)
		return err;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		if (!is_oss_stream(substream) &&
		    atomic_read(&substream->mmap_count))
			err = -EBADFD;
		break;
	default:
		err = -EBADFD;
		break;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (err)
		goto unlock;

	snd_pcm_sync_stop(substream, true);

	params->rmask = ~0U;
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto _error;

	err = snd_pcm_hw_params_choose(substream, params);
	if (err < 0)
		goto _error;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto _error;

	if (substream->managed_buffer_alloc) {
		err = snd_pcm_lib_malloc_pages(substream,
					       params_buffer_bytes(params));
		if (err < 0)
			goto _error;
		runtime->buffer_changed = err > 0;
	}

	if (substream->ops->hw_params != NULL) {
		err = substream->ops->hw_params(substream, params);
		if (err < 0)
			goto _error;
	}

	runtime->access = params_access(params);
	runtime->format = params_format(params);
	runtime->subformat = params_subformat(params);
	runtime->channels = params_channels(params);
	runtime->rate = params_rate(params);
	runtime->period_size = params_period_size(params);
	runtime->periods = params_periods(params);
	runtime->buffer_size = params_buffer_size(params);
	runtime->info = params->info;
	runtime->rate_num = params->rate_num;
	runtime->rate_den = params->rate_den;
	runtime->no_period_wakeup =
			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);

	bits = snd_pcm_format_physical_width(runtime->format);
	runtime->sample_bits = bits;
	bits *= runtime->channels;
	runtime->frame_bits = bits;
	frames = 1;
	while (bits % 8 != 0) {
		bits *= 2;
		frames *= 2;
	}
	runtime->byte_align = bits / 8;
	runtime->min_align = frames;

	/* Default sw params */
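	/* these defaults stay in effect until the application overrides
	 * them via the SW_PARAMS ioctl (snd_pcm_sw_params())
	 */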
	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
	runtime->period_step = 1;
	runtime->control->avail_min = runtime->period_size;
	runtime->start_threshold = 1;
	runtime->stop_threshold = runtime->buffer_size;
	runtime->silence_threshold = 0;
	runtime->silence_size = 0;
	runtime->boundary = runtime->buffer_size;
	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
		runtime->boundary *= 2;

	/* clear the buffer for avoiding possible kernel info leaks */
	if (runtime->dma_area && !substream->ops->copy_user) {
		size_t size = runtime->dma_bytes;

		if (runtime->info & SNDRV_PCM_INFO_MMAP)
			size = PAGE_ALIGN(size);
		memset(runtime->dma_area, 0, size);
	}

	snd_pcm_timer_resolution_change(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);

	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
	if ((usecs = period_to_usecs(runtime)) >= 0)
		cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
					    usecs);
	err = 0;
 _error:
	if (err) {
		/* hardware might be unusable from this time,
		 * so we force the application to retry to set
		 * the correct hardware parameter settings
		 */
		snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
		if (substream->ops->hw_free != NULL)
			substream->ops->hw_free(substream);
		if (substream->managed_buffer_alloc)
			snd_pcm_lib_free_pages(substream);
	}
 unlock:
	snd_pcm_buffer_access_unlock(runtime);
	return err;
}

static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);

	err = snd_pcm_hw_params(substream, params);
	if (err < 0)
		goto end;

	if (copy_to_user(_params, params, sizeof(*params)))
		err = -EFAULT;
end:
	kfree(params);
	return err;
}

static int do_hw_free(struct snd_pcm_substream *substream)
{
	int result = 0;

	snd_pcm_sync_stop(substream, true);
	if (substream->ops->hw_free)
		result = substream->ops->hw_free(substream);
	if (substream->managed_buffer_alloc)
		snd_pcm_lib_free_pages(substream);
	return result;
}

static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	result = snd_pcm_buffer_access_lock(runtime);
	if (result < 0)
		return result;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		if (atomic_read(&substream->mmap_count))
			result = -EBADFD;
		break;
	default:
		result = -EBADFD;
		break;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (result)
		goto unlock;
	result = do_hw_free(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
 unlock:
	snd_pcm_buffer_access_unlock(runtime);
	return result;
}

static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_sw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
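	/* changing sw_params is not allowed in the OPEN state,
	 * i.e. before hw_params has been set up
	 */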
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);

	if (params->tstamp_mode < 0 ||
	    params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
		return -EINVAL;
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
		return -EINVAL;
	if (params->avail_min == 0)
		return -EINVAL;
	if (params->silence_size >= runtime->boundary) {
		if (params->silence_threshold != 0)
			return -EINVAL;
	} else {
		if (params->silence_size > params->silence_threshold)
			return -EINVAL;
		if (params->silence_threshold > runtime->buffer_size)
			return -EINVAL;
	}
	err = 0;
	snd_pcm_stream_lock_irq(substream);
	runtime->tstamp_mode = params->tstamp_mode;
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
		runtime->tstamp_type = params->tstamp_type;
	runtime->period_step = params->period_step;
	runtime->control->avail_min = params->avail_min;
	runtime->start_threshold = params->start_threshold;
	runtime->stop_threshold = params->stop_threshold;
	runtime->silence_threshold = params->silence_threshold;
	runtime->silence_size = params->silence_size;
	params->boundary = runtime->boundary;
	if (snd_pcm_running(substream)) {
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    runtime->silence_size > 0)
			snd_pcm_playback_silence(substream, ULONG_MAX);
		err = snd_pcm_update_state(substream, runtime);
	}
	snd_pcm_stream_unlock_irq(substream);
	return err;
}

static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_sw_params __user * _params)
{
	struct snd_pcm_sw_params params;
	int err;
	if (copy_from_user(&params, _params, sizeof(params)))
		return -EFAULT;
	err = snd_pcm_sw_params(substream, &params);
	if (copy_to_user(_params, &params, sizeof(params)))
		return -EFAULT;
	return err;
}

static inline snd_pcm_uframes_t
snd_pcm_calc_delay(struct snd_pcm_substream *substream)
{
	snd_pcm_uframes_t delay;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		delay = snd_pcm_playback_hw_avail(substream->runtime);
	else
		delay = snd_pcm_capture_avail(substream->runtime);
	return delay + substream->runtime->delay;
}

int snd_pcm_status64(struct snd_pcm_substream *substream,
		     struct snd_pcm_status64 *status)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_stream_lock_irq(substream);

	snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
					   &runtime->audio_tstamp_config);

	/* backwards compatible behavior */
	if (runtime->audio_tstamp_config.type_requested ==
		SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
		if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
			runtime->audio_tstamp_config.type_requested =
				SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		else
			runtime->audio_tstamp_config.type_requested =
				SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
		runtime->audio_tstamp_report.valid = 0;
	} else
		runtime->audio_tstamp_report.valid = 1;

	status->state = runtime->status->state;
	status->suspended_state = runtime->status->suspended_state;
	if (status->state == SNDRV_PCM_STATE_OPEN)
		goto _end;
	status->trigger_tstamp_sec = runtime->trigger_tstamp.tv_sec;
	status->trigger_tstamp_nsec = runtime->trigger_tstamp.tv_nsec;
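	/* for a running stream, refresh the hw pointer before reporting
	 * the positions and timestamps below
	 */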
	if (snd_pcm_running(substream)) {
		snd_pcm_update_hw_ptr(substream);
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
			status->tstamp_sec = runtime->status->tstamp.tv_sec;
			status->tstamp_nsec =
				runtime->status->tstamp.tv_nsec;
			status->driver_tstamp_sec =
				runtime->driver_tstamp.tv_sec;
			status->driver_tstamp_nsec =
				runtime->driver_tstamp.tv_nsec;
			status->audio_tstamp_sec =
				runtime->status->audio_tstamp.tv_sec;
			status->audio_tstamp_nsec =
				runtime->status->audio_tstamp.tv_nsec;
			if (runtime->audio_tstamp_report.valid == 1)
				/* backwards compatibility, no report provided in COMPAT mode */
				snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
								 &status->audio_tstamp_accuracy,
								 &runtime->audio_tstamp_report);

			goto _tstamp_end;
		}
	} else {
		/* get tstamp only in fallback mode and only if enabled */
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
			struct timespec64 tstamp;

			snd_pcm_gettime(runtime, &tstamp);
			status->tstamp_sec = tstamp.tv_sec;
			status->tstamp_nsec = tstamp.tv_nsec;
		}
	}
 _tstamp_end:
	status->appl_ptr = runtime->control->appl_ptr;
	status->hw_ptr = runtime->status->hw_ptr;
	status->avail = snd_pcm_avail(substream);
	status->delay = snd_pcm_running(substream) ?
		snd_pcm_calc_delay(substream) : 0;
	status->avail_max = runtime->avail_max;
	status->overrange = runtime->overrange;
	runtime->avail_max = 0;
	runtime->overrange = 0;
 _end:
	snd_pcm_stream_unlock_irq(substream);
	return 0;
}

static int snd_pcm_status_user64(struct snd_pcm_substream *substream,
				 struct snd_pcm_status64 __user * _status,
				 bool ext)
{
	struct snd_pcm_status64 status;
	int res;

	memset(&status, 0, sizeof(status));
	/*
	 * with extension, parameters are read/write,
	 * get audio_tstamp_data from user,
	 * ignore rest of status structure
	 */
	if (ext && get_user(status.audio_tstamp_data,
			    (u32 __user *)(&_status->audio_tstamp_data)))
		return -EFAULT;
	res = snd_pcm_status64(substream, &status);
	if (res < 0)
		return res;
	if (copy_to_user(_status, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}

static int snd_pcm_status_user32(struct snd_pcm_substream *substream,
				 struct snd_pcm_status32 __user * _status,
				 bool ext)
{
	struct snd_pcm_status64 status64;
	struct snd_pcm_status32 status32;
	int res;

	memset(&status64, 0, sizeof(status64));
	memset(&status32, 0, sizeof(status32));
	/*
	 * with extension, parameters are read/write,
	 * get audio_tstamp_data from user,
	 * ignore rest of status structure
	 */
	if (ext && get_user(status64.audio_tstamp_data,
			    (u32 __user *)(&_status->audio_tstamp_data)))
		return -EFAULT;
	res = snd_pcm_status64(substream, &status64);
	if (res < 0)
		return res;

	status32 = (struct snd_pcm_status32) {
		.state = status64.state,
		.trigger_tstamp_sec = status64.trigger_tstamp_sec,
		.trigger_tstamp_nsec = status64.trigger_tstamp_nsec,
		.tstamp_sec = status64.tstamp_sec,
		.tstamp_nsec = status64.tstamp_nsec,
		.appl_ptr = status64.appl_ptr,
		.hw_ptr = status64.hw_ptr,
		.delay = status64.delay,
		.avail = status64.avail,
		.avail_max = status64.avail_max,
		.overrange = status64.overrange,
		.suspended_state = status64.suspended_state,
		.audio_tstamp_data = status64.audio_tstamp_data,
		.audio_tstamp_sec = status64.audio_tstamp_sec,
		.audio_tstamp_nsec = status64.audio_tstamp_nsec,
		.driver_tstamp_sec = status64.driver_tstamp_sec,
		.driver_tstamp_nsec = status64.driver_tstamp_nsec,
		.audio_tstamp_accuracy = status64.audio_tstamp_accuracy,
	};

	if (copy_to_user(_status, &status32, sizeof(status32)))
		return -EFAULT;

	return 0;
}

static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
				struct snd_pcm_channel_info * info)
{
	struct snd_pcm_runtime *runtime;
	unsigned int channel;

	channel = info->channel;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (channel >= runtime->channels)
		return -EINVAL;
	memset(info, 0, sizeof(*info));
	info->channel = channel;
	return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}

static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
				     struct snd_pcm_channel_info __user * _info)
{
	struct snd_pcm_channel_info info;
	int res;

	if (copy_from_user(&info, _info, sizeof(info)))
		return -EFAULT;
	res = snd_pcm_channel_info(substream, &info);
	if (res < 0)
		return res;
	if (copy_to_user(_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->trigger_master == NULL)
		return;
	if (runtime->trigger_master == substream) {
		if (!runtime->trigger_tstamp_latched)
			snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
	} else {
		snd_pcm_trigger_tstamp(runtime->trigger_master);
		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
	}
	runtime->trigger_master = NULL;
}

#define ACTION_ARG_IGNORE	(__force snd_pcm_state_t)0

struct action_ops {
	int (*pre_action)(struct snd_pcm_substream *substream,
			  snd_pcm_state_t state);
	int (*do_action)(struct snd_pcm_substream *substream,
			 snd_pcm_state_t state);
	void (*undo_action)(struct snd_pcm_substream *substream,
			    snd_pcm_state_t state);
	void (*post_action)(struct snd_pcm_substream *substream,
			    snd_pcm_state_t state);
};

/*
 * this function is the core for handling linked streams
 * Note: the stream state might be changed also on failure
 * Note2: call with calling stream lock + link lock
 */
static int snd_pcm_action_group(const struct action_ops *ops,
				struct snd_pcm_substream *substream,
				snd_pcm_state_t state,
				bool stream_lock)
{
	struct snd_pcm_substream *s = NULL;
	struct snd_pcm_substream *s1;
	int res = 0, depth = 1;

	snd_pcm_group_for_each_entry(s, substream) {
		if (s != substream) {
			if (!stream_lock)
				mutex_lock_nested(&s->runtime->buffer_mutex, depth);
			else if (s->pcm->nonatomic)
				mutex_lock_nested(&s->self_group.mutex, depth);
			else
				spin_lock_nested(&s->self_group.lock, depth);
			depth++;
		}
		res = ops->pre_action(s, state);
		if (res < 0)
			goto _unlock;
	}
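	/* the whole group is locked and pre-checked now; run the action */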
	snd_pcm_group_for_each_entry(s, substream) {
		res = ops->do_action(s, state);
		if (res < 0) {
			if (ops->undo_action) {
				snd_pcm_group_for_each_entry(s1, substream) {
					if (s1 == s) /* failed stream */
						break;
					ops->undo_action(s1, state);
				}
			}
			s = NULL; /* unlock all */
			goto _unlock;
		}
	}
	snd_pcm_group_for_each_entry(s, substream) {
		ops->post_action(s, state);
	}
 _unlock:
	/* unlock streams */
	snd_pcm_group_for_each_entry(s1, substream) {
		if (s1 != substream) {
			if (!stream_lock)
				mutex_unlock(&s1->runtime->buffer_mutex);
			else if (s1->pcm->nonatomic)
				mutex_unlock(&s1->self_group.mutex);
			else
				spin_unlock(&s1->self_group.lock);
		}
		if (s1 == s)	/* end */
			break;
	}
	return res;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action_single(const struct action_ops *ops,
				 struct snd_pcm_substream *substream,
				 snd_pcm_state_t state)
{
	int res;

	res = ops->pre_action(substream, state);
	if (res < 0)
		return res;
	res = ops->do_action(substream, state);
	if (res == 0)
		ops->post_action(substream, state);
	else if (ops->undo_action)
		ops->undo_action(substream, state);
	return res;
}

static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
				 struct snd_pcm_group *new_group)
{
	substream->group = new_group;
	list_move(&substream->link_list, &new_group->substreams);
}

/*
 * Unref and unlock the group, but keep the stream lock;
 * when the group becomes empty and no longer referred, destroy itself
 */
static void snd_pcm_group_unref(struct snd_pcm_group *group,
				struct snd_pcm_substream *substream)
{
	bool do_free;

	if (!group)
		return;
	do_free = refcount_dec_and_test(&group->refs);
	snd_pcm_group_unlock(group, substream->pcm->nonatomic);
	if (do_free)
		kfree(group);
}

/*
 * Lock the group inside a stream lock and reference it;
 * return the locked group object, or NULL if not linked
 */
static struct snd_pcm_group *
snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
{
	bool nonatomic = substream->pcm->nonatomic;
	struct snd_pcm_group *group;
	bool trylock;

	for (;;) {
		if (!snd_pcm_stream_linked(substream))
			return NULL;
		group = substream->group;
		/* block freeing the group object */
		refcount_inc(&group->refs);

		trylock = nonatomic ? mutex_trylock(&group->mutex) :
			spin_trylock(&group->lock);
		if (trylock)
			break; /* OK */

		/* re-lock for avoiding ABBA deadlock */
		snd_pcm_stream_unlock(substream);
		snd_pcm_group_lock(group, nonatomic);
		snd_pcm_stream_lock(substream);

		/* check the group again; the above opens a small race window */
		if (substream->group == group)
			break; /* OK */
		/* group changed, try again */
		snd_pcm_group_unref(group, substream);
	}
	return group;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action(const struct action_ops *ops,
			  struct snd_pcm_substream *substream,
			  snd_pcm_state_t state)
{
	struct snd_pcm_group *group;
	int res;

	group = snd_pcm_stream_group_ref(substream);
	if (group)
		res = snd_pcm_action_group(ops, substream, state, true);
	else
		res = snd_pcm_action_single(ops, substream, state);
	snd_pcm_group_unref(group, substream);
	return res;
}

/*
 * Note: don't use any locks before
 */
static int snd_pcm_action_lock_irq(const struct action_ops *ops,
				   struct snd_pcm_substream *substream,
				   snd_pcm_state_t state)
{
	int res;

	snd_pcm_stream_lock_irq(substream);
	res = snd_pcm_action(ops, substream, state);
	snd_pcm_stream_unlock_irq(substream);
	return res;
}

/*
 */
static int snd_pcm_action_nonatomic(const struct action_ops *ops,
				    struct snd_pcm_substream *substream,
				    snd_pcm_state_t state)
{
	int res;

	/* Guarantee the group members won't change during non-atomic action */
	down_read(&snd_pcm_link_rwsem);
	res = snd_pcm_buffer_access_lock(substream->runtime);
	if (res < 0)
		goto unlock;
	if (snd_pcm_stream_linked(substream))
		res = snd_pcm_action_group(ops, substream, state, false);
	else
		res = snd_pcm_action_single(ops, substream, state);
	snd_pcm_buffer_access_unlock(substream->runtime);
 unlock:
	up_read(&snd_pcm_link_rwsem);
	return res;
}

/*
 * start callbacks
 */
static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
			     snd_pcm_state_t state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
		return -EBADFD;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    !snd_pcm_playback_data(substream))
		return -EPIPE;
	runtime->trigger_tstamp_latched = false;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_start(struct snd_pcm_substream *substream,
			    snd_pcm_state_t state)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
}

static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
			       snd_pcm_state_t state)
{
	if (substream->runtime->trigger_master == substream) {
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
		substream->runtime->stop_operating = true;
	}
}

static void snd_pcm_post_start(struct snd_pcm_substream *substream,
			       snd_pcm_state_t state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->hw_ptr_jiffies = jiffies;
	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
							    runtime->rate;
	runtime->status->state = state;
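	/* pre-fill the silence area for playback when silencing is enabled */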
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
}

static const struct action_ops snd_pcm_action_start = {
	.pre_action = snd_pcm_pre_start,
	.do_action = snd_pcm_do_start,
	.undo_action = snd_pcm_undo_start,
	.post_action = snd_pcm_post_start
};

/**
 * snd_pcm_start - start all linked streams
 * @substream: the PCM substream instance
 *
 * Return: Zero if successful, or a negative error code.
 * The stream lock must be acquired before calling this function.
 */
int snd_pcm_start(struct snd_pcm_substream *substream)
{
	return snd_pcm_action(&snd_pcm_action_start, substream,
			      SNDRV_PCM_STATE_RUNNING);
}

/* take the stream lock and start the streams */
static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
				       SNDRV_PCM_STATE_RUNNING);
}

/*
 * stop callbacks
 */
static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
			    snd_pcm_state_t state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
			   snd_pcm_state_t state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream)) {
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
		substream->runtime->stop_operating = true;
	}
	return 0; /* unconditionally stop all substreams */
}

static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
			      snd_pcm_state_t state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != state) {
		snd_pcm_trigger_tstamp(substream);
		runtime->status->state = state;
		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
	}
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static const struct action_ops snd_pcm_action_stop = {
	.pre_action = snd_pcm_pre_stop,
	.do_action = snd_pcm_do_stop,
	.post_action = snd_pcm_post_stop
};

/**
 * snd_pcm_stop - try to stop all running streams in the substream group
 * @substream: the PCM substream instance
 * @state: PCM state after stopping the stream
 *
 * The state of each stream is then changed to the given state unconditionally.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}
EXPORT_SYMBOL(snd_pcm_stop);

/**
 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
 * @substream: the PCM substream
 *
 * After stopping, the state is changed to SETUP.
 * Unlike snd_pcm_stop(), this affects only the given stream.
 *
 * Return: Zero if successful, or a negative error code.
1530 */ 1531int snd_pcm_drain_done(struct snd_pcm_substream *substream) 1532{ 1533 return snd_pcm_action_single(&snd_pcm_action_stop, substream, 1534 SNDRV_PCM_STATE_SETUP); 1535} 1536 1537/** 1538 * snd_pcm_stop_xrun - stop the running streams as XRUN 1539 * @substream: the PCM substream instance 1540 * 1541 * This stops the given running substream (and all linked substreams) as XRUN. 1542 * Unlike snd_pcm_stop(), this function takes the substream lock by itself. 1543 * 1544 * Return: Zero if successful, or a negative error code. 1545 */ 1546int snd_pcm_stop_xrun(struct snd_pcm_substream *substream) 1547{ 1548 unsigned long flags; 1549 1550 snd_pcm_stream_lock_irqsave(substream, flags); 1551 if (substream->runtime && snd_pcm_running(substream)) 1552 __snd_pcm_xrun(substream); 1553 snd_pcm_stream_unlock_irqrestore(substream, flags); 1554 return 0; 1555} 1556EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun); 1557 1558/* 1559 * pause callbacks: pass boolean (to start pause or resume) as state argument 1560 */ 1561#define pause_pushed(state) (__force bool)(state) 1562 1563static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, 1564 snd_pcm_state_t state) 1565{ 1566 struct snd_pcm_runtime *runtime = substream->runtime; 1567 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE)) 1568 return -ENOSYS; 1569 if (pause_pushed(state)) { 1570 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING) 1571 return -EBADFD; 1572 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED) 1573 return -EBADFD; 1574 runtime->trigger_master = substream; 1575 return 0; 1576} 1577 1578static int snd_pcm_do_pause(struct snd_pcm_substream *substream, 1579 snd_pcm_state_t state) 1580{ 1581 if (substream->runtime->trigger_master != substream) 1582 return 0; 1583 /* some drivers might use hw_ptr to recover from the pause - 1584 update the hw_ptr now */ 1585 if (pause_pushed(state)) 1586 snd_pcm_update_hw_ptr(substream); 1587 /* The jiffies check in snd_pcm_update_hw_ptr*() is done by 1588 * a delta between the current jiffies, this gives a large enough 1589 * delta, effectively to skip the check once. 1590 */ 1591 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000; 1592 return substream->ops->trigger(substream, 1593 pause_pushed(state) ? 1594 SNDRV_PCM_TRIGGER_PAUSE_PUSH : 1595 SNDRV_PCM_TRIGGER_PAUSE_RELEASE); 1596} 1597 1598static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, 1599 snd_pcm_state_t state) 1600{ 1601 if (substream->runtime->trigger_master == substream) 1602 substream->ops->trigger(substream, 1603 pause_pushed(state) ? 1604 SNDRV_PCM_TRIGGER_PAUSE_RELEASE : 1605 SNDRV_PCM_TRIGGER_PAUSE_PUSH); 1606} 1607 1608static void snd_pcm_post_pause(struct snd_pcm_substream *substream, 1609 snd_pcm_state_t state) 1610{ 1611 struct snd_pcm_runtime *runtime = substream->runtime; 1612 snd_pcm_trigger_tstamp(substream); 1613 if (pause_pushed(state)) { 1614 runtime->status->state = SNDRV_PCM_STATE_PAUSED; 1615 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE); 1616 wake_up(&runtime->sleep); 1617 wake_up(&runtime->tsleep); 1618 } else { 1619 runtime->status->state = SNDRV_PCM_STATE_RUNNING; 1620 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE); 1621 } 1622} 1623 1624static const struct action_ops snd_pcm_action_pause = { 1625 .pre_action = snd_pcm_pre_pause, 1626 .do_action = snd_pcm_do_pause, 1627 .undo_action = snd_pcm_undo_pause, 1628 .post_action = snd_pcm_post_pause 1629}; 1630 1631/* 1632 * Push/release the pause for all linked streams. 
1633 */ 1634static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push) 1635{ 1636 return snd_pcm_action(&snd_pcm_action_pause, substream, 1637 (__force snd_pcm_state_t)push); 1638} 1639 1640static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream, 1641 bool push) 1642{ 1643 return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream, 1644 (__force snd_pcm_state_t)push); 1645} 1646 1647#ifdef CONFIG_PM 1648/* suspend callback: state argument ignored */ 1649 1650static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, 1651 snd_pcm_state_t state) 1652{ 1653 struct snd_pcm_runtime *runtime = substream->runtime; 1654 switch (runtime->status->state) { 1655 case SNDRV_PCM_STATE_SUSPENDED: 1656 return -EBUSY; 1657 /* unresumable PCM state; return -EBUSY for skipping suspend */ 1658 case SNDRV_PCM_STATE_OPEN: 1659 case SNDRV_PCM_STATE_SETUP: 1660 case SNDRV_PCM_STATE_DISCONNECTED: 1661 return -EBUSY; 1662 } 1663 runtime->trigger_master = substream; 1664 return 0; 1665} 1666 1667static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, 1668 snd_pcm_state_t state) 1669{ 1670 struct snd_pcm_runtime *runtime = substream->runtime; 1671 if (runtime->trigger_master != substream) 1672 return 0; 1673 if (! snd_pcm_running(substream)) 1674 return 0; 1675 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND); 1676 runtime->stop_operating = true; 1677 return 0; /* suspend unconditionally */ 1678} 1679 1680static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, 1681 snd_pcm_state_t state) 1682{ 1683 struct snd_pcm_runtime *runtime = substream->runtime; 1684 snd_pcm_trigger_tstamp(substream); 1685 runtime->status->suspended_state = runtime->status->state; 1686 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED; 1687 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND); 1688 wake_up(&runtime->sleep); 1689 wake_up(&runtime->tsleep); 1690} 1691 1692static const struct action_ops snd_pcm_action_suspend = { 1693 .pre_action = snd_pcm_pre_suspend, 1694 .do_action = snd_pcm_do_suspend, 1695 .post_action = snd_pcm_post_suspend 1696}; 1697 1698/* 1699 * snd_pcm_suspend - trigger SUSPEND to all linked streams 1700 * @substream: the PCM substream 1701 * 1702 * After this call, all streams are changed to SUSPENDED state. 1703 * 1704 * Return: Zero if successful, or a negative error code. 1705 */ 1706static int snd_pcm_suspend(struct snd_pcm_substream *substream) 1707{ 1708 int err; 1709 unsigned long flags; 1710 1711 snd_pcm_stream_lock_irqsave(substream, flags); 1712 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 1713 ACTION_ARG_IGNORE); 1714 snd_pcm_stream_unlock_irqrestore(substream, flags); 1715 return err; 1716} 1717 1718/** 1719 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm 1720 * @pcm: the PCM instance 1721 * 1722 * After this call, all streams are changed to SUSPENDED state. 1723 * 1724 * Return: Zero if successful (or @pcm is %NULL), or a negative error code. 1725 */ 1726int snd_pcm_suspend_all(struct snd_pcm *pcm) 1727{ 1728 struct snd_pcm_substream *substream; 1729 int stream, err = 0; 1730 1731 if (! pcm) 1732 return 0; 1733 1734 for (stream = 0; stream < 2; stream++) { 1735 for (substream = pcm->streams[stream].substream; 1736 substream; substream = substream->next) { 1737 /* FIXME: the open/close code should lock this as well */ 1738 if (substream->runtime == NULL) 1739 continue; 1740 1741 /* 1742 * Skip BE dai link PCM's that are internal and may 1743 * not have their substream ops set. 
1744 */ 1745 if (!substream->ops) 1746 continue; 1747 1748 err = snd_pcm_suspend(substream); 1749 if (err < 0 && err != -EBUSY) 1750 return err; 1751 } 1752 } 1753 1754 for (stream = 0; stream < 2; stream++) 1755 for (substream = pcm->streams[stream].substream; 1756 substream; substream = substream->next) 1757 snd_pcm_sync_stop(substream, false); 1758 1759 return 0; 1760} 1761EXPORT_SYMBOL(snd_pcm_suspend_all); 1762 1763/* resume callbacks: state argument ignored */ 1764 1765static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, 1766 snd_pcm_state_t state) 1767{ 1768 struct snd_pcm_runtime *runtime = substream->runtime; 1769 if (!(runtime->info & SNDRV_PCM_INFO_RESUME)) 1770 return -ENOSYS; 1771 runtime->trigger_master = substream; 1772 return 0; 1773} 1774 1775static int snd_pcm_do_resume(struct snd_pcm_substream *substream, 1776 snd_pcm_state_t state) 1777{ 1778 struct snd_pcm_runtime *runtime = substream->runtime; 1779 if (runtime->trigger_master != substream) 1780 return 0; 1781 /* DMA not running previously? */ 1782 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING && 1783 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING || 1784 substream->stream != SNDRV_PCM_STREAM_PLAYBACK)) 1785 return 0; 1786 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME); 1787} 1788 1789static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, 1790 snd_pcm_state_t state) 1791{ 1792 if (substream->runtime->trigger_master == substream && 1793 snd_pcm_running(substream)) 1794 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND); 1795} 1796 1797static void snd_pcm_post_resume(struct snd_pcm_substream *substream, 1798 snd_pcm_state_t state) 1799{ 1800 struct snd_pcm_runtime *runtime = substream->runtime; 1801 snd_pcm_trigger_tstamp(substream); 1802 runtime->status->state = runtime->status->suspended_state; 1803 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME); 1804} 1805 1806static const struct action_ops snd_pcm_action_resume = { 1807 .pre_action = snd_pcm_pre_resume, 1808 .do_action = snd_pcm_do_resume, 1809 .undo_action = snd_pcm_undo_resume, 1810 .post_action = snd_pcm_post_resume 1811}; 1812 1813static int snd_pcm_resume(struct snd_pcm_substream *substream) 1814{ 1815 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 1816 ACTION_ARG_IGNORE); 1817} 1818 1819#else 1820 1821static int snd_pcm_resume(struct snd_pcm_substream *substream) 1822{ 1823 return -ENOSYS; 1824} 1825 1826#endif /* CONFIG_PM */ 1827 1828/* 1829 * xrun ioctl 1830 * 1831 * Change the RUNNING stream(s) to XRUN state. 
1832 */ 1833static int snd_pcm_xrun(struct snd_pcm_substream *substream) 1834{ 1835 struct snd_pcm_runtime *runtime = substream->runtime; 1836 int result; 1837 1838 snd_pcm_stream_lock_irq(substream); 1839 switch (runtime->status->state) { 1840 case SNDRV_PCM_STATE_XRUN: 1841 result = 0; /* already there */ 1842 break; 1843 case SNDRV_PCM_STATE_RUNNING: 1844 __snd_pcm_xrun(substream); 1845 result = 0; 1846 break; 1847 default: 1848 result = -EBADFD; 1849 } 1850 snd_pcm_stream_unlock_irq(substream); 1851 return result; 1852} 1853 1854/* 1855 * reset ioctl 1856 */ 1857/* reset callbacks: state argument ignored */ 1858static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, 1859 snd_pcm_state_t state) 1860{ 1861 struct snd_pcm_runtime *runtime = substream->runtime; 1862 switch (runtime->status->state) { 1863 case SNDRV_PCM_STATE_RUNNING: 1864 case SNDRV_PCM_STATE_PREPARED: 1865 case SNDRV_PCM_STATE_PAUSED: 1866 case SNDRV_PCM_STATE_SUSPENDED: 1867 return 0; 1868 default: 1869 return -EBADFD; 1870 } 1871} 1872 1873static int snd_pcm_do_reset(struct snd_pcm_substream *substream, 1874 snd_pcm_state_t state) 1875{ 1876 struct snd_pcm_runtime *runtime = substream->runtime; 1877 int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL); 1878 if (err < 0) 1879 return err; 1880 snd_pcm_stream_lock_irq(substream); 1881 runtime->hw_ptr_base = 0; 1882 runtime->hw_ptr_interrupt = runtime->status->hw_ptr - 1883 runtime->status->hw_ptr % runtime->period_size; 1884 runtime->silence_start = runtime->status->hw_ptr; 1885 runtime->silence_filled = 0; 1886 snd_pcm_stream_unlock_irq(substream); 1887 return 0; 1888} 1889 1890static void snd_pcm_post_reset(struct snd_pcm_substream *substream, 1891 snd_pcm_state_t state) 1892{ 1893 struct snd_pcm_runtime *runtime = substream->runtime; 1894 snd_pcm_stream_lock_irq(substream); 1895 runtime->control->appl_ptr = runtime->status->hw_ptr; 1896 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && 1897 runtime->silence_size > 0) 1898 snd_pcm_playback_silence(substream, ULONG_MAX); 1899 snd_pcm_stream_unlock_irq(substream); 1900} 1901 1902static const struct action_ops snd_pcm_action_reset = { 1903 .pre_action = snd_pcm_pre_reset, 1904 .do_action = snd_pcm_do_reset, 1905 .post_action = snd_pcm_post_reset 1906}; 1907 1908static int snd_pcm_reset(struct snd_pcm_substream *substream) 1909{ 1910 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 1911 ACTION_ARG_IGNORE); 1912} 1913 1914/* 1915 * prepare ioctl 1916 */ 1917/* pass f_flags as state argument */ 1918static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream, 1919 snd_pcm_state_t state) 1920{ 1921 struct snd_pcm_runtime *runtime = substream->runtime; 1922 int f_flags = (__force int)state; 1923 1924 if (runtime->status->state == SNDRV_PCM_STATE_OPEN || 1925 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED) 1926 return -EBADFD; 1927 if (snd_pcm_running(substream)) 1928 return -EBUSY; 1929 substream->f_flags = f_flags; 1930 return 0; 1931} 1932 1933static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, 1934 snd_pcm_state_t state) 1935{ 1936 int err; 1937 snd_pcm_sync_stop(substream, true); 1938 err = substream->ops->prepare(substream); 1939 if (err < 0) 1940 return err; 1941 return snd_pcm_do_reset(substream, state); 1942} 1943 1944static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, 1945 snd_pcm_state_t state) 1946{ 1947 struct snd_pcm_runtime *runtime = substream->runtime; 1948 runtime->control->appl_ptr = runtime->status->hw_ptr; 1949 
snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED); 1950} 1951 1952static const struct action_ops snd_pcm_action_prepare = { 1953 .pre_action = snd_pcm_pre_prepare, 1954 .do_action = snd_pcm_do_prepare, 1955 .post_action = snd_pcm_post_prepare 1956}; 1957 1958/** 1959 * snd_pcm_prepare - prepare the PCM substream to be triggerable 1960 * @substream: the PCM substream instance 1961 * @file: file to refer f_flags 1962 * 1963 * Return: Zero if successful, or a negative error code. 1964 */ 1965static int snd_pcm_prepare(struct snd_pcm_substream *substream, 1966 struct file *file) 1967{ 1968 int f_flags; 1969 1970 if (file) 1971 f_flags = file->f_flags; 1972 else 1973 f_flags = substream->f_flags; 1974 1975 snd_pcm_stream_lock_irq(substream); 1976 switch (substream->runtime->status->state) { 1977 case SNDRV_PCM_STATE_PAUSED: 1978 snd_pcm_pause(substream, false); 1979 fallthrough; 1980 case SNDRV_PCM_STATE_SUSPENDED: 1981 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); 1982 break; 1983 } 1984 snd_pcm_stream_unlock_irq(substream); 1985 1986 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare, 1987 substream, 1988 (__force snd_pcm_state_t)f_flags); 1989} 1990 1991/* 1992 * drain ioctl 1993 */ 1994 1995/* drain init callbacks: state argument ignored */ 1996static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, 1997 snd_pcm_state_t state) 1998{ 1999 struct snd_pcm_runtime *runtime = substream->runtime; 2000 switch (runtime->status->state) { 2001 case SNDRV_PCM_STATE_OPEN: 2002 case SNDRV_PCM_STATE_DISCONNECTED: 2003 case SNDRV_PCM_STATE_SUSPENDED: 2004 return -EBADFD; 2005 } 2006 runtime->trigger_master = substream; 2007 return 0; 2008} 2009 2010static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, 2011 snd_pcm_state_t state) 2012{ 2013 struct snd_pcm_runtime *runtime = substream->runtime; 2014 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 2015 switch (runtime->status->state) { 2016 case SNDRV_PCM_STATE_PREPARED: 2017 /* start playback stream if possible */ 2018 if (! snd_pcm_playback_empty(substream)) { 2019 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); 2020 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); 2021 } else { 2022 runtime->status->state = SNDRV_PCM_STATE_SETUP; 2023 } 2024 break; 2025 case SNDRV_PCM_STATE_RUNNING: 2026 runtime->status->state = SNDRV_PCM_STATE_DRAINING; 2027 break; 2028 case SNDRV_PCM_STATE_XRUN: 2029 runtime->status->state = SNDRV_PCM_STATE_SETUP; 2030 break; 2031 default: 2032 break; 2033 } 2034 } else { 2035 /* stop running stream */ 2036 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) { 2037 snd_pcm_state_t new_state; 2038 2039 new_state = snd_pcm_capture_avail(runtime) > 0 ? 2040 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP; 2041 snd_pcm_do_stop(substream, new_state); 2042 snd_pcm_post_stop(substream, new_state); 2043 } 2044 } 2045 2046 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING && 2047 runtime->trigger_master == substream && 2048 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER)) 2049 return substream->ops->trigger(substream, 2050 SNDRV_PCM_TRIGGER_DRAIN); 2051 2052 return 0; 2053} 2054 2055static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, 2056 snd_pcm_state_t state) 2057{ 2058} 2059 2060static const struct action_ops snd_pcm_action_drain_init = { 2061 .pre_action = snd_pcm_pre_drain_init, 2062 .do_action = snd_pcm_do_drain_init, 2063 .post_action = snd_pcm_post_drain_init 2064}; 2065 2066/* 2067 * Drain the stream(s). 
2068 * When the substream is linked, sync until the draining of all playback streams 2069 * is finished. 2070 * After this call, all streams are supposed to be either SETUP or DRAINING 2071 * (capture only) state. 2072 */ 2073static int snd_pcm_drain(struct snd_pcm_substream *substream, 2074 struct file *file) 2075{ 2076 struct snd_card *card; 2077 struct snd_pcm_runtime *runtime; 2078 struct snd_pcm_substream *s; 2079 struct snd_pcm_group *group; 2080 wait_queue_entry_t wait; 2081 int result = 0; 2082 int nonblock = 0; 2083 2084 card = substream->pcm->card; 2085 runtime = substream->runtime; 2086 2087 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 2088 return -EBADFD; 2089 2090 if (file) { 2091 if (file->f_flags & O_NONBLOCK) 2092 nonblock = 1; 2093 } else if (substream->f_flags & O_NONBLOCK) 2094 nonblock = 1; 2095 2096 snd_pcm_stream_lock_irq(substream); 2097 /* resume pause */ 2098 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) 2099 snd_pcm_pause(substream, false); 2100 2101 /* pre-start/stop - all running streams are changed to DRAINING state */ 2102 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 2103 ACTION_ARG_IGNORE); 2104 if (result < 0) 2105 goto unlock; 2106 /* in non-blocking, we don't wait in ioctl but let caller poll */ 2107 if (nonblock) { 2108 result = -EAGAIN; 2109 goto unlock; 2110 } 2111 2112 for (;;) { 2113 long tout; 2114 struct snd_pcm_runtime *to_check; 2115 if (signal_pending(current)) { 2116 result = -ERESTARTSYS; 2117 break; 2118 } 2119 /* find a substream to drain */ 2120 to_check = NULL; 2121 group = snd_pcm_stream_group_ref(substream); 2122 snd_pcm_group_for_each_entry(s, substream) { 2123 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) 2124 continue; 2125 runtime = s->runtime; 2126 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { 2127 to_check = runtime; 2128 break; 2129 } 2130 } 2131 snd_pcm_group_unref(group, substream); 2132 if (!to_check) 2133 break; /* all drained */ 2134 init_waitqueue_entry(&wait, current); 2135 set_current_state(TASK_INTERRUPTIBLE); 2136 add_wait_queue(&to_check->sleep, &wait); 2137 snd_pcm_stream_unlock_irq(substream); 2138 if (runtime->no_period_wakeup) 2139 tout = MAX_SCHEDULE_TIMEOUT; 2140 else { 2141 tout = 10; 2142 if (runtime->rate) { 2143 long t = runtime->period_size * 2 / runtime->rate; 2144 tout = max(t, tout); 2145 } 2146 tout = msecs_to_jiffies(tout * 1000); 2147 } 2148 tout = schedule_timeout(tout); 2149 2150 snd_pcm_stream_lock_irq(substream); 2151 group = snd_pcm_stream_group_ref(substream); 2152 snd_pcm_group_for_each_entry(s, substream) { 2153 if (s->runtime == to_check) { 2154 remove_wait_queue(&to_check->sleep, &wait); 2155 break; 2156 } 2157 } 2158 snd_pcm_group_unref(group, substream); 2159 2160 if (card->shutdown) { 2161 result = -ENODEV; 2162 break; 2163 } 2164 if (tout == 0) { 2165 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) 2166 result = -ESTRPIPE; 2167 else { 2168 dev_dbg(substream->pcm->card->dev, 2169 "playback drain error (DMA or IRQ trouble?)\n"); 2170 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); 2171 result = -EIO; 2172 } 2173 break; 2174 } 2175 } 2176 2177 unlock: 2178 snd_pcm_stream_unlock_irq(substream); 2179 2180 return result; 2181} 2182 2183/* 2184 * drop ioctl 2185 * 2186 * Immediately put all linked substreams into SETUP state. 
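 * Unlike the drain ioctl, any pending data is discarded.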
2187 */ 2188static int snd_pcm_drop(struct snd_pcm_substream *substream) 2189{ 2190 struct snd_pcm_runtime *runtime; 2191 int result = 0; 2192 2193 if (PCM_RUNTIME_CHECK(substream)) 2194 return -ENXIO; 2195 runtime = substream->runtime; 2196 2197 if (runtime->status->state == SNDRV_PCM_STATE_OPEN || 2198 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED) 2199 return -EBADFD; 2200 2201 snd_pcm_stream_lock_irq(substream); 2202 /* resume pause */ 2203 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) 2204 snd_pcm_pause(substream, false); 2205 2206 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); 2207 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */ 2208 snd_pcm_stream_unlock_irq(substream); 2209 2210 return result; 2211} 2212 2213 2214static bool is_pcm_file(struct file *file) 2215{ 2216 struct inode *inode = file_inode(file); 2217 struct snd_pcm *pcm; 2218 unsigned int minor; 2219 2220 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major) 2221 return false; 2222 minor = iminor(inode); 2223 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK); 2224 if (!pcm) 2225 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE); 2226 if (!pcm) 2227 return false; 2228 snd_card_unref(pcm->card); 2229 return true; 2230} 2231 2232/* 2233 * PCM link handling 2234 */ 2235static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) 2236{ 2237 int res = 0; 2238 struct snd_pcm_file *pcm_file; 2239 struct snd_pcm_substream *substream1; 2240 struct snd_pcm_group *group, *target_group; 2241 bool nonatomic = substream->pcm->nonatomic; 2242 struct fd f = fdget(fd); 2243 2244 if (!f.file) 2245 return -EBADFD; 2246 if (!is_pcm_file(f.file)) { 2247 res = -EBADFD; 2248 goto _badf; 2249 } 2250 pcm_file = f.file->private_data; 2251 substream1 = pcm_file->substream; 2252 2253 if (substream == substream1) { 2254 res = -EINVAL; 2255 goto _badf; 2256 } 2257 2258 group = kzalloc(sizeof(*group), GFP_KERNEL); 2259 if (!group) { 2260 res = -ENOMEM; 2261 goto _nolock; 2262 } 2263 snd_pcm_group_init(group); 2264 2265 down_write(&snd_pcm_link_rwsem); 2266 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || 2267 substream->runtime->status->state != substream1->runtime->status->state || 2268 substream->pcm->nonatomic != substream1->pcm->nonatomic) { 2269 res = -EBADFD; 2270 goto _end; 2271 } 2272 if (snd_pcm_stream_linked(substream1)) { 2273 res = -EALREADY; 2274 goto _end; 2275 } 2276 2277 snd_pcm_stream_lock_irq(substream); 2278 if (!snd_pcm_stream_linked(substream)) { 2279 snd_pcm_group_assign(substream, group); 2280 group = NULL; /* assigned, don't free this one below */ 2281 } 2282 target_group = substream->group; 2283 snd_pcm_stream_unlock_irq(substream); 2284 2285 snd_pcm_group_lock_irq(target_group, nonatomic); 2286 snd_pcm_stream_lock_nested(substream1); 2287 snd_pcm_group_assign(substream1, target_group); 2288 refcount_inc(&target_group->refs); 2289 snd_pcm_stream_unlock(substream1); 2290 snd_pcm_group_unlock_irq(target_group, nonatomic); 2291 _end: 2292 up_write(&snd_pcm_link_rwsem); 2293 _nolock: 2294 kfree(group); 2295 _badf: 2296 fdput(f); 2297 return res; 2298} 2299 2300static void relink_to_local(struct snd_pcm_substream *substream) 2301{ 2302 snd_pcm_stream_lock_nested(substream); 2303 snd_pcm_group_assign(substream, &substream->self_group); 2304 snd_pcm_stream_unlock(substream); 2305} 2306 2307static int snd_pcm_unlink(struct snd_pcm_substream *substream) 2308{ 2309 struct snd_pcm_group *group; 2310 bool nonatomic = substream->pcm->nonatomic; 2311 
bool do_free = false; 2312 int res = 0; 2313 2314 down_write(&snd_pcm_link_rwsem); 2315 2316 if (!snd_pcm_stream_linked(substream)) { 2317 res = -EALREADY; 2318 goto _end; 2319 } 2320 2321 group = substream->group; 2322 snd_pcm_group_lock_irq(group, nonatomic); 2323 2324 relink_to_local(substream); 2325 refcount_dec(&group->refs); 2326 2327 /* detach the last stream, too */ 2328 if (list_is_singular(&group->substreams)) { 2329 relink_to_local(list_first_entry(&group->substreams, 2330 struct snd_pcm_substream, 2331 link_list)); 2332 do_free = refcount_dec_and_test(&group->refs); 2333 } 2334 2335 snd_pcm_group_unlock_irq(group, nonatomic); 2336 if (do_free) 2337 kfree(group); 2338 2339 _end: 2340 up_write(&snd_pcm_link_rwsem); 2341 return res; 2342} 2343 2344/* 2345 * hw configurator 2346 */ 2347static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params, 2348 struct snd_pcm_hw_rule *rule) 2349{ 2350 struct snd_interval t; 2351 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]), 2352 hw_param_interval_c(params, rule->deps[1]), &t); 2353 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2354} 2355 2356static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params, 2357 struct snd_pcm_hw_rule *rule) 2358{ 2359 struct snd_interval t; 2360 snd_interval_div(hw_param_interval_c(params, rule->deps[0]), 2361 hw_param_interval_c(params, rule->deps[1]), &t); 2362 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2363} 2364 2365static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params, 2366 struct snd_pcm_hw_rule *rule) 2367{ 2368 struct snd_interval t; 2369 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]), 2370 hw_param_interval_c(params, rule->deps[1]), 2371 (unsigned long) rule->private, &t); 2372 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2373} 2374 2375static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params, 2376 struct snd_pcm_hw_rule *rule) 2377{ 2378 struct snd_interval t; 2379 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]), 2380 (unsigned long) rule->private, 2381 hw_param_interval_c(params, rule->deps[1]), &t); 2382 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2383} 2384 2385static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params, 2386 struct snd_pcm_hw_rule *rule) 2387{ 2388 snd_pcm_format_t k; 2389 const struct snd_interval *i = 2390 hw_param_interval_c(params, rule->deps[0]); 2391 struct snd_mask m; 2392 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); 2393 snd_mask_any(&m); 2394 pcm_for_each_format(k) { 2395 int bits; 2396 if (!snd_mask_test_format(mask, k)) 2397 continue; 2398 bits = snd_pcm_format_physical_width(k); 2399 if (bits <= 0) 2400 continue; /* ignore invalid formats */ 2401 if ((unsigned)bits < i->min || (unsigned)bits > i->max) 2402 snd_mask_reset(&m, (__force unsigned)k); 2403 } 2404 return snd_mask_refine(mask, &m); 2405} 2406 2407static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params, 2408 struct snd_pcm_hw_rule *rule) 2409{ 2410 struct snd_interval t; 2411 snd_pcm_format_t k; 2412 2413 t.min = UINT_MAX; 2414 t.max = 0; 2415 t.openmin = 0; 2416 t.openmax = 0; 2417 pcm_for_each_format(k) { 2418 int bits; 2419 if (!snd_mask_test_format(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k)) 2420 continue; 2421 bits = snd_pcm_format_physical_width(k); 2422 if (bits <= 0) 2423 continue; /* ignore invalid formats */ 2424 if (t.min > (unsigned)bits) 2425 t.min = bits; 
2426 if (t.max < (unsigned)bits) 2427 t.max = bits; 2428 } 2429 t.integer = 1; 2430 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2431} 2432 2433#if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12 2434#error "Change this table" 2435#endif 2436 2437static const unsigned int rates[] = { 2438 5512, 8000, 11025, 16000, 22050, 32000, 44100, 2439 48000, 64000, 88200, 96000, 176400, 192000, 352800, 384000 2440}; 2441 2442const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = { 2443 .count = ARRAY_SIZE(rates), 2444 .list = rates, 2445}; 2446 2447static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params, 2448 struct snd_pcm_hw_rule *rule) 2449{ 2450 struct snd_pcm_hardware *hw = rule->private; 2451 return snd_interval_list(hw_param_interval(params, rule->var), 2452 snd_pcm_known_rates.count, 2453 snd_pcm_known_rates.list, hw->rates); 2454} 2455 2456static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params, 2457 struct snd_pcm_hw_rule *rule) 2458{ 2459 struct snd_interval t; 2460 struct snd_pcm_substream *substream = rule->private; 2461 t.min = 0; 2462 t.max = substream->buffer_bytes_max; 2463 t.openmin = 0; 2464 t.openmax = 0; 2465 t.integer = 1; 2466 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 2467} 2468 2469static int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream) 2470{ 2471 struct snd_pcm_runtime *runtime = substream->runtime; 2472 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; 2473 int k, err; 2474 2475 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) { 2476 snd_mask_any(constrs_mask(constrs, k)); 2477 } 2478 2479 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) { 2480 snd_interval_any(constrs_interval(constrs, k)); 2481 } 2482 2483 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS)); 2484 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)); 2485 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)); 2486 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)); 2487 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS)); 2488 2489 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, 2490 snd_pcm_hw_rule_format, NULL, 2491 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); 2492 if (err < 0) 2493 return err; 2494 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, 2495 snd_pcm_hw_rule_sample_bits, NULL, 2496 SNDRV_PCM_HW_PARAM_FORMAT, 2497 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); 2498 if (err < 0) 2499 return err; 2500 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, 2501 snd_pcm_hw_rule_div, NULL, 2502 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1); 2503 if (err < 0) 2504 return err; 2505 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, 2506 snd_pcm_hw_rule_mul, NULL, 2507 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1); 2508 if (err < 0) 2509 return err; 2510 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, 2511 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2512 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1); 2513 if (err < 0) 2514 return err; 2515 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS, 2516 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2517 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1); 
2518 if (err < 0) 2519 return err; 2520 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, 2521 snd_pcm_hw_rule_div, NULL, 2522 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); 2523 if (err < 0) 2524 return err; 2525 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 2526 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2527 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1); 2528 if (err < 0) 2529 return err; 2530 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 2531 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2532 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1); 2533 if (err < 0) 2534 return err; 2535 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS, 2536 snd_pcm_hw_rule_div, NULL, 2537 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1); 2538 if (err < 0) 2539 return err; 2540 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2541 snd_pcm_hw_rule_div, NULL, 2542 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1); 2543 if (err < 0) 2544 return err; 2545 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2546 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2547 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2548 if (err < 0) 2549 return err; 2550 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2551 snd_pcm_hw_rule_muldivk, (void*) 1000000, 2552 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1); 2553 if (err < 0) 2554 return err; 2555 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 2556 snd_pcm_hw_rule_mul, NULL, 2557 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1); 2558 if (err < 0) 2559 return err; 2560 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 2561 snd_pcm_hw_rule_mulkdiv, (void*) 8, 2562 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2563 if (err < 0) 2564 return err; 2565 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 2566 snd_pcm_hw_rule_muldivk, (void*) 1000000, 2567 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1); 2568 if (err < 0) 2569 return err; 2570 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 2571 snd_pcm_hw_rule_muldivk, (void*) 8, 2572 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2573 if (err < 0) 2574 return err; 2575 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2576 snd_pcm_hw_rule_muldivk, (void*) 8, 2577 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1); 2578 if (err < 0) 2579 return err; 2580 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 2581 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2582 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); 2583 if (err < 0) 2584 return err; 2585 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 2586 snd_pcm_hw_rule_mulkdiv, (void*) 1000000, 2587 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); 2588 if (err < 0) 2589 return err; 2590 return 0; 2591} 2592 2593static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream) 2594{ 2595 struct snd_pcm_runtime *runtime = substream->runtime; 2596 struct snd_pcm_hardware *hw = &runtime->hw; 2597 int err; 2598 unsigned int mask = 0; 2599 2600 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED) 2601 mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_INTERLEAVED); 2602 if (hw->info & 
SNDRV_PCM_INFO_NONINTERLEAVED) 2603 mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_NONINTERLEAVED); 2604 if (hw_support_mmap(substream)) { 2605 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED) 2606 mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED); 2607 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED) 2608 mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED); 2609 if (hw->info & SNDRV_PCM_INFO_COMPLEX) 2610 mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_COMPLEX); 2611 } 2612 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask); 2613 if (err < 0) 2614 return err; 2615 2616 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats); 2617 if (err < 0) 2618 return err; 2619 2620 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 2621 PARAM_MASK_BIT(SNDRV_PCM_SUBFORMAT_STD)); 2622 if (err < 0) 2623 return err; 2624 2625 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2626 hw->channels_min, hw->channels_max); 2627 if (err < 0) 2628 return err; 2629 2630 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE, 2631 hw->rate_min, hw->rate_max); 2632 if (err < 0) 2633 return err; 2634 2635 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 2636 hw->period_bytes_min, hw->period_bytes_max); 2637 if (err < 0) 2638 return err; 2639 2640 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS, 2641 hw->periods_min, hw->periods_max); 2642 if (err < 0) 2643 return err; 2644 2645 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2646 hw->period_bytes_min, hw->buffer_bytes_max); 2647 if (err < 0) 2648 return err; 2649 2650 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2651 snd_pcm_hw_rule_buffer_bytes_max, substream, 2652 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1); 2653 if (err < 0) 2654 return err; 2655 2656 /* FIXME: remove */ 2657 if (runtime->dma_bytes) { 2658 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes); 2659 if (err < 0) 2660 return err; 2661 } 2662 2663 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) { 2664 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 2665 snd_pcm_hw_rule_rate, hw, 2666 SNDRV_PCM_HW_PARAM_RATE, -1); 2667 if (err < 0) 2668 return err; 2669 } 2670 2671 /* FIXME: this belong to lowlevel */ 2672 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE); 2673 2674 return 0; 2675} 2676 2677static void pcm_release_private(struct snd_pcm_substream *substream) 2678{ 2679 if (snd_pcm_stream_linked(substream)) 2680 snd_pcm_unlink(substream); 2681} 2682 2683void snd_pcm_release_substream(struct snd_pcm_substream *substream) 2684{ 2685 substream->ref_count--; 2686 if (substream->ref_count > 0) 2687 return; 2688 2689 snd_pcm_drop(substream); 2690 if (substream->hw_opened) { 2691 if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN) 2692 do_hw_free(substream); 2693 substream->ops->close(substream); 2694 substream->hw_opened = 0; 2695 } 2696 if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req)) 2697 cpu_latency_qos_remove_request(&substream->latency_pm_qos_req); 2698 if (substream->pcm_release) { 2699 substream->pcm_release(substream); 2700 substream->pcm_release = NULL; 2701 } 2702 snd_pcm_detach_substream(substream); 2703} 2704EXPORT_SYMBOL(snd_pcm_release_substream); 2705 2706int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, 2707 struct file *file, 2708 struct 
snd_pcm_substream **rsubstream) 2709{ 2710 struct snd_pcm_substream *substream; 2711 int err; 2712 2713 err = snd_pcm_attach_substream(pcm, stream, file, &substream); 2714 if (err < 0) 2715 return err; 2716 if (substream->ref_count > 1) { 2717 *rsubstream = substream; 2718 return 0; 2719 } 2720 2721 err = snd_pcm_hw_constraints_init(substream); 2722 if (err < 0) { 2723 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n"); 2724 goto error; 2725 } 2726 2727 if ((err = substream->ops->open(substream)) < 0) 2728 goto error; 2729 2730 substream->hw_opened = 1; 2731 2732 err = snd_pcm_hw_constraints_complete(substream); 2733 if (err < 0) { 2734 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n"); 2735 goto error; 2736 } 2737 2738 *rsubstream = substream; 2739 return 0; 2740 2741 error: 2742 snd_pcm_release_substream(substream); 2743 return err; 2744} 2745EXPORT_SYMBOL(snd_pcm_open_substream); 2746 2747static int snd_pcm_open_file(struct file *file, 2748 struct snd_pcm *pcm, 2749 int stream) 2750{ 2751 struct snd_pcm_file *pcm_file; 2752 struct snd_pcm_substream *substream; 2753 int err; 2754 2755 err = snd_pcm_open_substream(pcm, stream, file, &substream); 2756 if (err < 0) 2757 return err; 2758 2759 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL); 2760 if (pcm_file == NULL) { 2761 snd_pcm_release_substream(substream); 2762 return -ENOMEM; 2763 } 2764 pcm_file->substream = substream; 2765 if (substream->ref_count == 1) 2766 substream->pcm_release = pcm_release_private; 2767 file->private_data = pcm_file; 2768 2769 return 0; 2770} 2771 2772static int snd_pcm_playback_open(struct inode *inode, struct file *file) 2773{ 2774 struct snd_pcm *pcm; 2775 int err = nonseekable_open(inode, file); 2776 if (err < 0) 2777 return err; 2778 pcm = snd_lookup_minor_data(iminor(inode), 2779 SNDRV_DEVICE_TYPE_PCM_PLAYBACK); 2780 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK); 2781 if (pcm) 2782 snd_card_unref(pcm->card); 2783 return err; 2784} 2785 2786static int snd_pcm_capture_open(struct inode *inode, struct file *file) 2787{ 2788 struct snd_pcm *pcm; 2789 int err = nonseekable_open(inode, file); 2790 if (err < 0) 2791 return err; 2792 pcm = snd_lookup_minor_data(iminor(inode), 2793 SNDRV_DEVICE_TYPE_PCM_CAPTURE); 2794 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE); 2795 if (pcm) 2796 snd_card_unref(pcm->card); 2797 return err; 2798} 2799 2800static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) 2801{ 2802 int err; 2803 wait_queue_entry_t wait; 2804 2805 if (pcm == NULL) { 2806 err = -ENODEV; 2807 goto __error1; 2808 } 2809 err = snd_card_file_add(pcm->card, file); 2810 if (err < 0) 2811 goto __error1; 2812 if (!try_module_get(pcm->card->module)) { 2813 err = -EFAULT; 2814 goto __error2; 2815 } 2816 init_waitqueue_entry(&wait, current); 2817 add_wait_queue(&pcm->open_wait, &wait); 2818 mutex_lock(&pcm->open_mutex); 2819 while (1) { 2820 err = snd_pcm_open_file(file, pcm, stream); 2821 if (err >= 0) 2822 break; 2823 if (err == -EAGAIN) { 2824 if (file->f_flags & O_NONBLOCK) { 2825 err = -EBUSY; 2826 break; 2827 } 2828 } else 2829 break; 2830 set_current_state(TASK_INTERRUPTIBLE); 2831 mutex_unlock(&pcm->open_mutex); 2832 schedule(); 2833 mutex_lock(&pcm->open_mutex); 2834 if (pcm->card->shutdown) { 2835 err = -ENODEV; 2836 break; 2837 } 2838 if (signal_pending(current)) { 2839 err = -ERESTARTSYS; 2840 break; 2841 } 2842 } 2843 remove_wait_queue(&pcm->open_wait, &wait); 2844 mutex_unlock(&pcm->open_mutex); 2845 if (err < 0) 2846 goto __error; 2847 return err; 2848 
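	/* error path: undo the steps above in reverse order */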
2849 __error: 2850 module_put(pcm->card->module); 2851 __error2: 2852 snd_card_file_remove(pcm->card, file); 2853 __error1: 2854 return err; 2855} 2856 2857static int snd_pcm_release(struct inode *inode, struct file *file) 2858{ 2859 struct snd_pcm *pcm; 2860 struct snd_pcm_substream *substream; 2861 struct snd_pcm_file *pcm_file; 2862 2863 pcm_file = file->private_data; 2864 substream = pcm_file->substream; 2865 if (snd_BUG_ON(!substream)) 2866 return -ENXIO; 2867 pcm = substream->pcm; 2868 mutex_lock(&pcm->open_mutex); 2869 snd_pcm_release_substream(substream); 2870 kfree(pcm_file); 2871 mutex_unlock(&pcm->open_mutex); 2872 wake_up(&pcm->open_wait); 2873 module_put(pcm->card->module); 2874 snd_card_file_remove(pcm->card, file); 2875 return 0; 2876} 2877 2878/* check and update PCM state; return 0 or a negative error 2879 * call this inside PCM lock 2880 */ 2881static int do_pcm_hwsync(struct snd_pcm_substream *substream) 2882{ 2883 switch (substream->runtime->status->state) { 2884 case SNDRV_PCM_STATE_DRAINING: 2885 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) 2886 return -EBADFD; 2887 fallthrough; 2888 case SNDRV_PCM_STATE_RUNNING: 2889 return snd_pcm_update_hw_ptr(substream); 2890 case SNDRV_PCM_STATE_PREPARED: 2891 case SNDRV_PCM_STATE_PAUSED: 2892 return 0; 2893 case SNDRV_PCM_STATE_SUSPENDED: 2894 return -ESTRPIPE; 2895 case SNDRV_PCM_STATE_XRUN: 2896 return -EPIPE; 2897 default: 2898 return -EBADFD; 2899 } 2900} 2901 2902/* increase the appl_ptr; returns the processed frames or a negative error */ 2903static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream, 2904 snd_pcm_uframes_t frames, 2905 snd_pcm_sframes_t avail) 2906{ 2907 struct snd_pcm_runtime *runtime = substream->runtime; 2908 snd_pcm_sframes_t appl_ptr; 2909 int ret; 2910 2911 if (avail <= 0) 2912 return 0; 2913 if (frames > (snd_pcm_uframes_t)avail) 2914 frames = avail; 2915 appl_ptr = runtime->control->appl_ptr + frames; 2916 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary) 2917 appl_ptr -= runtime->boundary; 2918 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); 2919 return ret < 0 ? ret : frames; 2920} 2921 2922/* decrease the appl_ptr; returns the processed frames or zero for error */ 2923static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, 2924 snd_pcm_uframes_t frames, 2925 snd_pcm_sframes_t avail) 2926{ 2927 struct snd_pcm_runtime *runtime = substream->runtime; 2928 snd_pcm_sframes_t appl_ptr; 2929 int ret; 2930 2931 if (avail <= 0) 2932 return 0; 2933 if (frames > (snd_pcm_uframes_t)avail) 2934 frames = avail; 2935 appl_ptr = runtime->control->appl_ptr - frames; 2936 if (appl_ptr < 0) 2937 appl_ptr += runtime->boundary; 2938 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); 2939 /* NOTE: we return zero for errors because PulseAudio gets depressed 2940 * upon receiving an error from rewind ioctl and stops processing 2941 * any longer. Returning zero means that no rewind is done, so 2942 * it's not absolutely wrong to answer like that. 2943 */ 2944 return ret < 0 ? 
0 : frames; 2945} 2946 2947static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream, 2948 snd_pcm_uframes_t frames) 2949{ 2950 snd_pcm_sframes_t ret; 2951 2952 if (frames == 0) 2953 return 0; 2954 2955 snd_pcm_stream_lock_irq(substream); 2956 ret = do_pcm_hwsync(substream); 2957 if (!ret) 2958 ret = rewind_appl_ptr(substream, frames, 2959 snd_pcm_hw_avail(substream)); 2960 snd_pcm_stream_unlock_irq(substream); 2961 return ret; 2962} 2963 2964static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream, 2965 snd_pcm_uframes_t frames) 2966{ 2967 snd_pcm_sframes_t ret; 2968 2969 if (frames == 0) 2970 return 0; 2971 2972 snd_pcm_stream_lock_irq(substream); 2973 ret = do_pcm_hwsync(substream); 2974 if (!ret) 2975 ret = forward_appl_ptr(substream, frames, 2976 snd_pcm_avail(substream)); 2977 snd_pcm_stream_unlock_irq(substream); 2978 return ret; 2979} 2980 2981static int snd_pcm_hwsync(struct snd_pcm_substream *substream) 2982{ 2983 int err; 2984 2985 snd_pcm_stream_lock_irq(substream); 2986 err = do_pcm_hwsync(substream); 2987 snd_pcm_stream_unlock_irq(substream); 2988 return err; 2989} 2990 2991static int snd_pcm_delay(struct snd_pcm_substream *substream, 2992 snd_pcm_sframes_t *delay) 2993{ 2994 int err; 2995 snd_pcm_sframes_t n = 0; 2996 2997 snd_pcm_stream_lock_irq(substream); 2998 err = do_pcm_hwsync(substream); 2999 if (!err) 3000 n = snd_pcm_calc_delay(substream); 3001 snd_pcm_stream_unlock_irq(substream); 3002 if (!err) 3003 *delay = n; 3004 return err; 3005} 3006 3007static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, 3008 struct snd_pcm_sync_ptr __user *_sync_ptr) 3009{ 3010 struct snd_pcm_runtime *runtime = substream->runtime; 3011 struct snd_pcm_sync_ptr sync_ptr; 3012 volatile struct snd_pcm_mmap_status *status; 3013 volatile struct snd_pcm_mmap_control *control; 3014 int err; 3015 3016 memset(&sync_ptr, 0, sizeof(sync_ptr)); 3017 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags))) 3018 return -EFAULT; 3019 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control))) 3020 return -EFAULT; 3021 status = runtime->status; 3022 control = runtime->control; 3023 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) { 3024 err = snd_pcm_hwsync(substream); 3025 if (err < 0) 3026 return err; 3027 } 3028 snd_pcm_stream_lock_irq(substream); 3029 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) { 3030 err = pcm_lib_apply_appl_ptr(substream, 3031 sync_ptr.c.control.appl_ptr); 3032 if (err < 0) { 3033 snd_pcm_stream_unlock_irq(substream); 3034 return err; 3035 } 3036 } else { 3037 sync_ptr.c.control.appl_ptr = control->appl_ptr; 3038 } 3039 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) 3040 control->avail_min = sync_ptr.c.control.avail_min; 3041 else 3042 sync_ptr.c.control.avail_min = control->avail_min; 3043 sync_ptr.s.status.state = status->state; 3044 sync_ptr.s.status.hw_ptr = status->hw_ptr; 3045 sync_ptr.s.status.tstamp = status->tstamp; 3046 sync_ptr.s.status.suspended_state = status->suspended_state; 3047 sync_ptr.s.status.audio_tstamp = status->audio_tstamp; 3048 snd_pcm_stream_unlock_irq(substream); 3049 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) 3050 return -EFAULT; 3051 return 0; 3052} 3053 3054struct snd_pcm_mmap_status32 { 3055 snd_pcm_state_t state; 3056 s32 pad1; 3057 u32 hw_ptr; 3058 s32 tstamp_sec; 3059 s32 tstamp_nsec; 3060 snd_pcm_state_t suspended_state; 3061 s32 audio_tstamp_sec; 3062 s32 audio_tstamp_nsec; 3063} __attribute__((packed)); 3064 
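/* 32-bit counterpart of struct snd_pcm_mmap_control; together with the
 * status record above, it backs the compat SYNC_PTR handling below.
 */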
3065struct snd_pcm_mmap_control32 {
3066	u32 appl_ptr;
3067	u32 avail_min;
3068};
3069
3070struct snd_pcm_sync_ptr32 {
3071	u32 flags;
3072	union {
3073		struct snd_pcm_mmap_status32 status;
3074		unsigned char reserved[64];
3075	} s;
3076	union {
3077		struct snd_pcm_mmap_control32 control;
3078		unsigned char reserved[64];
3079	} c;
3080} __attribute__((packed));
3081
3082/* recalculate the boundary within 32bit */
3083static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
3084{
3085	snd_pcm_uframes_t boundary;
3086
3087	if (! runtime->buffer_size)
3088		return 0;
3089	boundary = runtime->buffer_size;
3090	while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
3091		boundary *= 2;
3092	return boundary;
3093}
3094
3095static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
3096					 struct snd_pcm_sync_ptr32 __user *src)
3097{
3098	struct snd_pcm_runtime *runtime = substream->runtime;
3099	volatile struct snd_pcm_mmap_status *status;
3100	volatile struct snd_pcm_mmap_control *control;
3101	u32 sflags;
3102	struct snd_pcm_mmap_control scontrol;
3103	struct snd_pcm_mmap_status sstatus;
3104	snd_pcm_uframes_t boundary;
3105	int err;
3106
3107	if (snd_BUG_ON(!runtime))
3108		return -EINVAL;
3109
3110	if (get_user(sflags, &src->flags) ||
3111	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3112	    get_user(scontrol.avail_min, &src->c.control.avail_min))
3113		return -EFAULT;
3114	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3115		err = snd_pcm_hwsync(substream);
3116		if (err < 0)
3117			return err;
3118	}
3119	status = runtime->status;
3120	control = runtime->control;
3121	boundary = recalculate_boundary(runtime);
3122	if (! boundary)
3123		boundary = 0x7fffffff;
3124	snd_pcm_stream_lock_irq(substream);
3125	/* FIXME: we should consider the boundary for the sync from app */
3126	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
3127		err = pcm_lib_apply_appl_ptr(substream,
3128				scontrol.appl_ptr);
3129		if (err < 0) {
3130			snd_pcm_stream_unlock_irq(substream);
3131			return err;
3132		}
3133	} else
3134		scontrol.appl_ptr = control->appl_ptr % boundary;
3135	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3136		control->avail_min = scontrol.avail_min;
3137	else
3138		scontrol.avail_min = control->avail_min;
3139	sstatus.state = status->state;
3140	sstatus.hw_ptr = status->hw_ptr % boundary;
3141	sstatus.tstamp = status->tstamp;
3142	sstatus.suspended_state = status->suspended_state;
3143	sstatus.audio_tstamp = status->audio_tstamp;
3144	snd_pcm_stream_unlock_irq(substream);
3145	if (put_user(sstatus.state, &src->s.status.state) ||
3146	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
3147	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||
3148	    put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp_nsec) ||
3149	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
3150	    put_user(sstatus.audio_tstamp.tv_sec, &src->s.status.audio_tstamp_sec) ||
3151	    put_user(sstatus.audio_tstamp.tv_nsec, &src->s.status.audio_tstamp_nsec) ||
3152	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3153	    put_user(scontrol.avail_min, &src->c.control.avail_min))
3154		return -EFAULT;
3155
3156	return 0;
3157}
3158#define __SNDRV_PCM_IOCTL_SYNC_PTR32 _IOWR('A', 0x23, struct snd_pcm_sync_ptr32)
3159
3160static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
3161{
3162	struct snd_pcm_runtime *runtime = substream->runtime;
3163	int arg;
3164
3165	if (get_user(arg, _arg))
3166		return -EFAULT;
3167	if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
3168 return -EINVAL; 3169 runtime->tstamp_type = arg; 3170 return 0; 3171} 3172 3173static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream, 3174 struct snd_xferi __user *_xferi) 3175{ 3176 struct snd_xferi xferi; 3177 struct snd_pcm_runtime *runtime = substream->runtime; 3178 snd_pcm_sframes_t result; 3179 3180 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3181 return -EBADFD; 3182 if (put_user(0, &_xferi->result)) 3183 return -EFAULT; 3184 if (copy_from_user(&xferi, _xferi, sizeof(xferi))) 3185 return -EFAULT; 3186 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 3187 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames); 3188 else 3189 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames); 3190 if (put_user(result, &_xferi->result)) 3191 return -EFAULT; 3192 return result < 0 ? result : 0; 3193} 3194 3195static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream, 3196 struct snd_xfern __user *_xfern) 3197{ 3198 struct snd_xfern xfern; 3199 struct snd_pcm_runtime *runtime = substream->runtime; 3200 void *bufs; 3201 snd_pcm_sframes_t result; 3202 3203 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3204 return -EBADFD; 3205 if (runtime->channels > 128) 3206 return -EINVAL; 3207 if (put_user(0, &_xfern->result)) 3208 return -EFAULT; 3209 if (copy_from_user(&xfern, _xfern, sizeof(xfern))) 3210 return -EFAULT; 3211 3212 bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels); 3213 if (IS_ERR(bufs)) 3214 return PTR_ERR(bufs); 3215 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 3216 result = snd_pcm_lib_writev(substream, bufs, xfern.frames); 3217 else 3218 result = snd_pcm_lib_readv(substream, bufs, xfern.frames); 3219 kfree(bufs); 3220 if (put_user(result, &_xfern->result)) 3221 return -EFAULT; 3222 return result < 0 ? result : 0; 3223} 3224 3225static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream, 3226 snd_pcm_uframes_t __user *_frames) 3227{ 3228 snd_pcm_uframes_t frames; 3229 snd_pcm_sframes_t result; 3230 3231 if (get_user(frames, _frames)) 3232 return -EFAULT; 3233 if (put_user(0, _frames)) 3234 return -EFAULT; 3235 result = snd_pcm_rewind(substream, frames); 3236 if (put_user(result, _frames)) 3237 return -EFAULT; 3238 return result < 0 ? result : 0; 3239} 3240 3241static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream, 3242 snd_pcm_uframes_t __user *_frames) 3243{ 3244 snd_pcm_uframes_t frames; 3245 snd_pcm_sframes_t result; 3246 3247 if (get_user(frames, _frames)) 3248 return -EFAULT; 3249 if (put_user(0, _frames)) 3250 return -EFAULT; 3251 result = snd_pcm_forward(substream, frames); 3252 if (put_user(result, _frames)) 3253 return -EFAULT; 3254 return result < 0 ? result : 0; 3255} 3256 3257static int snd_pcm_common_ioctl(struct file *file, 3258 struct snd_pcm_substream *substream, 3259 unsigned int cmd, void __user *arg) 3260{ 3261 struct snd_pcm_file *pcm_file = file->private_data; 3262 int res; 3263 3264 if (PCM_RUNTIME_CHECK(substream)) 3265 return -ENXIO; 3266 3267 res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0); 3268 if (res < 0) 3269 return res; 3270 3271 switch (cmd) { 3272 case SNDRV_PCM_IOCTL_PVERSION: 3273 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? 
-EFAULT : 0; 3274 case SNDRV_PCM_IOCTL_INFO: 3275 return snd_pcm_info_user(substream, arg); 3276 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */ 3277 return 0; 3278 case SNDRV_PCM_IOCTL_TTSTAMP: 3279 return snd_pcm_tstamp(substream, arg); 3280 case SNDRV_PCM_IOCTL_USER_PVERSION: 3281 if (get_user(pcm_file->user_pversion, 3282 (unsigned int __user *)arg)) 3283 return -EFAULT; 3284 return 0; 3285 case SNDRV_PCM_IOCTL_HW_REFINE: 3286 return snd_pcm_hw_refine_user(substream, arg); 3287 case SNDRV_PCM_IOCTL_HW_PARAMS: 3288 return snd_pcm_hw_params_user(substream, arg); 3289 case SNDRV_PCM_IOCTL_HW_FREE: 3290 return snd_pcm_hw_free(substream); 3291 case SNDRV_PCM_IOCTL_SW_PARAMS: 3292 return snd_pcm_sw_params_user(substream, arg); 3293 case SNDRV_PCM_IOCTL_STATUS32: 3294 return snd_pcm_status_user32(substream, arg, false); 3295 case SNDRV_PCM_IOCTL_STATUS_EXT32: 3296 return snd_pcm_status_user32(substream, arg, true); 3297 case SNDRV_PCM_IOCTL_STATUS64: 3298 return snd_pcm_status_user64(substream, arg, false); 3299 case SNDRV_PCM_IOCTL_STATUS_EXT64: 3300 return snd_pcm_status_user64(substream, arg, true); 3301 case SNDRV_PCM_IOCTL_CHANNEL_INFO: 3302 return snd_pcm_channel_info_user(substream, arg); 3303 case SNDRV_PCM_IOCTL_PREPARE: 3304 return snd_pcm_prepare(substream, file); 3305 case SNDRV_PCM_IOCTL_RESET: 3306 return snd_pcm_reset(substream); 3307 case SNDRV_PCM_IOCTL_START: 3308 return snd_pcm_start_lock_irq(substream); 3309 case SNDRV_PCM_IOCTL_LINK: 3310 return snd_pcm_link(substream, (int)(unsigned long) arg); 3311 case SNDRV_PCM_IOCTL_UNLINK: 3312 return snd_pcm_unlink(substream); 3313 case SNDRV_PCM_IOCTL_RESUME: 3314 return snd_pcm_resume(substream); 3315 case SNDRV_PCM_IOCTL_XRUN: 3316 return snd_pcm_xrun(substream); 3317 case SNDRV_PCM_IOCTL_HWSYNC: 3318 return snd_pcm_hwsync(substream); 3319 case SNDRV_PCM_IOCTL_DELAY: 3320 { 3321 snd_pcm_sframes_t delay; 3322 snd_pcm_sframes_t __user *res = arg; 3323 int err; 3324 3325 err = snd_pcm_delay(substream, &delay); 3326 if (err) 3327 return err; 3328 if (put_user(delay, res)) 3329 return -EFAULT; 3330 return 0; 3331 } 3332 case __SNDRV_PCM_IOCTL_SYNC_PTR32: 3333 return snd_pcm_ioctl_sync_ptr_compat(substream, arg); 3334 case __SNDRV_PCM_IOCTL_SYNC_PTR64: 3335 return snd_pcm_sync_ptr(substream, arg); 3336#ifdef CONFIG_SND_SUPPORT_OLD_API 3337 case SNDRV_PCM_IOCTL_HW_REFINE_OLD: 3338 return snd_pcm_hw_refine_old_user(substream, arg); 3339 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD: 3340 return snd_pcm_hw_params_old_user(substream, arg); 3341#endif 3342 case SNDRV_PCM_IOCTL_DRAIN: 3343 return snd_pcm_drain(substream, file); 3344 case SNDRV_PCM_IOCTL_DROP: 3345 return snd_pcm_drop(substream); 3346 case SNDRV_PCM_IOCTL_PAUSE: 3347 return snd_pcm_pause_lock_irq(substream, (unsigned long)arg); 3348 case SNDRV_PCM_IOCTL_WRITEI_FRAMES: 3349 case SNDRV_PCM_IOCTL_READI_FRAMES: 3350 return snd_pcm_xferi_frames_ioctl(substream, arg); 3351 case SNDRV_PCM_IOCTL_WRITEN_FRAMES: 3352 case SNDRV_PCM_IOCTL_READN_FRAMES: 3353 return snd_pcm_xfern_frames_ioctl(substream, arg); 3354 case SNDRV_PCM_IOCTL_REWIND: 3355 return snd_pcm_rewind_ioctl(substream, arg); 3356 case SNDRV_PCM_IOCTL_FORWARD: 3357 return snd_pcm_forward_ioctl(substream, arg); 3358 } 3359 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd); 3360 return -ENOTTY; 3361} 3362 3363static long snd_pcm_ioctl(struct file *file, unsigned int cmd, 3364 unsigned long arg) 3365{ 3366 struct snd_pcm_file *pcm_file; 3367 3368 pcm_file = file->private_data; 3369 3370 if (((cmd >> 8) & 0xff) != 'A') 
3371 return -ENOTTY; 3372 3373 return snd_pcm_common_ioctl(file, pcm_file->substream, cmd, 3374 (void __user *)arg); 3375} 3376 3377/** 3378 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space 3379 * @substream: PCM substream 3380 * @cmd: IOCTL cmd 3381 * @arg: IOCTL argument 3382 * 3383 * The function is provided primarily for OSS layer and USB gadget drivers, 3384 * and it allows only the limited set of ioctls (hw_params, sw_params, 3385 * prepare, start, drain, drop, forward). 3386 */ 3387int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, 3388 unsigned int cmd, void *arg) 3389{ 3390 snd_pcm_uframes_t *frames = arg; 3391 snd_pcm_sframes_t result; 3392 3393 switch (cmd) { 3394 case SNDRV_PCM_IOCTL_FORWARD: 3395 { 3396 /* provided only for OSS; capture-only and no value returned */ 3397 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE) 3398 return -EINVAL; 3399 result = snd_pcm_forward(substream, *frames); 3400 return result < 0 ? result : 0; 3401 } 3402 case SNDRV_PCM_IOCTL_HW_PARAMS: 3403 return snd_pcm_hw_params(substream, arg); 3404 case SNDRV_PCM_IOCTL_SW_PARAMS: 3405 return snd_pcm_sw_params(substream, arg); 3406 case SNDRV_PCM_IOCTL_PREPARE: 3407 return snd_pcm_prepare(substream, NULL); 3408 case SNDRV_PCM_IOCTL_START: 3409 return snd_pcm_start_lock_irq(substream); 3410 case SNDRV_PCM_IOCTL_DRAIN: 3411 return snd_pcm_drain(substream, NULL); 3412 case SNDRV_PCM_IOCTL_DROP: 3413 return snd_pcm_drop(substream); 3414 case SNDRV_PCM_IOCTL_DELAY: 3415 return snd_pcm_delay(substream, frames); 3416 default: 3417 return -EINVAL; 3418 } 3419} 3420EXPORT_SYMBOL(snd_pcm_kernel_ioctl); 3421 3422static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count, 3423 loff_t * offset) 3424{ 3425 struct snd_pcm_file *pcm_file; 3426 struct snd_pcm_substream *substream; 3427 struct snd_pcm_runtime *runtime; 3428 snd_pcm_sframes_t result; 3429 3430 pcm_file = file->private_data; 3431 substream = pcm_file->substream; 3432 if (PCM_RUNTIME_CHECK(substream)) 3433 return -ENXIO; 3434 runtime = substream->runtime; 3435 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3436 return -EBADFD; 3437 if (!frame_aligned(runtime, count)) 3438 return -EINVAL; 3439 count = bytes_to_frames(runtime, count); 3440 result = snd_pcm_lib_read(substream, buf, count); 3441 if (result > 0) 3442 result = frames_to_bytes(runtime, result); 3443 return result; 3444} 3445 3446static ssize_t snd_pcm_write(struct file *file, const char __user *buf, 3447 size_t count, loff_t * offset) 3448{ 3449 struct snd_pcm_file *pcm_file; 3450 struct snd_pcm_substream *substream; 3451 struct snd_pcm_runtime *runtime; 3452 snd_pcm_sframes_t result; 3453 3454 pcm_file = file->private_data; 3455 substream = pcm_file->substream; 3456 if (PCM_RUNTIME_CHECK(substream)) 3457 return -ENXIO; 3458 runtime = substream->runtime; 3459 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 3460 return -EBADFD; 3461 if (!frame_aligned(runtime, count)) 3462 return -EINVAL; 3463 count = bytes_to_frames(runtime, count); 3464 result = snd_pcm_lib_write(substream, buf, count); 3465 if (result > 0) 3466 result = frames_to_bytes(runtime, result); 3467 return result; 3468} 3469 3470static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to) 3471{ 3472 struct snd_pcm_file *pcm_file; 3473 struct snd_pcm_substream *substream; 3474 struct snd_pcm_runtime *runtime; 3475 snd_pcm_sframes_t result; 3476 unsigned long i; 3477 void __user **bufs; 3478 snd_pcm_uframes_t frames; 3479 3480 pcm_file = iocb->ki_filp->private_data; 3481 
	substream = pcm_file->substream;
3482	if (PCM_RUNTIME_CHECK(substream))
3483		return -ENXIO;
3484	runtime = substream->runtime;
3485	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3486		return -EBADFD;
3487	if (!iter_is_iovec(to))
3488		return -EINVAL;
3489	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3490		return -EINVAL;
3491	if (!frame_aligned(runtime, to->iov->iov_len))
3492		return -EINVAL;
3493	frames = bytes_to_samples(runtime, to->iov->iov_len);
3494	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3495	if (bufs == NULL)
3496		return -ENOMEM;
3497	for (i = 0; i < to->nr_segs; ++i)
3498		bufs[i] = to->iov[i].iov_base;
3499	result = snd_pcm_lib_readv(substream, bufs, frames);
3500	if (result > 0)
3501		result = frames_to_bytes(runtime, result);
3502	kfree(bufs);
3503	return result;
3504}
3505
3506static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3507{
3508	struct snd_pcm_file *pcm_file;
3509	struct snd_pcm_substream *substream;
3510	struct snd_pcm_runtime *runtime;
3511	snd_pcm_sframes_t result;
3512	unsigned long i;
3513	void __user **bufs;
3514	snd_pcm_uframes_t frames;
3515
3516	pcm_file = iocb->ki_filp->private_data;
3517	substream = pcm_file->substream;
3518	if (PCM_RUNTIME_CHECK(substream))
3519		return -ENXIO;
3520	runtime = substream->runtime;
3521	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3522		return -EBADFD;
3523	if (!iter_is_iovec(from))
3524		return -EINVAL;
3525	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3526	    !frame_aligned(runtime, from->iov->iov_len))
3527		return -EINVAL;
3528	frames = bytes_to_samples(runtime, from->iov->iov_len);
3529	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3530	if (bufs == NULL)
3531		return -ENOMEM;
3532	for (i = 0; i < from->nr_segs; ++i)
3533		bufs[i] = from->iov[i].iov_base;
3534	result = snd_pcm_lib_writev(substream, bufs, frames);
3535	if (result > 0)
3536		result = frames_to_bytes(runtime, result);
3537	kfree(bufs);
3538	return result;
3539}
3540
3541static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3542{
3543	struct snd_pcm_file *pcm_file;
3544	struct snd_pcm_substream *substream;
3545	struct snd_pcm_runtime *runtime;
3546	__poll_t mask, ok;
3547	snd_pcm_uframes_t avail;
3548
3549	pcm_file = file->private_data;
3550
3551	substream = pcm_file->substream;
3552	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3553		ok = EPOLLOUT | EPOLLWRNORM;
3554	else
3555		ok = EPOLLIN | EPOLLRDNORM;
3556	if (PCM_RUNTIME_CHECK(substream))
3557		return ok | EPOLLERR;
3558
3559	runtime = substream->runtime;
3560	poll_wait(file, &runtime->sleep, wait);
3561
3562	mask = 0;
3563	snd_pcm_stream_lock_irq(substream);
3564	avail = snd_pcm_avail(substream);
3565	switch (runtime->status->state) {
3566	case SNDRV_PCM_STATE_RUNNING:
3567	case SNDRV_PCM_STATE_PREPARED:
3568	case SNDRV_PCM_STATE_PAUSED:
3569		if (avail >= runtime->control->avail_min)
3570			mask = ok;
3571		break;
3572	case SNDRV_PCM_STATE_DRAINING:
3573		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
3574			mask = ok;
3575			if (!avail)
3576				mask |= EPOLLERR;
3577		}
3578		break;
3579	default:
3580		mask = ok | EPOLLERR;
3581		break;
3582	}
3583	snd_pcm_stream_unlock_irq(substream);
3584	return mask;
3585}
3586
3587/*
3588 * mmap support
3589 */
3590
3591/*
3592 * Only on coherent architectures can we mmap the status and the control records
3593 * for efficient data transfer. On others, we have to use the HWSYNC ioctl...
3594 */ 3595#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) 3596/* 3597 * mmap status record 3598 */ 3599static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3600{ 3601 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3602 struct snd_pcm_runtime *runtime; 3603 3604 if (substream == NULL) 3605 return VM_FAULT_SIGBUS; 3606 runtime = substream->runtime; 3607 vmf->page = virt_to_page(runtime->status); 3608 get_page(vmf->page); 3609 return 0; 3610} 3611 3612static const struct vm_operations_struct snd_pcm_vm_ops_status = 3613{ 3614 .fault = snd_pcm_mmap_status_fault, 3615}; 3616 3617static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, 3618 struct vm_area_struct *area) 3619{ 3620 long size; 3621 if (!(area->vm_flags & VM_READ)) 3622 return -EINVAL; 3623 size = area->vm_end - area->vm_start; 3624 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) 3625 return -EINVAL; 3626 area->vm_ops = &snd_pcm_vm_ops_status; 3627 area->vm_private_data = substream; 3628 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3629 return 0; 3630} 3631 3632/* 3633 * mmap control record 3634 */ 3635static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3636{ 3637 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3638 struct snd_pcm_runtime *runtime; 3639 3640 if (substream == NULL) 3641 return VM_FAULT_SIGBUS; 3642 runtime = substream->runtime; 3643 vmf->page = virt_to_page(runtime->control); 3644 get_page(vmf->page); 3645 return 0; 3646} 3647 3648static const struct vm_operations_struct snd_pcm_vm_ops_control = 3649{ 3650 .fault = snd_pcm_mmap_control_fault, 3651}; 3652 3653static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, 3654 struct vm_area_struct *area) 3655{ 3656 long size; 3657 if (!(area->vm_flags & VM_READ)) 3658 return -EINVAL; 3659 size = area->vm_end - area->vm_start; 3660 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) 3661 return -EINVAL; 3662 area->vm_ops = &snd_pcm_vm_ops_control; 3663 area->vm_private_data = substream; 3664 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3665 return 0; 3666} 3667 3668static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file) 3669{ 3670 /* See pcm_control_mmap_allowed() below. 3671 * Since older alsa-lib requires both status and control mmaps to be 3672 * coupled, we have to disable the status mmap for old alsa-lib, too. 3673 */ 3674 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) && 3675 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)) 3676 return false; 3677 return true; 3678} 3679 3680static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file) 3681{ 3682 if (pcm_file->no_compat_mmap) 3683 return false; 3684 /* Disallow the control mmap when SYNC_APPLPTR flag is set; 3685 * it enforces the user-space to fall back to snd_pcm_sync_ptr(), 3686 * thus it effectively assures the manual update of appl_ptr. 3687 */ 3688 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR) 3689 return false; 3690 return true; 3691} 3692 3693#else /* ! coherent mmap */ 3694/* 3695 * don't support mmap for status and control records. 
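 * User-space has to fall back to the SYNC_PTR ioctl in that case.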
3696 */ 3697#define pcm_status_mmap_allowed(pcm_file) false 3698#define pcm_control_mmap_allowed(pcm_file) false 3699 3700static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, 3701 struct vm_area_struct *area) 3702{ 3703 return -ENXIO; 3704} 3705static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, 3706 struct vm_area_struct *area) 3707{ 3708 return -ENXIO; 3709} 3710#endif /* coherent mmap */ 3711 3712static inline struct page * 3713snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) 3714{ 3715 void *vaddr = substream->runtime->dma_area + ofs; 3716 3717 switch (substream->dma_buffer.dev.type) { 3718#ifdef CONFIG_SND_DMA_SGBUF 3719 case SNDRV_DMA_TYPE_DEV_SG: 3720 case SNDRV_DMA_TYPE_DEV_UC_SG: 3721 return snd_pcm_sgbuf_ops_page(substream, ofs); 3722#endif /* CONFIG_SND_DMA_SGBUF */ 3723 case SNDRV_DMA_TYPE_VMALLOC: 3724 return vmalloc_to_page(vaddr); 3725 default: 3726 return virt_to_page(vaddr); 3727 } 3728} 3729 3730/* 3731 * fault callback for mmapping a RAM page 3732 */ 3733static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3734{ 3735 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3736 struct snd_pcm_runtime *runtime; 3737 unsigned long offset; 3738 struct page * page; 3739 size_t dma_bytes; 3740 3741 if (substream == NULL) 3742 return VM_FAULT_SIGBUS; 3743 runtime = substream->runtime; 3744 offset = vmf->pgoff << PAGE_SHIFT; 3745 dma_bytes = PAGE_ALIGN(runtime->dma_bytes); 3746 if (offset > dma_bytes - PAGE_SIZE) 3747 return VM_FAULT_SIGBUS; 3748 if (substream->ops->page) 3749 page = substream->ops->page(substream, offset); 3750 else 3751 page = snd_pcm_default_page_ops(substream, offset); 3752 if (!page) 3753 return VM_FAULT_SIGBUS; 3754 get_page(page); 3755 vmf->page = page; 3756 return 0; 3757} 3758 3759static const struct vm_operations_struct snd_pcm_vm_ops_data = { 3760 .open = snd_pcm_mmap_data_open, 3761 .close = snd_pcm_mmap_data_close, 3762}; 3763 3764static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = { 3765 .open = snd_pcm_mmap_data_open, 3766 .close = snd_pcm_mmap_data_close, 3767 .fault = snd_pcm_mmap_data_fault, 3768}; 3769 3770/* 3771 * mmap the DMA buffer on RAM 3772 */ 3773 3774/** 3775 * snd_pcm_lib_default_mmap - Default PCM data mmap function 3776 * @substream: PCM substream 3777 * @area: VMA 3778 * 3779 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL, 3780 * this function is invoked implicitly. 
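 *
 * Return: Zero if successful, or a negative error code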

/*
 * mmap the DMA buffer on RAM
 */

/**
 * snd_pcm_lib_default_mmap - Default PCM data mmap function
 * @substream: PCM substream
 * @area: VMA
 *
 * This is the default mmap handler for PCM data.  When the mmap pcm_ops
 * callback is NULL, this function is invoked implicitly.
 *
 * Return: zero if successful, or a negative error code
 */
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *area)
{
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
#ifdef CONFIG_GENERIC_ALLOCATOR
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
		return remap_pfn_range(area, area->vm_start,
				substream->dma_buffer.addr >> PAGE_SHIFT,
				area->vm_end - area->vm_start, area->vm_page_prot);
	}
#endif /* CONFIG_GENERIC_ALLOCATOR */
	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
	    (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
	     substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
					 area,
					 substream->runtime->dma_area,
					 substream->runtime->dma_addr,
					 substream->runtime->dma_bytes);
	/* mmap with fault handler */
	area->vm_ops = &snd_pcm_vm_ops_data_fault;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);

/*
 * mmap the DMA buffer on I/O memory area
 */
#if SNDRV_PCM_INFO_MMAP_IOMEM
/**
 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
 * @substream: PCM substream
 * @area: VMA
 *
 * When your hardware uses iomapped pages as the hardware buffer and you
 * want to mmap it, pass this function as the mmap pcm_ops.  Note that this
 * is supposed to work only on limited architectures.
 *
 * Return: zero if successful, or a negative error code
 */
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
			   struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
}
EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
#endif /* SNDRV_PCM_INFO_MMAP_IOMEM */

/*
 * mmap DMA buffer
 */
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
		      struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime;
	long size;
	unsigned long offset;
	size_t dma_bytes;
	int err;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (!(area->vm_flags & (VM_WRITE | VM_READ)))
			return -EINVAL;
	} else {
		if (!(area->vm_flags & VM_READ))
			return -EINVAL;
	}
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
		return -ENXIO;
	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	offset = area->vm_pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if ((size_t)size > dma_bytes)
		return -EINVAL;
	if (offset > dma_bytes - size)
		return -EINVAL;

	area->vm_ops = &snd_pcm_vm_ops_data;
	area->vm_private_data = substream;
	if (substream->ops->mmap)
		err = substream->ops->mmap(substream, area);
	else
		err = snd_pcm_lib_default_mmap(substream, area);
	if (!err)
		atomic_inc(&substream->mmap_count);
	return err;
}
EXPORT_SYMBOL(snd_pcm_mmap_data);
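
/*
 * Illustrative sketch (not used by this file): a driver whose ring buffer
 * lives in device I/O memory can point its mmap pcm_ops at
 * snd_pcm_lib_mmap_iomem(); leaving .mmap NULL makes snd_pcm_mmap_data()
 * above fall back to snd_pcm_lib_default_mmap().  The foo_pcm_* callbacks
 * are hypothetical.
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open		= foo_pcm_open,
 *		.close		= foo_pcm_close,
 *		.hw_params	= foo_pcm_hw_params,
 *		.prepare	= foo_pcm_prepare,
 *		.trigger	= foo_pcm_trigger,
 *		.pointer	= foo_pcm_pointer,
 *		.mmap		= snd_pcm_lib_mmap_iomem,
 *	};
 */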

static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	unsigned long offset;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	offset = area->vm_pgoff << PAGE_SHIFT;
	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS_OLD:
		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
			return -ENXIO;
		fallthrough;
	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
		if (!pcm_status_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_status(substream, file, area);
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD:
		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
			return -ENXIO;
		fallthrough;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
		if (!pcm_control_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_control(substream, file, area);
	default:
		return snd_pcm_mmap_data(substream, file, area);
	}
	return 0;
}

static int snd_pcm_fasync(int fd, struct file *file, int on)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	return fasync_helper(fd, file, on, &runtime->fasync);
}

/*
 * ioctl32 compat
 */
#ifdef CONFIG_COMPAT
#include "pcm_compat.c"
#else
#define snd_pcm_ioctl_compat	NULL
#endif

/*
 * Helpers kept only for binary compatibility with the old API;
 * to be removed eventually.
 */

#ifdef CONFIG_SND_SUPPORT_OLD_API
#define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
#define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))

static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
					       struct snd_pcm_hw_params_old *oparams)
{
	unsigned int i;

	memset(params, 0, sizeof(*params));
	params->flags = oparams->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		params->masks[i].bits[0] = oparams->masks[i];
	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
	params->info = oparams->info;
	params->msbits = oparams->msbits;
	params->rate_num = oparams->rate_num;
	params->rate_den = oparams->rate_den;
	params->fifo_size = oparams->fifo_size;
}

static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
					     struct snd_pcm_hw_params *params)
{
	unsigned int i;

	memset(oparams, 0, sizeof(*oparams));
	oparams->flags = params->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		oparams->masks[i] = params->masks[i].bits[0];
	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
	oparams->info = params->info;
	oparams->msbits = params->msbits;
	oparams->rate_num = params->rate_num;
	oparams->rate_den = params->rate_den;
	oparams->fifo_size = params->fifo_size;
}
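
/*
 * Worked example of the mask remapping above (illustrative only): in the
 * old ABI the interval parameters started right after the three mask
 * parameters, whereas the new ABI reserves indices up to
 * SNDRV_PCM_HW_PARAM_FIRST_INTERVAL (8), so interval-related bits in
 * rmask/cmask are shifted up by 5.  For instance, old bit 3 (the first
 * interval parameter) maps to new bit 8 (SNDRV_PCM_HW_PARAM_SAMPLE_BITS):
 *
 *	__OLD_TO_NEW_MASK(0x00000008) == 0x00000100
 *	__NEW_TO_OLD_MASK(0x00000100) == 0x00000008
 *
 * while the mask-parameter bits 0..2 are passed through unchanged.
 */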

static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user *_oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto out_old;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}

static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user *_oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}

	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}
#endif /* CONFIG_SND_SUPPORT_OLD_API */

#ifndef CONFIG_MMU
static unsigned long snd_pcm_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	struct snd_pcm_substream *substream = pcm_file->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long offset = pgoff << PAGE_SHIFT;

	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
		return (unsigned long)runtime->status;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
		return (unsigned long)runtime->control;
	default:
		return (unsigned long)runtime->dma_area + offset;
	}
}
#else
# define snd_pcm_get_unmapped_area NULL
#endif

/*
 * Register section
 */

const struct file_operations snd_pcm_f_ops[2] = {
	{
		.owner = THIS_MODULE,
		.write = snd_pcm_write,
		.write_iter = snd_pcm_writev,
		.open = snd_pcm_playback_open,
		.release = snd_pcm_release,
		.llseek = no_llseek,
		.poll = snd_pcm_poll,
		.unlocked_ioctl = snd_pcm_ioctl,
		.compat_ioctl = snd_pcm_ioctl_compat,
		.mmap = snd_pcm_mmap,
		.fasync = snd_pcm_fasync,
		.get_unmapped_area = snd_pcm_get_unmapped_area,
	},
	{
		.owner = THIS_MODULE,
		.read = snd_pcm_read,
		.read_iter = snd_pcm_readv,
		.open = snd_pcm_capture_open,
		.release = snd_pcm_release,
		.llseek = no_llseek,
		.poll = snd_pcm_poll,
		.unlocked_ioctl = snd_pcm_ioctl,
		.compat_ioctl = snd_pcm_ioctl_compat,
		.mmap = snd_pcm_mmap,
		.fasync = snd_pcm_fasync,
		.get_unmapped_area = snd_pcm_get_unmapped_area,
	}
};
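
/*
 * Illustrative sketch (user-space side, not part of this file): the typical
 * sequence that ends up in snd_pcm_mmap() and snd_pcm_mmap_data() above maps
 * the data buffer of a playback stream at offset 0 once hw_params has been
 * set up.  "fd" and "buffer_bytes" are hypothetical; error handling is
 * omitted.
 *
 *	void *buf;
 *
 *	buf = mmap(NULL, buffer_bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, SNDRV_PCM_MMAP_OFFSET_DATA);
 *
 * After writing samples into buf, appl_ptr is advanced through the mmapped
 * control record or SNDRV_PCM_IOCTL_SYNC_PTR, as sketched earlier.
 */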