Lines Matching refs:afe

3  * mtk-afe-fe-dai.c  --  Mediatek afe fe dai operator
14 #include "mtk-afe-platform-driver.h"
16 #include "mtk-afe-fe-dai.h"
17 #include "mtk-base-afe.h"
41 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
44 struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
45 const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
53 mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
71 dev_err(afe->dev, "hw_constraint_minmax failed\n");
79 dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
83 int irq_id = mtk_dynamic_irq_acquire(afe);
85 if (irq_id != afe->irqs_size) {
89 dev_err(afe->dev, "%s() error: no more asys irq\n",
102 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
103 struct mtk_base_afe_memif *memif = &afe->memif[asoc_rtd_to_cpu(rtd, 0)->id];
108 mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
112 mtk_dynamic_irq_release(afe, irq_id);
124 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
126 struct mtk_base_afe_memif *memif = &afe->memif[id];
132 if (afe->request_dram_resource)
133 afe->request_dram_resource(afe->dev);
135 dev_dbg(afe->dev, "%s(), %s, ch %d, rate %d, fmt %d, dma_addr %pad, dma_area %p, dma_bytes 0x%zx\n",
146 ret = mtk_memif_set_addr(afe, id,
151 dev_err(afe->dev, "%s(), error, id %d, set addr, ret %d\n",
157 ret = mtk_memif_set_channel(afe, id, channels);
159 dev_err(afe->dev, "%s(), error, id %d, set channel %d, ret %d\n",
167 dev_err(afe->dev, "%s(), error, id %d, set rate %d, ret %d\n",
173 ret = mtk_memif_set_format(afe, id, format);
175 dev_err(afe->dev, "%s(), error, id %d, set format %d, ret %d\n",
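The hw_params path above programs the memif in a fixed order: DMA address, channel count, sample rate, then sample format, bailing out on the first error. A condensed sketch of that sequence; where this listing cuts the argument lists off, the values shown (runtime DMA fields, params_*() accessors, the substream-based rate helper) are assumptions rather than quotes from the driver:

	ret = mtk_memif_set_addr(afe, id, substream->runtime->dma_area,
				 substream->runtime->dma_addr,
				 substream->runtime->dma_bytes);
	if (ret)
		return ret;

	ret = mtk_memif_set_channel(afe, id, params_channels(params));
	if (ret)
		return ret;

	/* rate helper name assumed; the listing only shows its error path */
	ret = mtk_memif_set_rate_substream(substream, id, params_rate(params));
	if (ret)
		return ret;

	return mtk_memif_set_format(afe, id, params_format(params));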
187 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
189 if (afe->release_dram_resource)
190 afe->release_dram_resource(afe->dev);
201 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
203 struct mtk_base_afe_memif *memif = &afe->memif[id];
204 struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
210 dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
215 ret = mtk_memif_set_enable(afe, id);
217 dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
223 mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
228 fs = afe->irq_fs(substream, runtime->rate);
233 mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
238 mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
244 ret = mtk_memif_set_disable(afe, id);
246 dev_err(afe->dev, "%s(), error, id %d, memif disable, ret %d\n",
251 mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
254 mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
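The trigger fragments map START/RESUME to memif enable followed by period-IRQ setup, and STOP/SUSPEND to memif disable followed by IRQ disable and acknowledge. A skeleton of that flow; the register writes are reduced to comments because their mask/shift arguments are cut off in this listing:

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		ret = mtk_memif_set_enable(afe, id);
		if (ret)
			return ret;
		/* program the period length into irq_data->irq_cnt_reg */
		fs = afe->irq_fs(substream, runtime->rate);
		if (fs < 0)
			return -EINVAL;
		/* program fs into irq_data->irq_fs_reg, then set the enable
		 * bit in irq_data->irq_en_reg to start period interrupts
		 */
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		ret = mtk_memif_set_disable(afe, id);
		/* clear the enable bit in irq_data->irq_en_reg and write
		 * irq_data->irq_clr_reg to acknowledge any pending IRQ
		 */
		return ret;
	default:
		return -EINVAL;
	}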
267 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
272 if (afe->get_memif_pbuf_size) {
273 pbuf_size = afe->get_memif_pbuf_size(substream);
274 mtk_memif_set_pbuf_size(afe, id, pbuf_size);
291 int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
295 mutex_lock(&afe->irq_alloc_lock);
296 for (i = 0; i < afe->irqs_size; ++i) {
297 if (afe->irqs[i].irq_occupyed == 0) {
298 afe->irqs[i].irq_occupyed = 1;
299 mutex_unlock(&afe->irq_alloc_lock);
303 mutex_unlock(&afe->irq_alloc_lock);
304 return afe->irqs_size;
308 int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
310 mutex_lock(&afe->irq_alloc_lock);
311 if (irq_id >= 0 && irq_id < afe->irqs_size) {
312 afe->irqs[irq_id].irq_occupyed = 0;
313 mutex_unlock(&afe->irq_alloc_lock);
316 mutex_unlock(&afe->irq_alloc_lock);
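mtk_dynamic_irq_acquire() hands out the first free slot in afe->irqs[] under irq_alloc_lock and returns afe->irqs_size when every slot is taken, so callers must treat that value as the failure case, as the startup fragment above does. A hypothetical caller, for illustration only:

/* Hypothetical caller; not part of the driver. */
static int example_claim_asys_irq(struct mtk_base_afe *afe)
{
	int irq_id = mtk_dynamic_irq_acquire(afe);

	if (irq_id == afe->irqs_size)	/* allocator is full */
		return -EBUSY;

	/* ... program and use afe->irqs[irq_id] ... */

	return mtk_dynamic_irq_release(afe, irq_id);
}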
323 struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
324 struct device *dev = afe->dev;
325 struct regmap *regmap = afe->regmap;
328 if (pm_runtime_status_suspended(dev) || afe->suspended)
331 if (!afe->reg_back_up)
332 afe->reg_back_up =
333 devm_kcalloc(dev, afe->reg_back_up_list_num,
336 if (afe->reg_back_up) {
337 for (i = 0; i < afe->reg_back_up_list_num; i++)
338 regmap_read(regmap, afe->reg_back_up_list[i],
339 &afe->reg_back_up[i]);
342 afe->suspended = true;
343 afe->runtime_suspend(dev);
350 struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
351 struct device *dev = afe->dev;
352 struct regmap *regmap = afe->regmap;
355 if (pm_runtime_status_suspended(dev) || !afe->suspended)
358 afe->runtime_resume(dev);
360 if (!afe->reg_back_up) {
363 for (i = 0; i < afe->reg_back_up_list_num; i++)
364 mtk_regmap_write(regmap, afe->reg_back_up_list[i],
365 afe->reg_back_up[i]);
368 afe->suspended = false;
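The suspend fragments lazily allocate afe->reg_back_up and snapshot every register named in afe->reg_back_up_list[]; resume replays the snapshot after afe->runtime_resume(). A condensed sketch of both loops, assuming one unsigned int of backup storage per listed register:

	/* suspend: lazily allocate the backup array, then snapshot */
	if (!afe->reg_back_up)
		afe->reg_back_up = devm_kcalloc(dev, afe->reg_back_up_list_num,
						sizeof(unsigned int), GFP_KERNEL);

	if (afe->reg_back_up)
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			regmap_read(regmap, afe->reg_back_up_list[i],
				    &afe->reg_back_up[i]);

	/* resume: replay the snapshot after afe->runtime_resume(dev) */
	if (afe->reg_back_up)
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);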
373 int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
375 struct mtk_base_afe_memif *memif = &afe->memif[id];
378 dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
382 return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
387 int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
389 struct mtk_base_afe_memif *memif = &afe->memif[id];
392 dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
396 return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
401 int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
406 struct mtk_base_afe_memif *memif = &afe->memif[id];
416 mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
420 mtk_regmap_write(afe->regmap,
424 mtk_regmap_write(afe->regmap,
431 mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
433 mtk_regmap_write(afe->regmap,
443 mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
448 mtk_regmap_update_bits(afe->regmap, memif->data->msb_end_reg,
456 int mtk_memif_set_channel(struct mtk_base_afe *afe,
459 struct mtk_base_afe_memif *memif = &afe->memif[id];
468 mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
480 mtk_regmap_update_bits(afe->regmap,
485 return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
490 static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
493 struct mtk_base_afe_memif *memif = &afe->memif[id];
496 mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
503 int mtk_memif_set_rate(struct mtk_base_afe *afe,
508 if (!afe->get_dai_fs) {
509 dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
514 fs = afe->get_dai_fs(afe, id, rate);
519 return mtk_memif_set_rate_fs(afe, id, fs);
529 struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
533 if (!afe->memif_fs) {
534 dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
539 fs = afe->memif_fs(substream, rate);
544 return mtk_memif_set_rate_fs(afe, id, fs);
548 int mtk_memif_set_format(struct mtk_base_afe *afe,
551 struct mtk_base_afe_memif *memif = &afe->memif[id];
563 if (afe->memif_32bit_supported) {
576 dev_err(afe->dev, "%s() error: unsupported format %d\n",
581 mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
584 mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
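mtk_memif_set_format() boils the PCM format down to an 'HD audio' bit (plus an alignment bit on some SoCs) written via hd_reg/hd_align_reg: 16-bit data runs in normal mode, while 24/32-bit data needs HD mode unless the memif handles 32-bit samples natively (afe->memif_32bit_supported). A hedged sketch of that mapping; the exact per-format choices are an assumption, not a quote from the driver:

	/* Illustrative mapping only. */
	switch (format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		hd_audio = afe->memif_32bit_supported ? 0 : 1;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, format);
		break;
	}
	/* hd_audio is then written into memif->data->hd_reg */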
591 int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
594 const struct mtk_base_memif_data *memif_data = afe->memif[id].data;
599 mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
603 mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,