Lines matching refs:emu (from the emu10k1 memory allocator, sound/pci/emu10k1/memory.c)

21 #define __set_ptb_entry(emu,page,addr) \
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
37 #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr)
39 #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr)
42 static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
47 __set_ptb_entry(emu, page, addr);
48 dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
49 (unsigned int)__get_ptb_entry(emu, page));
53 static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
59 __set_ptb_entry(emu, page, emu->silent_page.addr);
60 dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
61 page, (unsigned int)__get_ptb_entry(emu, page));
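The bare macro forms of set_ptb_entry()/set_silent_ptb() above are the fast path for hosts whose kernel page size equals the chip's 4 kB page. The inline forms handle larger kernel pages: one kernel page then spans several hardware pages, so several consecutive PTB entries get written. A sketch of that loop, assuming UNIT_PAGES is PAGE_SIZE / EMUPAGESIZE:

	/* Sketch: write UNIT_PAGES consecutive entries for one kernel page,
	 * stepping the DMA address by one 4 kB hardware page each time. */
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}

As the listing shows, the silent variant passes emu->silent_page.addr unchanged on every iteration, so all entries point at the same 4 kB of silence.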
95 static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
100 struct list_head *candidate = &emu->mapped_link_head;
103 list_for_each (pos, &emu->mapped_link_head) {
120 size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
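search_empty_map_area() is a first-fit search: it walks the mapped blocks, kept sorted by start page, and returns the first gap that can hold npages entries; page 0 is reserved, and the final gap is bounded by the mode-dependent table size computed above. The same idea in self-contained form:

	/* First-fit sketch (simplified from the driver). Blocks are kept
	 * sorted by start page; return the first gap of at least npages. */
	struct span { int first, last; struct span *next; };

	static int first_fit(const struct span *sorted, int npages, int total)
	{
		int page = 1;				/* page 0 is reserved */
		for (const struct span *s = sorted; s; s = s->next) {
			if (s->first - page >= npages)
				return page;		/* gap before this block */
			page = s->last + 1;
		}
		return total - page >= npages ? page : -1;	/* tail gap */
	}

search_empty() further down applies the same first-fit idea to the memhdr block list when carving out space for synth samples.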
134 static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
139 page = search_empty_map_area(emu, blk->pages, &next);
143 dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
149 list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
153 set_ptb_entry(emu, page, emu->page_addr_table[pg]);
165 static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
172 if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
177 if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
181 end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
189 set_silent_ptb(emu, mpage);
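unmap_memblk() measures the contiguous hole its removal creates, bounded by the neighbouring mapped blocks or, failing those, by page 1 and the mode-dependent table end, and returns that size to the caller; before returning it re-points every entry the block occupied at the silent page so the hardware never fetches freed memory:

	/* Sketch of the clearing step after the block is unlinked. */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++, mpage++)
		set_silent_ptb(emu, mpage);

The returned hole size is what snd_emu10k1_memblk_map() below uses to decide when it has evicted enough.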
202 search_empty(struct snd_emu10k1 *emu, int size)
210 list_for_each(p, &emu->memhdr->block) {
216 if (page + psize > emu->max_cache_pages)
221 blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
233 static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
235 if (addr & ~emu->dma_mask) {
236 dev_err_ratelimited(emu->card->dev,
238 emu->dma_mask, (unsigned long)addr);
242 dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
254 int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
262 spin_lock_irqsave(&emu->memblk_lock, flags);
266 &emu->mapped_order_link_head);
267 spin_unlock_irqrestore(&emu->memblk_lock, flags);
270 if ((err = map_memblk(emu, blk)) < 0) {
273 p = emu->mapped_order_link_head.next;
274 for (; p != &emu->mapped_order_link_head; p = nextp) {
279 size = unmap_memblk(emu, deleted);
282 err = map_memblk(emu, blk);
287 spin_unlock_irqrestore(&emu->memblk_lock, flags);
297 snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
304 if (snd_BUG_ON(!emu))
307 runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
309 hdr = emu->memhdr;
314 (emu->delay_pcm_irq * 2) : 0;
316 blk = search_empty(emu, runtime->dma_bytes + idx);
329 addr = emu->silent_page.addr;
332 if (! is_valid_page(emu, addr)) {
333 dev_err_ratelimited(emu->card->dev,
334 "emu: failure page = %d\n", idx);
338 emu->page_addr_table[page] = addr;
339 emu->page_ptr_table[page] = NULL;
344 err = snd_emu10k1_memblk_map(emu, blk);
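snd_emu10k1_alloc_pages() registers an existing PCM buffer with the page table: it reserves a block large enough for runtime->dma_bytes (plus a small delay_pcm_irq-based slack when the period covers the whole buffer), records the DMA address of each kernel-page-sized chunk, substitutes the silent page for anything past the buffer end, and finally maps the block. A condensed sketch of the registration loop, assuming the chunk addresses come from snd_pcm_sgbuf_get_addr():

	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr = ofs >= runtime->dma_bytes
			? emu->silent_page.addr		/* padding past the data */
			: snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr))
			goto fail;	/* hypothetical label for the error path */
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;	/* PCM pages keep no kernel ptr */
	}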
358 int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
360 if (snd_BUG_ON(!emu || !blk))
362 return snd_emu10k1_synth_free(emu, blk);
374 int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
377 if (emu->iommu_workaround) {
390 &emu->pci->dev, size, dmab);
429 snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
431 struct snd_util_memhdr *hdr = emu->memhdr;
436 spin_lock_irqsave(&emu->memblk_lock, flags);
438 unmap_memblk(emu, blk);
439 spin_unlock_irqrestore(&emu->memblk_lock, flags);
440 synth_free_pages(emu, blk);
473 static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
480 dmab.dev.dev = &emu->pci->dev;
483 if (emu->page_ptr_table[page] == NULL)
485 dmab.area = emu->page_ptr_table[page];
486 dmab.addr = emu->page_addr_table[page];
493 if (emu->iommu_workaround)
497 emu->page_addr_table[page] = 0;
498 emu->page_ptr_table[page] = NULL;
505 static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
511 get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
514 if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
517 if (!is_valid_page(emu, dmab.addr)) {
521 emu->page_addr_table[page] = dmab.addr;
522 emu->page_ptr_table[page] = dmab.area;
529 __synth_free_pages(emu, first_page, last_page);
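synth_alloc_pages() allocates one kernel page per needed slot, validating each DMA address and recording it in both tables; any failure unwinds every page allocated so far, which is what the __synth_free_pages() call above does. The shape of the loop:

	/* Sketch: allocate page by page; on error roll the range back. */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE, &dmab) < 0)
			goto fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;
fail:
	__synth_free_pages(emu, first_page, page - 1);	/* undo partial work */
	return -ENOMEM;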
537 static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
541 get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
542 __synth_free_pages(emu, first_page, last_page);
547 static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
550 if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
552 ptr = emu->page_ptr_table[page];
554 dev_err(emu->card->dev,
565 int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
581 ptr = offset_ptr(emu, page + p->first_page, offset);
595 int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
611 ptr = offset_ptr(emu, page + p->first_page, offset);