Lines Matching defs:len (identifier search for len; the matches below appear to come from the m68k Sun-3 DVMA allocator, arch/m68k/sun3/sun3dvma.c, each prefixed with its line number in that file)
24 extern void dvma_unmap_iommu(unsigned long baddr, int len);
73 pr_info("dvma entry: %08x len %08lx\n",
151 static inline unsigned long get_baddr(int len, unsigned long align)
172 newlen = len + ((hole->end - len) & (align-1));
174 newlen = len;
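The two newlen lines (172/174) are the alignment padding inside get_baddr(): a DVMA region appears to be carved from the high end of a free hole, so the requested length is padded until the carved-out base lands on an align boundary. A minimal sketch of that arithmetic, assuming align is a power of two (pad_for_align() and the bare hole_end parameter are illustrative names, not from the file):

    /* Sketch: pad len so that the block carved from the top of a hole,
     * hole_end - newlen, ends up aligned to a power-of-two align. */
    static unsigned long pad_for_align(unsigned long hole_end, unsigned long len,
                                       unsigned long align)
    {
            /* (hole_end - len) & (align - 1) is the slack above the next lower
             * align boundary; folding it into len rounds the resulting base,
             * hole_end - newlen, down to that boundary. */
            return len + ((hole_end - len) & (align - 1));
    }
    /* e.g. hole_end = 0x10500, len = 0x400, align = 0x1000:
     * slack = 0x10100 & 0xfff = 0x100, newlen = 0x500, base = 0x10000. */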
205 unsigned long len;
211 len = dvma_entry_use(baddr);
214 dvma_unmap_iommu(baddr, len);
218 dvma_free_bytes += len;
225 hole->end += len;
226 hole->size += len;
228 } else if(hole->start == (baddr + len)) {
230 hole->size += len;
239 hole->end = baddr + len;
240 hole->size = len;
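Lines 205-240 are the release path: the length recorded for the bus address is looked up (dvma_entry_use()), the IOMMU entries are torn down with dvma_unmap_iommu(), and the freed range is merged back into the free-hole list, either by growing an adjacent hole or by inserting a new one. A simplified, self-contained sketch of that coalescing step, using a plain singly linked list instead of the kernel's list_head machinery (struct hole's layout and return_to_holes() are stand-ins, not the file's actual helpers):

    #include <stdlib.h>

    struct hole {
            unsigned long start, end, size;
            struct hole *next;
    };

    /* Sketch: give the freed range [baddr, baddr + len) back to the hole list. */
    static void return_to_holes(struct hole **holes, unsigned long baddr,
                                unsigned long len)
    {
            struct hole *hole;

            for (hole = *holes; hole; hole = hole->next) {
                    if (hole->end == baddr) {                /* freed block sits just above this hole */
                            hole->end += len;
                            hole->size += len;
                            return;
                    } else if (hole->start == baddr + len) { /* freed block sits just below it */
                            hole->start = baddr;
                            hole->size += len;
                            return;
                    }
            }

            /* No adjacent hole: record a new one at the head of the list. */
            hole = malloc(sizeof(*hole));
            if (!hole)
                    return;
            hole->start = baddr;
            hole->end = baddr + len;
            hole->size = len;
            hole->next = *holes;
            *holes = hole;
    }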
283 unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
289 if(!len)
290 len = 0x800;
292 if(!kaddr || !len) {
293 // pr_err("error: kaddr %lx len %x\n", kaddr, len);
298 pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
301 len += off;
302 len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
309 baddr = get_baddr(len, align);
312 if(!dvma_map_iommu(kaddr, baddr, len))
315 pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
316 len);
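Within dvma_map_align() (lines 283-316), a zero length is bumped to a 0x800-byte default, the sub-page offset of kaddr is folded into len, the total is rounded up to whole DVMA pages, a bus address is reserved with get_baddr(), and only then is the IOMMU programmed; a mapping failure is fatal (pr_crit). The rounding on line 302 is ordinary power-of-two round-up. A small sketch, assuming a hypothetical 0x2000-byte DVMA page (the macro values and dvma_round_len() are illustrative, not taken from the headers):

    #define DVMA_PAGE_SIZE 0x2000UL                 /* illustrative size only */
    #define DVMA_PAGE_MASK (~(DVMA_PAGE_SIZE - 1))  /* keeps whole-page bits */

    /* Sketch: fold the offset within the first page into len, then round up. */
    static unsigned long dvma_round_len(unsigned long kaddr, unsigned long len)
    {
            unsigned long off = kaddr & ~DVMA_PAGE_MASK;  /* offset into first page */

            len += off;
            return (len + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK;
    }
    /* e.g. kaddr = 0x12345, len = 0x1000: off = 0x345, rounded len = 0x2000. */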
338 void *dvma_malloc_align(unsigned long len, unsigned long align)
344 if(!len)
347 pr_debug("dvma_malloc request %lx bytes\n", len);
348 len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
350 if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
353 if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
354 free_pages(kaddr, get_order(len));
360 if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
362 free_pages(kaddr, get_order(len));
366 pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
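dvma_malloc_align() (lines 338-366) ties the pieces together: round the request up to whole DVMA pages, allocate physically contiguous pages with __get_free_pages(GFP_ATOMIC, ...), reserve and program a bus address via dvma_map_align(), then set up the CPU-side mapping with dvma_map_cpu(), freeing the pages again if either mapping step fails. A hedged caller-side sketch; everything here except dvma_malloc_align() itself, whose prototype matches line 338, is illustrative:

    /* Prototype as on line 338; normally it comes from the DVMA header. */
    void *dvma_malloc_align(unsigned long len, unsigned long align);

    /* Sketch: how a sun3 driver-style caller might obtain a DVMA buffer. */
    static int example_setup_dma_buffer(void **out)
    {
            void *buf = dvma_malloc_align(0x1000, 0);  /* len rounded to DVMA pages */

            if (!buf)
                    return -1;  /* page allocation or one of the mappings failed */

            *out = buf;         /* CPU-visible address; the device uses the bus address */
            return 0;
    }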