Lines matching references to `mem` (identifier search, one matched source line per entry with its file line number):
17 * @mem: address space of the endpoint controller
20 * Reimplement get_order() for mem->page_size since the generic get_order
23 static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
26 unsigned int page_shift = ilog2(mem->window.page_size);
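
The matches just above come from a small helper that computes a buddy-style allocation order in units of mem->window.page_size instead of the CPU PAGE_SIZE. Below is a minimal sketch of what that helper plausibly looks like; the lines the listing does not show (the shift and the fls()/fls64() split) are reconstructed assumptions, as are all the sketches that follow, which also assume struct pci_epc / struct pci_epc_mem from <linux/pci-epc.h> and standard kernel helpers from <linux/slab.h>, <linux/io.h> and <linux/bitmap.h>.

        /* Sketch: order of a request measured in mem->window.page_size pages. */
        static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
        {
                unsigned int page_shift = ilog2(mem->window.page_size);

                /* Round up to whole window pages, then take log2 of the count. */
                size--;
                size >>= page_shift;
        #if BITS_PER_LONG == 32
                return fls(size);
        #else
                return fls64(size);
        #endif
        }
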
51 struct pci_epc_mem *mem = NULL;
77 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
78 if (!mem) {
87 kfree(mem);
92 mem->window.phys_base = windows[i].phys_base;
93 mem->window.size = windows[i].size;
94 mem->window.page_size = page_size;
95 mem->bitmap = bitmap;
96 mem->pages = pages;
97 mutex_init(&mem->lock);
98 epc->windows[i] = mem;
101 epc->mem = epc->windows[0];
108 mem = epc->windows[i];
109 kfree(mem->bitmap);
110 kfree(mem);
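
This block of matches is per-window setup: each address window gets a struct pci_epc_mem with a page-allocation bitmap, and epc->mem is aliased to the first window so the single-window helpers keep working. A sketch of the loop and error path these lines appear to belong to; the function name, the bitmap sizing math and the unwind label are assumptions reconstructed around the matched lines.

        /* Sketch: initialise one pci_epc_mem per outbound address window. */
        int pci_epc_multi_mem_init(struct pci_epc *epc,
                                   struct pci_epc_mem_window *windows,
                                   unsigned int num_windows)
        {
                struct pci_epc_mem *mem = NULL;
                unsigned long *bitmap;
                unsigned int page_shift;
                size_t page_size;
                int bitmap_size;
                int pages;
                int ret;
                int i;

                if (!windows || !num_windows)
                        return -EINVAL;

                epc->windows = kcalloc(num_windows, sizeof(*epc->windows), GFP_KERNEL);
                if (!epc->windows)
                        return -ENOMEM;

                for (i = 0; i < num_windows; i++) {
                        page_size = windows[i].page_size;
                        if (page_size < PAGE_SIZE)
                                page_size = PAGE_SIZE;
                        page_shift = ilog2(page_size);
                        pages = windows[i].size >> page_shift;
                        bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

                        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
                        if (!mem) {
                                ret = -ENOMEM;
                                i--;
                                goto err_mem;
                        }

                        bitmap = kzalloc(bitmap_size, GFP_KERNEL);
                        if (!bitmap) {
                                ret = -ENOMEM;
                                kfree(mem);
                                i--;
                                goto err_mem;
                        }

                        /* Matched lines: record the window and its allocator state. */
                        mem->window.phys_base = windows[i].phys_base;
                        mem->window.size = windows[i].size;
                        mem->window.page_size = page_size;
                        mem->bitmap = bitmap;
                        mem->pages = pages;
                        mutex_init(&mem->lock);
                        epc->windows[i] = mem;
                }

                /* Keep the single-window API working: epc->mem points at window 0. */
                epc->mem = epc->windows[0];
                epc->num_windows = num_windows;

                return 0;

        err_mem:
                /* Unwind the windows already set up, bitmap first as in the listing. */
                for (; i >= 0; i--) {
                        mem = epc->windows[i];
                        kfree(mem->bitmap);
                        kfree(mem);
                }
                kfree(epc->windows);

                return ret;
        }
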
140 struct pci_epc_mem *mem;
147 mem = epc->windows[i];
148 kfree(mem->bitmap);
149 kfree(mem);
154 epc->mem = NULL;
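
The teardown matches above mirror that error path: every window's bitmap and descriptor are freed and epc->mem is cleared. A short sketch, with the surrounding guard and the window array cleanup reconstructed.

        /* Sketch: tear down all address windows created at init time. */
        void pci_epc_mem_exit(struct pci_epc *epc)
        {
                struct pci_epc_mem *mem;
                int i;

                if (!epc->num_windows)
                        return;

                for (i = 0; i < epc->num_windows; i++) {
                        mem = epc->windows[i];
                        kfree(mem->bitmap);
                        kfree(mem);
                }
                kfree(epc->windows);

                epc->windows = NULL;
                epc->mem = NULL;
                epc->num_windows = 0;
        }
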
172 struct pci_epc_mem *mem;
180 mem = epc->windows[i];
181 mutex_lock(&mem->lock);
182 align_size = ALIGN(size, mem->window.page_size);
183 order = pci_epc_mem_get_order(mem, align_size);
185 pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
188 page_shift = ilog2(mem->window.page_size);
189 *phys_addr = mem->window.phys_base +
193 bitmap_release_region(mem->bitmap,
195 mutex_unlock(&mem->lock);
198 mutex_unlock(&mem->lock);
201 mutex_unlock(&mem->lock);
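
The allocation matches above show the core of the allocator: walk the windows, round the request up to the window's page size, reserve a naturally aligned region in the bitmap under mem->lock, turn the page number into a physical address, and give the region back if mapping that address fails. A sketch of the loop these lines belong to; the ioremap() call and the exact bail-out structure are reconstructed assumptions.

        /* Sketch: allocate and map a chunk of outbound (PCI) address space. */
        void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
                                             phys_addr_t *phys_addr, size_t size)
        {
                void __iomem *virt_addr = NULL;
                struct pci_epc_mem *mem;
                unsigned int page_shift;
                size_t align_size;
                int pageno;
                int order;
                int i;

                for (i = 0; i < epc->num_windows; i++) {
                        mem = epc->windows[i];
                        mutex_lock(&mem->lock);

                        /* Round up to the window page size, then to a power-of-two order. */
                        align_size = ALIGN(size, mem->window.page_size);
                        order = pci_epc_mem_get_order(mem, align_size);

                        pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
                                                         order);
                        if (pageno >= 0) {
                                page_shift = ilog2(mem->window.page_size);
                                *phys_addr = mem->window.phys_base +
                                        ((phys_addr_t)pageno << page_shift);
                                virt_addr = ioremap(*phys_addr, align_size);
                                if (!virt_addr) {
                                        /* Mapping failed: release the pages, try next window. */
                                        bitmap_release_region(mem->bitmap,
                                                              pageno, order);
                                        mutex_unlock(&mem->lock);
                                        continue;
                                }
                                mutex_unlock(&mem->lock);
                                return virt_addr;
                        }
                        mutex_unlock(&mem->lock);
                }

                return virt_addr;
        }
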
211 struct pci_epc_mem *mem;
215 mem = epc->windows[i];
217 if (phys_addr >= mem->window.phys_base &&
218 phys_addr < (mem->window.phys_base + mem->window.size))
219 return mem;
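
The two matches above are a lookup by physical address: the window whose [phys_base, phys_base + size) range contains the address is returned. A sketch, with only the loop scaffolding reconstructed around the matched condition.

        /* Sketch: find the window that owns a given outbound physical address. */
        static struct pci_epc_mem *pci_epc_get_matching_window(struct pci_epc *epc,
                                                                phys_addr_t phys_addr)
        {
                struct pci_epc_mem *mem;
                int i;

                for (i = 0; i < epc->num_windows; i++) {
                        mem = epc->windows[i];

                        if (phys_addr >= mem->window.phys_base &&
                            phys_addr < (mem->window.phys_base + mem->window.size))
                                return mem;
                }

                return NULL;
        }
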
229 * @virt_addr: virtual address of the allocated mem space
237 struct pci_epc_mem *mem;
243 mem = pci_epc_get_matching_window(epc, phys_addr);
244 if (!mem) {
249 page_size = mem->window.page_size;
252 pageno = (phys_addr - mem->window.phys_base) >> page_shift;
254 order = pci_epc_mem_get_order(mem, size);
255 mutex_lock(&mem->lock);
256 bitmap_release_region(mem->bitmap, pageno, order);
257 mutex_unlock(&mem->lock);
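
The final matches free an earlier allocation: look up the owning window, convert the physical address back into a bitmap page number, recompute the order from the re-aligned size, and release the region under the same lock. A sketch follows; the iounmap() of the mapped address and the error message are reconstructed assumptions.

        /* Sketch: unmap and release an address obtained from the allocator above. */
        void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
                                   void __iomem *virt_addr, size_t size)
        {
                struct pci_epc_mem *mem;
                unsigned int page_shift;
                size_t page_size;
                int pageno;
                int order;

                mem = pci_epc_get_matching_window(epc, phys_addr);
                if (!mem) {
                        pr_err("failed to get matching window\n");
                        return;
                }

                page_size = mem->window.page_size;
                page_shift = ilog2(page_size);
                iounmap(virt_addr);
                pageno = (phys_addr - mem->window.phys_base) >> page_shift;
                size = ALIGN(size, page_size);
                order = pci_epc_mem_get_order(mem, size);
                mutex_lock(&mem->lock);
                bitmap_release_region(mem->bitmap, pageno, order);
                mutex_unlock(&mem->lock);
        }
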