Lines matching refs: pd (references to the struct psb_mmu_pd pointer pd)

128 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
130 struct drm_device *dev = pd->driver->dev;
135 down_write(&pd->driver->sem);
136 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
138 psb_mmu_flush_pd_locked(pd->driver, 1);
139 pd->hw_context = hw_context;
140 up_write(&pd->driver->sem);
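The block at source lines 128-140 is psb_mmu_set_pd_context(): under the driver's write semaphore it programs the page directory's page-frame address into the hardware context's directory-list base register, flushes the PD so the GPU MMU reloads it, and records the context in pd->hw_context. A minimal reconstruction from the matched lines; the dev_priv line and the register-offset expression (mirroring the hw_context * 4 stride visible in psb_mmu_free_pagedir() below) are assumptions, not part of the match.

void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
        struct drm_device *dev = pd->driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private; /* assumption: used by PSB_WSGX32() */
        /* Assumption: per-context base registers are 4 bytes apart. */
        uint32_t offset = PSB_CR_BIF_DIR_LIST_BASE0 + hw_context * 4;

        down_write(&pd->driver->sem);                   /* exclude concurrent map/unmap */
        PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
        psb_mmu_flush_pd_locked(pd->driver, 1);         /* force the MMU to pick up the new PD */
        pd->hw_context = hw_context;                    /* PD is now hardware-visible */
        up_write(&pd->driver->sem);
}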
168 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
172 if (!pd)
175 pd->p = alloc_page(GFP_DMA32);
176 if (!pd->p)
178 pd->dummy_pt = alloc_page(GFP_DMA32);
179 if (!pd->dummy_pt)
181 pd->dummy_page = alloc_page(GFP_DMA32);
182 if (!pd->dummy_page)
186 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
188 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
191 pd->invalid_pde = 0;
192 pd->invalid_pte = 0;
195 v = kmap(pd->dummy_pt);
197 v[i] = pd->invalid_pte;
199 kunmap(pd->dummy_pt);
201 v = kmap(pd->p);
203 v[i] = pd->invalid_pde;
205 kunmap(pd->p);
207 clear_page(kmap(pd->dummy_page));
208 kunmap(pd->dummy_page);
210 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
211 if (!pd->tables)
214 pd->hw_context = -1;
215 pd->pd_mask = PSB_PTE_VALID;
216 pd->driver = driver;
218 return pd;
221 __free_page(pd->dummy_page);
223 __free_page(pd->dummy_pt);
225 __free_page(pd->p);
227 kfree(pd);
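Source lines 168-227 are the page-directory constructor (psb_mmu_alloc_pd() in this driver). It backs a PD with three GFP_DMA32 pages: the directory itself (pd->p), a dummy page table and a dummy data page. "Invalid" PDE/PTE values are precomputed to point at the dummies so that unmapped addresses resolve harmlessly, the directory and dummy PT are filled with them, and a 1024-entry shadow array of struct psb_mmu_pt pointers is allocated. A hedged reconstruction; the parameter names (trap_pagefaults, invalid_type), the loop bound and the error-label names are assumptions where they are not among the matched lines.

struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                    int trap_pagefaults, int invalid_type)
{
        struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        uint32_t *v;
        int i;

        if (!pd)
                return NULL;

        pd->p = alloc_page(GFP_DMA32);          /* the page directory itself */
        if (!pd->p)
                goto out_err1;
        pd->dummy_pt = alloc_page(GFP_DMA32);   /* PT that every invalid PDE points at */
        if (!pd->dummy_pt)
                goto out_err2;
        pd->dummy_page = alloc_page(GFP_DMA32); /* data page that every invalid PTE points at */
        if (!pd->dummy_page)
                goto out_err3;

        if (!trap_pagefaults) {
                pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
                                                   invalid_type);
                pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
                                                   invalid_type);
        } else {
                /* Leave invalid entries zero so stray accesses fault instead. */
                pd->invalid_pde = 0;
                pd->invalid_pte = 0;
        }

        v = kmap(pd->dummy_pt);                 /* every slot of the dummy PT -> dummy page */
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pte;
        kunmap(pd->dummy_pt);

        v = kmap(pd->p);                        /* every PDE -> dummy PT */
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pde;
        kunmap(pd->p);

        clear_page(kmap(pd->dummy_page));
        kunmap(pd->dummy_page);

        pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
        if (!pd->tables)
                goto out_err4;

        pd->hw_context = -1;                    /* not bound to a hardware context yet */
        pd->pd_mask = PSB_PTE_VALID;
        pd->driver = driver;

        return pd;

out_err4:
        __free_page(pd->dummy_page);
out_err3:
        __free_page(pd->dummy_pt);
out_err2:
        __free_page(pd->p);
out_err1:
        kfree(pd);
        return NULL;
}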
237 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
239 struct psb_mmu_driver *driver = pd->driver;
246 if (pd->hw_context != -1) {
247 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
255 pt = pd->tables[i];
260 vfree(pd->tables);
261 __free_page(pd->dummy_page);
262 __free_page(pd->dummy_pt);
263 __free_page(pd->p);
264 kfree(pd);
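psb_mmu_free_pagedir() (source lines 237-264) is the inverse: if the PD is bound to a hardware context, its directory-list base register is cleared first so the GPU stops walking it, then every page table recorded in the 1024-entry shadow array is released before the dummy pages, the directory page and the descriptor itself. A hedged sketch; the dev/dev_priv setup, psb_mmu_free_pt() and the flush after the register write come from the unmatched surrounding code and are assumptions.

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
        struct psb_mmu_driver *driver = pd->driver;
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private; /* assumption: used by PSB_WSGX32() */
        struct psb_mmu_pt *pt;
        int i;

        down_write(&driver->sem);
        if (pd->hw_context != -1) {
                /* Unhook the PD from its hardware context before tearing it down. */
                PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
                psb_mmu_flush_pd_locked(driver, 1);     /* assumption */
        }

        for (i = 0; i < 1024; ++i) {
                pt = pd->tables[i];
                if (pt)
                        psb_mmu_free_pt(pt);            /* assumption: frees pt->p and pt */
        }

        vfree(pd->tables);
        __free_page(pd->dummy_page);
        __free_page(pd->dummy_pt);
        __free_page(pd->p);
        kfree(pd);
        up_write(&driver->sem);
}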
268 static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
272 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
274 spinlock_t *lock = &pd->driver->lock;
294 *ptes++ = pd->invalid_pte;
297 if (pd->driver->has_clflush && pd->hw_context != -1) {
310 pt->pd = pd;
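psb_mmu_alloc_pt() (source lines 268-310) creates a fresh page table: one GFP_DMA32 page whose entries are seeded with the directory's invalid_pte under the driver spinlock, followed by a clflush pass when the PD is already live on hardware and the CPU supports it. A compressed sketch; pt->count/pt->index initialisation and the psb_clflush() helper are taken from the unmatched neighbouring lines and are assumptions.

static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
        struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
        uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
        uint32_t clflush_count = PAGE_SIZE / clflush_add;
        spinlock_t *lock = &pd->driver->lock;
        uint32_t *ptes;
        uint8_t *clf;
        void *v;
        int i;

        if (!pt)
                return NULL;
        pt->p = alloc_page(GFP_DMA32);
        if (!pt->p) {
                kfree(pt);
                return NULL;
        }

        spin_lock(lock);
        v = kmap_atomic(pt->p);
        clf = (uint8_t *)v;
        ptes = (uint32_t *)v;
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                *ptes++ = pd->invalid_pte;      /* every slot starts out "invalid" */

        if (pd->driver->has_clflush && pd->hw_context != -1) {
                /* PD is live on hardware: push the new PT out of the CPU caches. */
                mb();
                for (i = 0; i < clflush_count; ++i) {
                        psb_clflush(clf);
                        clf += clflush_add;
                }
                mb();
        }
        kunmap_atomic(v);
        spin_unlock(lock);

        pt->count = 0;          /* number of valid PTEs held by this PT */
        pt->pd = pd;
        pt->index = 0;

        return pt;
}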
316 struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
322 spinlock_t *lock = &pd->driver->lock;
325 pt = pd->tables[index];
328 pt = psb_mmu_alloc_pt(pd);
333 if (pd->tables[index]) {
337 pt = pd->tables[index];
341 v = kmap_atomic(pd->p);
342 pd->tables[index] = pt;
343 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
347 if (pd->hw_context != -1) {
348 psb_mmu_clflush(pd->driver, (void *)&v[index]);
349 atomic_set(&pd->driver->needs_tlbflush, 1);
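psb_mmu_pt_alloc_map_lock() (source lines 316-349) returns the page table covering an address with the driver spinlock held and the PT page kmapped; if the slot is empty it allocates a PT outside the lock and re-checks the slot afterwards, because another thread may have installed one in the meantime. Only the winner hooks its PT into the directory and, when the PD is hardware-bound, clflushes the touched PDE and flags a deferred TLB flush. A hedged sketch; psb_mmu_pd_index(), psb_mmu_free_pt() and the ordering of the kunmap relative to the clflush are assumptions.

static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                                    unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);        /* assumption: PDE index helper */
        struct psb_mmu_pt *pt;
        uint32_t *v;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        while (!pt) {
                spin_unlock(lock);
                pt = psb_mmu_alloc_pt(pd);      /* allocate without holding the spinlock */
                if (!pt)
                        return NULL;
                spin_lock(lock);

                if (pd->tables[index]) {
                        /* Lost the race: drop ours and retry with the winner's PT. */
                        spin_unlock(lock);
                        psb_mmu_free_pt(pt);
                        spin_lock(lock);
                        pt = pd->tables[index];
                        continue;
                }

                v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;

                if (pd->hw_context != -1) {
                        /* PD is live: flush the new PDE and request a TLB flush. */
                        psb_mmu_clflush(pd->driver, (void *)&v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(v);
        }
        pt->v = kmap_atomic(pt->p);     /* caller gets the PT mapped, spinlock still held */
        return pt;
}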
356 static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
361 spinlock_t *lock = &pd->driver->lock;
364 pt = pd->tables[index];
375 struct psb_mmu_pd *pd = pt->pd;
380 v = kmap_atomic(pd->p);
381 v[pt->index] = pd->invalid_pde;
382 pd->tables[pt->index] = NULL;
384 if (pd->hw_context != -1) {
385 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
386 atomic_set(&pd->driver->needs_tlbflush, 1);
389 spin_unlock(&pd->driver->lock);
393 spin_unlock(&pd->driver->lock);
405 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
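Its unmap/unlock counterpart (source lines 375-393) undoes the kmap and, if the PT's use count has dropped to zero, points the PDE back at invalid_pde, clears the shadow-array slot and frees the PT; source line 405 is the one-liner that resets an individual PTE to the shared invalid entry. Sketch, with pt->count and the trailing free path taken as assumptions where they are not matched.

static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;

        kunmap_atomic(pt->v);
        if (pt->count == 0) {           /* last valid PTE gone: retire the whole PT */
                v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);    /* assumption: frees pt->p and pt */
                return;
        }
        spin_unlock(&pd->driver->lock);
}

/* Source line 405: point a single PTE back at the shared invalid entry. */
static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
                                          unsigned long addr)
{
        pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}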
410 struct psb_mmu_pd *pd;
413 pd = driver->default_pd;
416 return pd;
422 struct psb_mmu_pd *pd;
424 pd = psb_mmu_get_default_pd(driver);
425 return page_to_pfn(pd->p) << PAGE_SHIFT;
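Source lines 410-425 are two small accessors: one hands back the driver's default page directory under the read semaphore, the other converts that PD's backing page into the physical address the hardware wants. A sketch; the second function's name (here psb_get_default_pd_addr()) is not part of the matches and is an assumption.

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        down_read(&driver->sem);
        pd = driver->default_pd;
        up_read(&driver->sem);

        return pd;
}

uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)  /* assumed name */
{
        struct psb_mmu_pd *pd;

        pd = psb_mmu_get_default_pd(driver);
        return page_to_pfn(pd->p) << PAGE_SHIFT;        /* physical address of the PD page */
}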
499 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
511 unsigned long clflush_add = pd->driver->clflush_add;
512 unsigned long clflush_mask = pd->driver->clflush_mask;
514 if (!pd->driver->has_clflush)
532 pt = psb_mmu_pt_map_lock(pd, addr);
547 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
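psb_mmu_flush_ptes() shows up twice (source lines 499 and 547) because it is built in two variants: the x86 one clflushes every PTE touched by a map or unmap, walking the range page table by page table through psb_mmu_pt_map_lock(), while the other build is a stub since there is no clflush to issue. A deliberately simplified, non-tiled sketch of the x86 walk; the real function batches by cache line using clflush_add/clflush_mask and honours the tile-stride parameters.

static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
                               uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        unsigned long addr = address;
        unsigned long end = address + ((unsigned long)num_pages << PAGE_SHIFT);
        struct psb_mmu_pt *pt;

        if (!pd->driver->has_clflush)
                return;                 /* nothing to flush without clflush */

        /* Simplified: flush one PTE per page; the full version steps by
         * pd->driver->clflush_add and uses desired_tile_stride/hw_tile_stride
         * to split the range into rows. */
        for (; addr < end; addr += PAGE_SIZE) {
                pt = psb_mmu_pt_map_lock(pd, addr);
                if (!pt)
                        continue;
                psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
                psb_mmu_pt_unmap_unlock(pt);
        }
}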
555 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
564 down_read(&pd->driver->sem);
571 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
583 if (pd->hw_context != -1)
584 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
586 up_read(&pd->driver->sem);
588 if (pd->hw_context != -1)
589 psb_mmu_flush(pd->driver);
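psb_mmu_remove_pfn_sequence() (source lines 555-589) shows the common walking pattern: take the driver semaphore for read, split the virtual range at page-directory boundaries, map each covering PT, reset its PTEs to invalid and drop the PT's use count, then (for hardware-bound PDs) clflush the touched PTEs and flush the GPU TLB after releasing the semaphore. psb_mmu_remove_pages(), psb_mmu_insert_pfn_sequence() and psb_mmu_insert_pages() below follow the same skeleton, differing in tile handling and in the PTE value written. A hedged sketch; psb_pd_addr_end() and psb_mmu_invalidate_pte() come from the unmatched surrounding code.

void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                 unsigned long address, uint32_t num_pages)
{
        struct psb_mmu_pt *pt;
        unsigned long addr = address;
        unsigned long end = address + (num_pages << PAGE_SHIFT);
        unsigned long next;
        unsigned long f_address = address;      /* keep the start for the flush */

        down_read(&pd->driver->sem);

        do {
                next = psb_pd_addr_end(addr, end);      /* end of this PT's coverage */
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        goto out;
                do {
                        psb_mmu_invalidate_pte(pt, addr);
                        --pt->count;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);
        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}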
594 void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
616 down_read(&pd->driver->sem);
627 pt = psb_mmu_pt_map_lock(pd, addr);
640 if (pd->hw_context != -1)
641 psb_mmu_flush_ptes(pd, f_address, num_pages,
644 up_read(&pd->driver->sem);
646 if (pd->hw_context != -1)
647 psb_mmu_flush(pd->driver);
650 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
662 down_read(&pd->driver->sem);
669 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
685 if (pd->hw_context != -1)
686 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
688 up_read(&pd->driver->sem);
690 if (pd->hw_context != -1)
691 psb_mmu_flush(pd->driver);
696 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
724 down_read(&pd->driver->sem);
733 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
751 if (pd->hw_context != -1)
752 psb_mmu_flush_ptes(pd, f_address, num_pages,
755 up_read(&pd->driver->sem);
757 if (pd->hw_context != -1)
758 psb_mmu_flush(pd->driver);
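psb_mmu_insert_pages() (source lines 696-758) is the main mapping path: under the read semaphore it walks the range, allocates/maps the PT for each chunk, builds a PTE for every struct page with psb_mmu_mask_pte(), bumps the PT's use count, and finally clflushes the touched PTEs and flushes the GPU TLB if the PD is bound to hardware. A simplified sketch that ignores the row/tile splitting the real code performs; psb_mmu_set_pte() is the driver's PTE-store helper and is an assumption here.

int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                         unsigned long address, uint32_t num_pages,
                         uint32_t desired_tile_stride, uint32_t hw_tile_stride,
                         int type)
{
        unsigned long f_address = address;      /* keep the start for the flush */
        unsigned long addr = address;
        unsigned long end = address + ((unsigned long)num_pages << PAGE_SHIFT);
        unsigned long next;
        struct psb_mmu_pt *pt;
        uint32_t pte;
        int ret = 0;

        down_read(&pd->driver->sem);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt) {
                        ret = -ENOMEM;
                        goto out;
                }
                do {
                        pte = psb_mmu_mask_pte(page_to_pfn(*pages++), type);
                        psb_mmu_set_pte(pt, addr, pte); /* assumption: writes pt->v[...] */
                        pt->count++;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);
        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return ret;
}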
763 int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
769 spinlock_t *lock = &pd->driver->lock;
771 down_read(&pd->driver->sem);
772 pt = psb_mmu_pt_map_lock(pd, virtual);
777 v = kmap_atomic(pd->p);
782 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
783 !(pd->invalid_pte & PSB_PTE_VALID)) {
788 *pfn = pd->invalid_pte >> PAGE_SHIFT;
800 up_read(&pd->driver->sem);
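Finally, psb_mmu_virtual_to_pfn() (source lines 763-800) resolves a GPU-virtual address back to a pfn: if a PT covers the address it reads the PTE and rejects it unless PSB_PTE_VALID is set; if no PT exists it inspects the PDE under the spinlock and only reports the dummy page's pfn when the directory really holds the shared invalid entry. Reconstruction from the matched lines, with psb_mmu_pd_index()/psb_mmu_pt_index() assumed.

int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                           unsigned long *pfn)
{
        int ret;
        struct psb_mmu_pt *pt;
        uint32_t tmp;
        spinlock_t *lock = &pd->driver->lock;

        down_read(&pd->driver->sem);
        pt = psb_mmu_pt_map_lock(pd, virtual);
        if (!pt) {
                uint32_t *v;

                /* No PT: read the PDE and accept only the shared dummy mapping. */
                spin_lock(lock);
                v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
                kunmap_atomic(v);
                spin_unlock(lock);

                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
                    !(pd->invalid_pte & PSB_PTE_VALID)) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = 0;
                *pfn = pd->invalid_pte >> PAGE_SHIFT;
                goto out;
        }
        tmp = pt->v[psb_mmu_pt_index(virtual)];
        if (!(tmp & PSB_PTE_VALID)) {
                ret = -EINVAL;
        } else {
                ret = 0;
                *pfn = tmp >> PAGE_SHIFT;
        }
        psb_mmu_pt_unmap_unlock(pt);
out:
        up_read(&pd->driver->sem);
        return ret;
}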