Lines matching refs:hp (references to the identifier "hp" in the HP ZX1 AGP driver)

6  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
76 struct _hp_private *hp = &hp_private;
86 hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
87 switch (hp->io_tlb_ps) {
88 case 0: hp->io_tlb_shift = 12; break;
89 case 1: hp->io_tlb_shift = 13; break;
90 case 2: hp->io_tlb_shift = 14; break;
91 case 3: hp->io_tlb_shift = 16; break;
94 "configuration 0x%x\n", hp->io_tlb_ps);
95 hp->gatt = NULL;
96 hp->gatt_entries = 0;
99 hp->io_page_size = 1 << hp->io_tlb_shift;
100 hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
102 hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
103 hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
105 hp->gart_size = HP_ZX1_GART_SIZE;
106 hp->gatt_entries = hp->gart_size / hp->io_page_size;
108 hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
109 hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
111 if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
113 hp->gatt = NULL;
114 hp->gatt_entries = 0;
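
The matches at file lines 76-114 come from the IOC setup path used when the IO PDIR is shared with the sba_iommu driver. A hedged reconstruction of how these fragments fit together; the function name, the switch default, and the first half of the printk are inferred, not present in the matches:

        static int __init hp_zx1_ioc_shared(void)       /* name inferred */
        {
                struct _hp_private *hp = &hp_private;

                /* sba_iommu has already programmed the IOC; read back
                 * its IO TLB page-size selection and decode it. */
                hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
                switch (hp->io_tlb_ps) {
                case 0: hp->io_tlb_shift = 12; break;
                case 1: hp->io_tlb_shift = 13; break;
                case 2: hp->io_tlb_shift = 14; break;
                case 3: hp->io_tlb_shift = 16; break;
                default:
                        /* inferred error path around the fragment at line 94 */
                        printk(KERN_ERR PFX "Invalid IOTLB page size "
                               "configuration 0x%x\n", hp->io_tlb_ps);
                        hp->gatt = NULL;
                        hp->gatt_entries = 0;
                        return -ENODEV;
                }
                hp->io_page_size = 1 << hp->io_tlb_shift;
                hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

                /* The GART occupies the top of the shared IOVA range. */
                hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
                hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
                hp->gart_size = HP_ZX1_GART_SIZE;
                hp->gatt_entries = hp->gart_size / hp->io_page_size;

                hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
                hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];

                if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
                        /* sba_iommu did not reserve a GART slice for us */
                        hp->gatt = NULL;
                        hp->gatt_entries = 0;
                        return -ENODEV;
                }
                return 0;
        }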
126 struct _hp_private *hp = &hp_private;
134 hp->io_tlb_shift = 16;
135 hp->io_tlb_ps = 3;
137 hp->io_tlb_shift = 14;
138 hp->io_tlb_ps = 2;
140 hp->io_tlb_shift = 13;
141 hp->io_tlb_ps = 1;
143 hp->io_tlb_shift = 12;
144 hp->io_tlb_ps = 0;
146 hp->io_page_size = 1 << hp->io_tlb_shift;
147 hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
149 hp->iova_base = HP_ZX1_IOVA_BASE;
150 hp->gart_size = HP_ZX1_GART_SIZE;
151 hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
153 hp->gatt_entries = hp->gart_size / hp->io_page_size;
154 hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
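
File lines 126-154 are the complementary path where this driver owns the IO PDIR outright and picks the largest IO TLB page size that does not exceed the kernel page size. A sketch; the PAGE_SIZE comparisons and the function name are assumptions inferred from the paired shift/ps assignments:

        static int __init hp_zx1_ioc_owner(void)        /* name inferred */
        {
                struct _hp_private *hp = &hp_private;

                /* Conditions inferred: only the assignments appear above. */
                if (PAGE_SIZE >= 64*1024) {
                        hp->io_tlb_shift = 16;
                        hp->io_tlb_ps = 3;
                } else if (PAGE_SIZE >= 16*1024) {
                        hp->io_tlb_shift = 14;
                        hp->io_tlb_ps = 2;
                } else if (PAGE_SIZE >= 8*1024) {
                        hp->io_tlb_shift = 13;
                        hp->io_tlb_ps = 1;
                } else {
                        hp->io_tlb_shift = 12;
                        hp->io_tlb_ps = 0;
                }
                hp->io_page_size = 1 << hp->io_tlb_shift;
                hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

                hp->iova_base = HP_ZX1_IOVA_BASE;
                hp->gart_size = HP_ZX1_GART_SIZE;
                hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;

                hp->gatt_entries = hp->gart_size / hp->io_page_size;
                /* One u64 PDIR entry per IO page across the whole IOVA range. */
                hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);

                return 0;
        }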
162 struct _hp_private *hp = &hp_private;
164 hp->ioc_regs = ioremap(hpa, 1024);
165 if (!hp->ioc_regs)
172 hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
174 if (hp->io_pdir_owner)
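
Lines 162-174 map the IOC register space and choose between the two setup paths above. Bit 0 of IBASE is the enable bit: if it is clear, nothing else (i.e. sba_iommu) has claimed the IOTLB, so this driver becomes the PDIR owner. A sketch, with the signature and call wiring inferred:

        static int __init hp_zx1_ioc_init(u64 hpa)      /* signature inferred */
        {
                struct _hp_private *hp = &hp_private;

                hp->ioc_regs = ioremap(hpa, 1024);
                if (!hp->ioc_regs)
                        return -ENOMEM;

                /* IBASE bit 0 set means the IOMMU is already enabled,
                 * so the PDIR must be shared rather than owned. */
                hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;

                if (hp->io_pdir_owner)
                        return hp_zx1_ioc_owner();
                return hp_zx1_ioc_shared();
        }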
206 struct _hp_private *hp = &hp_private;
209 hp->lba_regs = ioremap(hpa, 256);
210 if (!hp->lba_regs)
213 hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
215 cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
218 cap, hp->lba_cap_offset);
219 iounmap(hp->lba_regs);
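
Lines 206-219 map the LBA (host bridge) register window and locate its AGP capability; the readl() re-check guards against hp_zx1_lba_find_capability() returning a bogus offset. A sketch, with the error codes inferred:

        static int __init hp_zx1_lba_init(u64 hpa)      /* signature inferred */
        {
                struct _hp_private *hp = &hp_private;
                int cap;

                hp->lba_regs = ioremap(hpa, 256);
                if (!hp->lba_regs)
                        return -ENOMEM;

                hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs,
                                                                PCI_CAP_ID_AGP);

                /* Verify the offset really points at an AGP capability. */
                cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
                if (cap != PCI_CAP_ID_AGP) {
                        printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
                               cap, hp->lba_cap_offset);
                        iounmap(hp->lba_regs);
                        return -ENODEV;
                }
                return 0;
        }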
240 struct _hp_private *hp = &hp_private;
242 agp_bridge->gart_bus_addr = hp->gart_base;
243 agp_bridge->capndx = hp->lba_cap_offset;
244 agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
246 if (hp->io_pdir_owner) {
247 writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
248 readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
249 writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
250 readl(hp->ioc_regs+HP_ZX1_TCNFG);
251 writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
252 readl(hp->ioc_regs+HP_ZX1_IMASK);
253 writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
254 readl(hp->ioc_regs+HP_ZX1_IBASE);
255 writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
256 readl(hp->ioc_regs+HP_ZX1_PCOM);
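
Lines 240-256 program the bridge. Note the pattern: every writel() is followed immediately by a readl() of the same register, which flushes the posted MMIO write so each step completes before the next. The final PCOM write purges the IOTLB for the whole IOVA range. A sketch (function name inferred):

        static int hp_zx1_configure(void)
        {
                struct _hp_private *hp = &hp_private;

                agp_bridge->gart_bus_addr = hp->gart_base;
                agp_bridge->capndx = hp->lba_cap_offset;
                agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);

                if (hp->io_pdir_owner) {
                        writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
                        readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
                        writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
                        readl(hp->ioc_regs+HP_ZX1_TCNFG);
                        writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
                        readl(hp->ioc_regs+HP_ZX1_IMASK);
                        writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE); /* |1 = enable */
                        readl(hp->ioc_regs+HP_ZX1_IBASE);
                        writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
                        readl(hp->ioc_regs+HP_ZX1_PCOM);
                }
                return 0;
        }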
265 struct _hp_private *hp = &hp_private;
267 if (hp->ioc_regs) {
268 if (hp->io_pdir_owner) {
269 writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
270 readq(hp->ioc_regs+HP_ZX1_IBASE);
272 iounmap(hp->ioc_regs);
274 if (hp->lba_regs)
275 iounmap(hp->lba_regs);
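
Lines 265-275 are the teardown: an owning driver disables the IOMMU by clearing IBASE (again with a read-back to post the write) before unmapping both register windows. Sketch (name inferred):

        static void hp_zx1_cleanup(void)
        {
                struct _hp_private *hp = &hp_private;

                if (hp->ioc_regs) {
                        if (hp->io_pdir_owner) {
                                writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
                                readq(hp->ioc_regs+HP_ZX1_IBASE);
                        }
                        iounmap(hp->ioc_regs);
                }
                if (hp->lba_regs)
                        iounmap(hp->lba_regs);
        }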
281 struct _hp_private *hp = &hp_private;
283 writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
284 readq(hp->ioc_regs+HP_ZX1_PCOM);
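
Lines 281-284 are the TLB flush: PCOM takes a base address with log2 of the range size in the low bits, so this purges every IOTLB entry covering the GART. Sketch (signature inferred from the AGP driver interface):

        static void hp_zx1_tlbflush(struct agp_memory *mem)
        {
                struct _hp_private *hp = &hp_private;

                writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
                readq(hp->ioc_regs+HP_ZX1_PCOM);        /* post the write */
        }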
290 struct _hp_private *hp = &hp_private;
293 if (hp->io_pdir_owner) {
294 hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
295 get_order(hp->io_pdir_size));
296 if (!hp->io_pdir) {
299 hp->gatt = NULL;
300 hp->gatt_entries = 0;
303 memset(hp->io_pdir, 0, hp->io_pdir_size);
305 hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
308 for (i = 0; i < hp->gatt_entries; i++) {
309 hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
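
Lines 290-309 build the GATT. When this driver owns the PDIR it must allocate it as physically contiguous pages (the IOC reads it by physical address); either way, every GATT entry is initialized to the scratch page so stray DMA hits a harmless target. A sketch; the printk text, return values, and signature are inferred:

        static int hp_zx1_create_gatt_table(struct agp_bridge_data *bridge)
        {
                struct _hp_private *hp = &hp_private;
                int i;

                if (hp->io_pdir_owner) {
                        hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
                                                get_order(hp->io_pdir_size));
                        if (!hp->io_pdir) {
                                printk(KERN_ERR PFX "Couldn't allocate contiguous "
                                       "memory for I/O PDIR\n");
                                hp->gatt = NULL;
                                hp->gatt_entries = 0;
                                return -ENOMEM;
                        }
                        memset(hp->io_pdir, 0, hp->io_pdir_size);

                        /* The GATT is the slice of the PDIR mapping the GART. */
                        hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
                }

                for (i = 0; i < hp->gatt_entries; i++)
                        hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;

                return 0;
        }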
318 struct _hp_private *hp = &hp_private;
320 if (hp->io_pdir_owner)
321 free_pages((unsigned long) hp->io_pdir,
322 get_order(hp->io_pdir_size));
324 hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
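
Lines 318-324 free it again. The if/else pairing is inferred: an owner frees its PDIR pages, while in the shared case entry 0 is rewritten with the sba_iommu cookie so the IOMMU driver can recognize its reserved GART slice later:

        static int hp_zx1_free_gatt_table(struct agp_bridge_data *bridge)
        {
                struct _hp_private *hp = &hp_private;

                if (hp->io_pdir_owner)
                        free_pages((unsigned long) hp->io_pdir,
                                   get_order(hp->io_pdir_size));
                else
                        hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
                return 0;
        }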
331 struct _hp_private *hp = &hp_private;
341 io_pg_start = hp->io_pages_per_kpage * pg_start;
342 io_pg_count = hp->io_pages_per_kpage * mem->page_count;
343 if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
349 if (hp->gatt[j]) {
365 k < hp->io_pages_per_kpage;
366 k++, j++, paddr += hp->io_page_size) {
367 hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
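
Lines 331-367 insert AGP memory. The key scaling: one kernel page can span several IO pages (io_pages_per_kpage), so pg_start and page_count are multiplied up and the inner k-loop writes one PDIR entry per IO page, OR-ing HP_ZX1_PDIR_VALID_BIT into each physical address. A sketch; the type checks, the cache flush, and the source of paddr are inferred:

        static int hp_zx1_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
        {
                struct _hp_private *hp = &hp_private;
                off_t j, io_pg_start;
                int i, k, io_pg_count;

                /* type-validation and cache-flush steps elided; they are
                 * not among the matched lines */

                io_pg_start = hp->io_pages_per_kpage * pg_start;
                io_pg_count = hp->io_pages_per_kpage * mem->page_count;
                if ((io_pg_start + io_pg_count) > hp->gatt_entries)
                        return -EINVAL;

                /* Refuse to overwrite live entries (the check at line 349). */
                for (j = io_pg_start; j < io_pg_start + io_pg_count; j++)
                        if (hp->gatt[j])
                                return -EBUSY;

                for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
                        unsigned long paddr = page_to_phys(mem->pages[i]); /* inferred */
                        for (k = 0;
                             k < hp->io_pages_per_kpage;
                             k++, j++, paddr += hp->io_page_size)
                                hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
                }

                agp_bridge->driver->tlb_flush(mem);     /* inferred */
                return 0;
        }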
378 struct _hp_private *hp = &hp_private;
386 io_pg_start = hp->io_pages_per_kpage * pg_start;
387 io_pg_count = hp->io_pages_per_kpage * mem->page_count;
389 hp->gatt[i] = agp_bridge->scratch_page;
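
Lines 378-389 are the inverse: the same index scaling, but each covered PDIR entry is rewritten with the scratch page rather than cleared to zero. Sketch (the loop bounds follow from lines 386-389; the trailing flush is inferred):

        static int hp_zx1_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        {
                struct _hp_private *hp = &hp_private;
                int i, io_pg_start, io_pg_count;

                io_pg_start = hp->io_pages_per_kpage * pg_start;
                io_pg_count = hp->io_pages_per_kpage * mem->page_count;
                for (i = io_pg_start; i < io_pg_count + io_pg_start; i++)
                        hp->gatt[i] = agp_bridge->scratch_page;

                agp_bridge->driver->tlb_flush(mem);     /* inferred */
                return 0;
        }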
405 struct _hp_private *hp = &hp_private;
408 command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
412 writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
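
Lines 405-412 enable AGP: read the bridge's AGP_STATUS, derive a mutually supported mode, and write the result to AGP_COMMAND. Everything between the read and the write below is an assumption based on the generic AGP core helpers:

        static void hp_zx1_enable(struct agp_bridge_data *bridge, u32 mode)
        {
                struct _hp_private *hp = &hp_private;
                u32 command;

                command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
                /* Inferred: negotiate against the device's capabilities. */
                command = agp_collect_device_status(bridge, mode, command);
                command |= 0x00000100;          /* AGP enable bit; inferred */

                writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
                agp_device_command(command, (mode & AGP8X_MODE) != 0); /* inferred */
        }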