1/* savage_bci.c -- BCI support for Savage 2 * 3 * Copyright 2004 Felix Kuehling 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sub license, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial portions 15 * of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR 21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF 22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 */ 25 26#include <linux/delay.h> 27#include <linux/pci.h> 28#include <linux/slab.h> 29#include <linux/uaccess.h> 30 31#include <drm/drm_device.h> 32#include <drm/drm_file.h> 33#include <drm/drm_print.h> 34#include <drm/savage_drm.h> 35 36#include "savage_drv.h" 37 38/* Need a long timeout for shadow status updates can take a while 39 * and so can waiting for events when the queue is full. */ 40#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */ 41#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ 42#define SAVAGE_FREELIST_DEBUG 0 43 44static int savage_do_cleanup_bci(struct drm_device *dev); 45 46static int 47savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) 48{ 49 uint32_t mask = dev_priv->status_used_mask; 50 uint32_t threshold = dev_priv->bci_threshold_hi; 51 uint32_t status; 52 int i; 53 54#if SAVAGE_BCI_DEBUG 55 if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold) 56 DRM_ERROR("Trying to emit %d words " 57 "(more than guaranteed space in COB)\n", n); 58#endif 59 60 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { 61 mb(); 62 status = dev_priv->status_ptr[0]; 63 if ((status & mask) < threshold) 64 return 0; 65 udelay(1); 66 } 67 68#if SAVAGE_BCI_DEBUG 69 DRM_ERROR("failed!\n"); 70 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); 71#endif 72 return -EBUSY; 73} 74 75static int 76savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n) 77{ 78 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; 79 uint32_t status; 80 int i; 81 82 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { 83 status = SAVAGE_READ(SAVAGE_STATUS_WORD0); 84 if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed) 85 return 0; 86 udelay(1); 87 } 88 89#if SAVAGE_BCI_DEBUG 90 DRM_ERROR("failed!\n"); 91 DRM_INFO(" status=0x%08x\n", status); 92#endif 93 return -EBUSY; 94} 95 96static int 97savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n) 98{ 99 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; 100 uint32_t status; 101 int i; 102 103 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { 104 status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0); 105 if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed) 106 return 0; 107 udelay(1); 108 } 109 110#if SAVAGE_BCI_DEBUG 111 DRM_ERROR("failed!\n"); 112 DRM_INFO(" status=0x%08x\n", status); 113#endif 114 return -EBUSY; 115} 116 117/* 118 * Waiting for events. 119 * 120 * The BIOSresets the event tag to 0 on mode changes. Therefore we 121 * never emit 0 to the event tag. If we find a 0 event tag we know the 122 * BIOS stomped on it and return success assuming that the BIOS waited 123 * for engine idle. 124 * 125 * Note: if the Xserver uses the event tag it has to follow the same 126 * rule. Otherwise there may be glitches every 2^16 events. 127 */ 128static int 129savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e) 130{ 131 uint32_t status; 132 int i; 133 134 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { 135 mb(); 136 status = dev_priv->status_ptr[1]; 137 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || 138 (status & 0xffff) == 0) 139 return 0; 140 udelay(1); 141 } 142 143#if SAVAGE_BCI_DEBUG 144 DRM_ERROR("failed!\n"); 145 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 146#endif 147 148 return -EBUSY; 149} 150 151static int 152savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e) 153{ 154 uint32_t status; 155 int i; 156 157 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { 158 status = SAVAGE_READ(SAVAGE_STATUS_WORD1); 159 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || 160 (status & 0xffff) == 0) 161 return 0; 162 udelay(1); 163 } 164 165#if SAVAGE_BCI_DEBUG 166 DRM_ERROR("failed!\n"); 167 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 168#endif 169 170 return -EBUSY; 171} 172 173uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, 174 unsigned int flags) 175{ 176 uint16_t count; 177 BCI_LOCALS; 178 179 if (dev_priv->status_ptr) { 180 /* coordinate with Xserver */ 181 count = dev_priv->status_ptr[1023]; 182 if (count < dev_priv->event_counter) 183 dev_priv->event_wrap++; 184 } else { 185 count = dev_priv->event_counter; 186 } 187 count = (count + 1) & 0xffff; 188 if (count == 0) { 189 count++; /* See the comment above savage_wait_event_*. */ 190 dev_priv->event_wrap++; 191 } 192 dev_priv->event_counter = count; 193 if (dev_priv->status_ptr) 194 dev_priv->status_ptr[1023] = (uint32_t) count; 195 196 if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { 197 unsigned int wait_cmd = BCI_CMD_WAIT; 198 if ((flags & SAVAGE_WAIT_2D)) 199 wait_cmd |= BCI_CMD_WAIT_2D; 200 if ((flags & SAVAGE_WAIT_3D)) 201 wait_cmd |= BCI_CMD_WAIT_3D; 202 BEGIN_BCI(2); 203 BCI_WRITE(wait_cmd); 204 } else { 205 BEGIN_BCI(1); 206 } 207 BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count); 208 209 return count; 210} 211 212/* 213 * Freelist management 214 */ 215static int savage_freelist_init(struct drm_device * dev) 216{ 217 drm_savage_private_t *dev_priv = dev->dev_private; 218 struct drm_device_dma *dma = dev->dma; 219 struct drm_buf *buf; 220 drm_savage_buf_priv_t *entry; 221 int i; 222 DRM_DEBUG("count=%d\n", dma->buf_count); 223 224 dev_priv->head.next = &dev_priv->tail; 225 dev_priv->head.prev = NULL; 226 dev_priv->head.buf = NULL; 227 228 dev_priv->tail.next = NULL; 229 dev_priv->tail.prev = &dev_priv->head; 230 dev_priv->tail.buf = NULL; 231 232 for (i = 0; i < dma->buf_count; i++) { 233 buf = dma->buflist[i]; 234 entry = buf->dev_private; 235 236 SET_AGE(&entry->age, 0, 0); 237 entry->buf = buf; 238 239 entry->next = dev_priv->head.next; 240 entry->prev = &dev_priv->head; 241 dev_priv->head.next->prev = entry; 242 dev_priv->head.next = entry; 243 } 244 245 return 0; 246} 247 248static struct drm_buf *savage_freelist_get(struct drm_device * dev) 249{ 250 drm_savage_private_t *dev_priv = dev->dev_private; 251 drm_savage_buf_priv_t *tail = dev_priv->tail.prev; 252 uint16_t event; 253 unsigned int wrap; 254 DRM_DEBUG("\n"); 255 256 UPDATE_EVENT_COUNTER(); 257 if (dev_priv->status_ptr) 258 event = dev_priv->status_ptr[1] & 0xffff; 259 else 260 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; 261 wrap = dev_priv->event_wrap; 262 if (event > dev_priv->event_counter) 263 wrap--; /* hardware hasn't passed the last wrap yet */ 264 265 DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); 266 DRM_DEBUG(" head=0x%04x %d\n", event, wrap); 267 268 if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { 269 drm_savage_buf_priv_t *next = tail->next; 270 drm_savage_buf_priv_t *prev = tail->prev; 271 prev->next = next; 272 next->prev = prev; 273 tail->next = tail->prev = NULL; 274 return tail->buf; 275 } 276 277 DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); 278 return NULL; 279} 280 281void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf) 282{ 283 drm_savage_private_t *dev_priv = dev->dev_private; 284 drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; 285 286 DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); 287 288 if (entry->next != NULL || entry->prev != NULL) { 289 DRM_ERROR("entry already on freelist.\n"); 290 return; 291 } 292 293 prev = &dev_priv->head; 294 next = prev->next; 295 prev->next = entry; 296 next->prev = entry; 297 entry->prev = prev; 298 entry->next = next; 299} 300 301/* 302 * Command DMA 303 */ 304static int savage_dma_init(drm_savage_private_t * dev_priv) 305{ 306 unsigned int i; 307 308 dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / 309 (SAVAGE_DMA_PAGE_SIZE * 4); 310 dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages, 311 sizeof(drm_savage_dma_page_t), 312 GFP_KERNEL); 313 if (dev_priv->dma_pages == NULL) 314 return -ENOMEM; 315 316 for (i = 0; i < dev_priv->nr_dma_pages; ++i) { 317 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); 318 dev_priv->dma_pages[i].used = 0; 319 dev_priv->dma_pages[i].flushed = 0; 320 } 321 SET_AGE(&dev_priv->last_dma_age, 0, 0); 322 323 dev_priv->first_dma_page = 0; 324 dev_priv->current_dma_page = 0; 325 326 return 0; 327} 328 329void savage_dma_reset(drm_savage_private_t * dev_priv) 330{ 331 uint16_t event; 332 unsigned int wrap, i; 333 event = savage_bci_emit_event(dev_priv, 0); 334 wrap = dev_priv->event_wrap; 335 for (i = 0; i < dev_priv->nr_dma_pages; ++i) { 336 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); 337 dev_priv->dma_pages[i].used = 0; 338 dev_priv->dma_pages[i].flushed = 0; 339 } 340 SET_AGE(&dev_priv->last_dma_age, event, wrap); 341 dev_priv->first_dma_page = dev_priv->current_dma_page = 0; 342} 343 344void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page) 345{ 346 uint16_t event; 347 unsigned int wrap; 348 349 /* Faked DMA buffer pages don't age. */ 350 if (dev_priv->cmd_dma == &dev_priv->fake_dma) 351 return; 352 353 UPDATE_EVENT_COUNTER(); 354 if (dev_priv->status_ptr) 355 event = dev_priv->status_ptr[1] & 0xffff; 356 else 357 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; 358 wrap = dev_priv->event_wrap; 359 if (event > dev_priv->event_counter) 360 wrap--; /* hardware hasn't passed the last wrap yet */ 361 362 if (dev_priv->dma_pages[page].age.wrap > wrap || 363 (dev_priv->dma_pages[page].age.wrap == wrap && 364 dev_priv->dma_pages[page].age.event > event)) { 365 if (dev_priv->wait_evnt(dev_priv, 366 dev_priv->dma_pages[page].age.event) 367 < 0) 368 DRM_ERROR("wait_evnt failed!\n"); 369 } 370} 371 372uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n) 373{ 374 unsigned int cur = dev_priv->current_dma_page; 375 unsigned int rest = SAVAGE_DMA_PAGE_SIZE - 376 dev_priv->dma_pages[cur].used; 377 unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / 378 SAVAGE_DMA_PAGE_SIZE; 379 uint32_t *dma_ptr; 380 unsigned int i; 381 382 DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", 383 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); 384 385 if (cur + nr_pages < dev_priv->nr_dma_pages) { 386 dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + 387 cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; 388 if (n < rest) 389 rest = n; 390 dev_priv->dma_pages[cur].used += rest; 391 n -= rest; 392 cur++; 393 } else { 394 dev_priv->dma_flush(dev_priv); 395 nr_pages = 396 (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; 397 for (i = cur; i < dev_priv->nr_dma_pages; ++i) { 398 dev_priv->dma_pages[i].age = dev_priv->last_dma_age; 399 dev_priv->dma_pages[i].used = 0; 400 dev_priv->dma_pages[i].flushed = 0; 401 } 402 dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle; 403 dev_priv->first_dma_page = cur = 0; 404 } 405 for (i = cur; nr_pages > 0; ++i, --nr_pages) { 406#if SAVAGE_DMA_DEBUG 407 if (dev_priv->dma_pages[i].used) { 408 DRM_ERROR("unflushed page %u: used=%u\n", 409 i, dev_priv->dma_pages[i].used); 410 } 411#endif 412 if (n > SAVAGE_DMA_PAGE_SIZE) 413 dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE; 414 else 415 dev_priv->dma_pages[i].used = n; 416 n -= SAVAGE_DMA_PAGE_SIZE; 417 } 418 dev_priv->current_dma_page = --i; 419 420 DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n", 421 i, dev_priv->dma_pages[i].used, n); 422 423 savage_dma_wait(dev_priv, dev_priv->current_dma_page); 424 425 return dma_ptr; 426} 427 428static void savage_dma_flush(drm_savage_private_t * dev_priv) 429{ 430 unsigned int first = dev_priv->first_dma_page; 431 unsigned int cur = dev_priv->current_dma_page; 432 uint16_t event; 433 unsigned int wrap, pad, align, len, i; 434 unsigned long phys_addr; 435 BCI_LOCALS; 436 437 if (first == cur && 438 dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) 439 return; 440 441 /* pad length to multiples of 2 entries 442 * align start of next DMA block to multiles of 8 entries */ 443 pad = -dev_priv->dma_pages[cur].used & 1; 444 align = -(dev_priv->dma_pages[cur].used + pad) & 7; 445 446 DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " 447 "pad=%u, align=%u\n", 448 first, cur, dev_priv->dma_pages[first].flushed, 449 dev_priv->dma_pages[cur].used, pad, align); 450 451 /* pad with noops */ 452 if (pad) { 453 uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + 454 cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; 455 dev_priv->dma_pages[cur].used += pad; 456 while (pad != 0) { 457 *dma_ptr++ = BCI_CMD_WAIT; 458 pad--; 459 } 460 } 461 462 mb(); 463 464 /* do flush ... */ 465 phys_addr = dev_priv->cmd_dma->offset + 466 (first * SAVAGE_DMA_PAGE_SIZE + 467 dev_priv->dma_pages[first].flushed) * 4; 468 len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + 469 dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; 470 471 DRM_DEBUG("phys_addr=%lx, len=%u\n", 472 phys_addr | dev_priv->dma_type, len); 473 474 BEGIN_BCI(3); 475 BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1); 476 BCI_WRITE(phys_addr | dev_priv->dma_type); 477 BCI_DMA(len); 478 479 /* fix alignment of the start of the next block */ 480 dev_priv->dma_pages[cur].used += align; 481 482 /* age DMA pages */ 483 event = savage_bci_emit_event(dev_priv, 0); 484 wrap = dev_priv->event_wrap; 485 for (i = first; i < cur; ++i) { 486 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); 487 dev_priv->dma_pages[i].used = 0; 488 dev_priv->dma_pages[i].flushed = 0; 489 } 490 /* age the current page only when it's full */ 491 if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) { 492 SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); 493 dev_priv->dma_pages[cur].used = 0; 494 dev_priv->dma_pages[cur].flushed = 0; 495 /* advance to next page */ 496 cur++; 497 if (cur == dev_priv->nr_dma_pages) 498 cur = 0; 499 dev_priv->first_dma_page = dev_priv->current_dma_page = cur; 500 } else { 501 dev_priv->first_dma_page = cur; 502 dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; 503 } 504 SET_AGE(&dev_priv->last_dma_age, event, wrap); 505 506 DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, 507 dev_priv->dma_pages[cur].used, 508 dev_priv->dma_pages[cur].flushed); 509} 510 511static void savage_fake_dma_flush(drm_savage_private_t * dev_priv) 512{ 513 unsigned int i, j; 514 BCI_LOCALS; 515 516 if (dev_priv->first_dma_page == dev_priv->current_dma_page && 517 dev_priv->dma_pages[dev_priv->current_dma_page].used == 0) 518 return; 519 520 DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", 521 dev_priv->first_dma_page, dev_priv->current_dma_page, 522 dev_priv->dma_pages[dev_priv->current_dma_page].used); 523 524 for (i = dev_priv->first_dma_page; 525 i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; 526 ++i) { 527 uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + 528 i * SAVAGE_DMA_PAGE_SIZE; 529#if SAVAGE_DMA_DEBUG 530 /* Sanity check: all pages except the last one must be full. */ 531 if (i < dev_priv->current_dma_page && 532 dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) { 533 DRM_ERROR("partial DMA page %u: used=%u", 534 i, dev_priv->dma_pages[i].used); 535 } 536#endif 537 BEGIN_BCI(dev_priv->dma_pages[i].used); 538 for (j = 0; j < dev_priv->dma_pages[i].used; ++j) { 539 BCI_WRITE(dma_ptr[j]); 540 } 541 dev_priv->dma_pages[i].used = 0; 542 } 543 544 /* reset to first page */ 545 dev_priv->first_dma_page = dev_priv->current_dma_page = 0; 546} 547 548int savage_driver_load(struct drm_device *dev, unsigned long chipset) 549{ 550 drm_savage_private_t *dev_priv; 551 552 dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL); 553 if (dev_priv == NULL) 554 return -ENOMEM; 555 556 dev->dev_private = (void *)dev_priv; 557 558 dev_priv->chipset = (enum savage_family)chipset; 559 560 pci_set_master(dev->pdev); 561 562 return 0; 563} 564 565 566/* 567 * Initialize mappings. On Savage4 and SavageIX the alignment 568 * and size of the aperture is not suitable for automatic MTRR setup 569 * in drm_legacy_addmap. Therefore we add them manually before the maps are 570 * initialized, and tear them down on last close. 571 */ 572int savage_driver_firstopen(struct drm_device *dev) 573{ 574 drm_savage_private_t *dev_priv = dev->dev_private; 575 unsigned long mmio_base, fb_base, fb_size, aperture_base; 576 /* fb_rsrc and aper_rsrc aren't really used currently, but still exist 577 * in case we decide we need information on the BAR for BSD in the 578 * future. 579 */ 580 unsigned int fb_rsrc, aper_rsrc; 581 int ret = 0; 582 583 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 584 fb_rsrc = 0; 585 fb_base = pci_resource_start(dev->pdev, 0); 586 fb_size = SAVAGE_FB_SIZE_S3; 587 mmio_base = fb_base + SAVAGE_FB_SIZE_S3; 588 aper_rsrc = 0; 589 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; 590 /* this should always be true */ 591 if (pci_resource_len(dev->pdev, 0) == 0x08000000) { 592 /* Don't make MMIO write-cobining! We need 3 593 * MTRRs. */ 594 dev_priv->mtrr_handles[0] = 595 arch_phys_wc_add(fb_base, 0x01000000); 596 dev_priv->mtrr_handles[1] = 597 arch_phys_wc_add(fb_base + 0x02000000, 598 0x02000000); 599 dev_priv->mtrr_handles[2] = 600 arch_phys_wc_add(fb_base + 0x04000000, 601 0x04000000); 602 } else { 603 DRM_ERROR("strange pci_resource_len %08llx\n", 604 (unsigned long long) 605 pci_resource_len(dev->pdev, 0)); 606 } 607 } else if (dev_priv->chipset != S3_SUPERSAVAGE && 608 dev_priv->chipset != S3_SAVAGE2000) { 609 mmio_base = pci_resource_start(dev->pdev, 0); 610 fb_rsrc = 1; 611 fb_base = pci_resource_start(dev->pdev, 1); 612 fb_size = SAVAGE_FB_SIZE_S4; 613 aper_rsrc = 1; 614 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; 615 /* this should always be true */ 616 if (pci_resource_len(dev->pdev, 1) == 0x08000000) { 617 /* Can use one MTRR to cover both fb and 618 * aperture. */ 619 dev_priv->mtrr_handles[0] = 620 arch_phys_wc_add(fb_base, 621 0x08000000); 622 } else { 623 DRM_ERROR("strange pci_resource_len %08llx\n", 624 (unsigned long long) 625 pci_resource_len(dev->pdev, 1)); 626 } 627 } else { 628 mmio_base = pci_resource_start(dev->pdev, 0); 629 fb_rsrc = 1; 630 fb_base = pci_resource_start(dev->pdev, 1); 631 fb_size = pci_resource_len(dev->pdev, 1); 632 aper_rsrc = 2; 633 aperture_base = pci_resource_start(dev->pdev, 2); 634 /* Automatic MTRR setup will do the right thing. */ 635 } 636 637 ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, 638 _DRM_REGISTERS, _DRM_READ_ONLY, 639 &dev_priv->mmio); 640 if (ret) 641 return ret; 642 643 ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, 644 _DRM_WRITE_COMBINING, &dev_priv->fb); 645 if (ret) 646 return ret; 647 648 ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, 649 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, 650 &dev_priv->aperture); 651 return ret; 652} 653 654/* 655 * Delete MTRRs and free device-private data. 656 */ 657void savage_driver_lastclose(struct drm_device *dev) 658{ 659 drm_savage_private_t *dev_priv = dev->dev_private; 660 int i; 661 662 for (i = 0; i < 3; ++i) { 663 arch_phys_wc_del(dev_priv->mtrr_handles[i]); 664 dev_priv->mtrr_handles[i] = 0; 665 } 666} 667 668void savage_driver_unload(struct drm_device *dev) 669{ 670 drm_savage_private_t *dev_priv = dev->dev_private; 671 672 kfree(dev_priv); 673} 674 675static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) 676{ 677 drm_savage_private_t *dev_priv = dev->dev_private; 678 679 if (init->fb_bpp != 16 && init->fb_bpp != 32) { 680 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); 681 return -EINVAL; 682 } 683 if (init->depth_bpp != 16 && init->depth_bpp != 32) { 684 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); 685 return -EINVAL; 686 } 687 if (init->dma_type != SAVAGE_DMA_AGP && 688 init->dma_type != SAVAGE_DMA_PCI) { 689 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); 690 return -EINVAL; 691 } 692 693 dev_priv->cob_size = init->cob_size; 694 dev_priv->bci_threshold_lo = init->bci_threshold_lo; 695 dev_priv->bci_threshold_hi = init->bci_threshold_hi; 696 dev_priv->dma_type = init->dma_type; 697 698 dev_priv->fb_bpp = init->fb_bpp; 699 dev_priv->front_offset = init->front_offset; 700 dev_priv->front_pitch = init->front_pitch; 701 dev_priv->back_offset = init->back_offset; 702 dev_priv->back_pitch = init->back_pitch; 703 dev_priv->depth_bpp = init->depth_bpp; 704 dev_priv->depth_offset = init->depth_offset; 705 dev_priv->depth_pitch = init->depth_pitch; 706 707 dev_priv->texture_offset = init->texture_offset; 708 dev_priv->texture_size = init->texture_size; 709 710 dev_priv->sarea = drm_legacy_getsarea(dev); 711 if (!dev_priv->sarea) { 712 DRM_ERROR("could not find sarea!\n"); 713 savage_do_cleanup_bci(dev); 714 return -EINVAL; 715 } 716 if (init->status_offset != 0) { 717 dev_priv->status = drm_legacy_findmap(dev, init->status_offset); 718 if (!dev_priv->status) { 719 DRM_ERROR("could not find shadow status region!\n"); 720 savage_do_cleanup_bci(dev); 721 return -EINVAL; 722 } 723 } else { 724 dev_priv->status = NULL; 725 } 726 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { 727 dev->agp_buffer_token = init->buffers_offset; 728 dev->agp_buffer_map = drm_legacy_findmap(dev, 729 init->buffers_offset); 730 if (!dev->agp_buffer_map) { 731 DRM_ERROR("could not find DMA buffer region!\n"); 732 savage_do_cleanup_bci(dev); 733 return -EINVAL; 734 } 735 drm_legacy_ioremap(dev->agp_buffer_map, dev); 736 if (!dev->agp_buffer_map->handle) { 737 DRM_ERROR("failed to ioremap DMA buffer region!\n"); 738 savage_do_cleanup_bci(dev); 739 return -ENOMEM; 740 } 741 } 742 if (init->agp_textures_offset) { 743 dev_priv->agp_textures = 744 drm_legacy_findmap(dev, init->agp_textures_offset); 745 if (!dev_priv->agp_textures) { 746 DRM_ERROR("could not find agp texture region!\n"); 747 savage_do_cleanup_bci(dev); 748 return -EINVAL; 749 } 750 } else { 751 dev_priv->agp_textures = NULL; 752 } 753 754 if (init->cmd_dma_offset) { 755 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 756 DRM_ERROR("command DMA not supported on " 757 "Savage3D/MX/IX.\n"); 758 savage_do_cleanup_bci(dev); 759 return -EINVAL; 760 } 761 if (dev->dma && dev->dma->buflist) { 762 DRM_ERROR("command and vertex DMA not supported " 763 "at the same time.\n"); 764 savage_do_cleanup_bci(dev); 765 return -EINVAL; 766 } 767 dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset); 768 if (!dev_priv->cmd_dma) { 769 DRM_ERROR("could not find command DMA region!\n"); 770 savage_do_cleanup_bci(dev); 771 return -EINVAL; 772 } 773 if (dev_priv->dma_type == SAVAGE_DMA_AGP) { 774 if (dev_priv->cmd_dma->type != _DRM_AGP) { 775 DRM_ERROR("AGP command DMA region is not a " 776 "_DRM_AGP map!\n"); 777 savage_do_cleanup_bci(dev); 778 return -EINVAL; 779 } 780 drm_legacy_ioremap(dev_priv->cmd_dma, dev); 781 if (!dev_priv->cmd_dma->handle) { 782 DRM_ERROR("failed to ioremap command " 783 "DMA region!\n"); 784 savage_do_cleanup_bci(dev); 785 return -ENOMEM; 786 } 787 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { 788 DRM_ERROR("PCI command DMA region is not a " 789 "_DRM_CONSISTENT map!\n"); 790 savage_do_cleanup_bci(dev); 791 return -EINVAL; 792 } 793 } else { 794 dev_priv->cmd_dma = NULL; 795 } 796 797 dev_priv->dma_flush = savage_dma_flush; 798 if (!dev_priv->cmd_dma) { 799 DRM_DEBUG("falling back to faked command DMA.\n"); 800 dev_priv->fake_dma.offset = 0; 801 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; 802 dev_priv->fake_dma.type = _DRM_SHM; 803 dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE, 804 GFP_KERNEL); 805 if (!dev_priv->fake_dma.handle) { 806 DRM_ERROR("could not allocate faked DMA buffer!\n"); 807 savage_do_cleanup_bci(dev); 808 return -ENOMEM; 809 } 810 dev_priv->cmd_dma = &dev_priv->fake_dma; 811 dev_priv->dma_flush = savage_fake_dma_flush; 812 } 813 814 dev_priv->sarea_priv = 815 (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle + 816 init->sarea_priv_offset); 817 818 /* setup bitmap descriptors */ 819 { 820 unsigned int color_tile_format; 821 unsigned int depth_tile_format; 822 unsigned int front_stride, back_stride, depth_stride; 823 if (dev_priv->chipset <= S3_SAVAGE4) { 824 color_tile_format = dev_priv->fb_bpp == 16 ? 825 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; 826 depth_tile_format = dev_priv->depth_bpp == 16 ? 827 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; 828 } else { 829 color_tile_format = SAVAGE_BD_TILE_DEST; 830 depth_tile_format = SAVAGE_BD_TILE_DEST; 831 } 832 front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); 833 back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); 834 depth_stride = 835 dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); 836 837 dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | 838 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | 839 (color_tile_format << SAVAGE_BD_TILE_SHIFT); 840 841 dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE | 842 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | 843 (color_tile_format << SAVAGE_BD_TILE_SHIFT); 844 845 dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE | 846 (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | 847 (depth_tile_format << SAVAGE_BD_TILE_SHIFT); 848 } 849 850 /* setup status and bci ptr */ 851 dev_priv->event_counter = 0; 852 dev_priv->event_wrap = 0; 853 dev_priv->bci_ptr = (volatile uint32_t *) 854 ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); 855 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 856 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D; 857 } else { 858 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4; 859 } 860 if (dev_priv->status != NULL) { 861 dev_priv->status_ptr = 862 (volatile uint32_t *)dev_priv->status->handle; 863 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow; 864 dev_priv->wait_evnt = savage_bci_wait_event_shadow; 865 dev_priv->status_ptr[1023] = dev_priv->event_counter; 866 } else { 867 dev_priv->status_ptr = NULL; 868 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 869 dev_priv->wait_fifo = savage_bci_wait_fifo_s3d; 870 } else { 871 dev_priv->wait_fifo = savage_bci_wait_fifo_s4; 872 } 873 dev_priv->wait_evnt = savage_bci_wait_event_reg; 874 } 875 876 /* cliprect functions */ 877 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) 878 dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d; 879 else 880 dev_priv->emit_clip_rect = savage_emit_clip_rect_s4; 881 882 if (savage_freelist_init(dev) < 0) { 883 DRM_ERROR("could not initialize freelist\n"); 884 savage_do_cleanup_bci(dev); 885 return -ENOMEM; 886 } 887 888 if (savage_dma_init(dev_priv) < 0) { 889 DRM_ERROR("could not initialize command DMA\n"); 890 savage_do_cleanup_bci(dev); 891 return -ENOMEM; 892 } 893 894 return 0; 895} 896 897static int savage_do_cleanup_bci(struct drm_device * dev) 898{ 899 drm_savage_private_t *dev_priv = dev->dev_private; 900 901 if (dev_priv->cmd_dma == &dev_priv->fake_dma) { 902 kfree(dev_priv->fake_dma.handle); 903 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && 904 dev_priv->cmd_dma->type == _DRM_AGP && 905 dev_priv->dma_type == SAVAGE_DMA_AGP) 906 drm_legacy_ioremapfree(dev_priv->cmd_dma, dev); 907 908 if (dev_priv->dma_type == SAVAGE_DMA_AGP && 909 dev->agp_buffer_map && dev->agp_buffer_map->handle) { 910 drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 911 /* make sure the next instance (which may be running 912 * in PCI mode) doesn't try to use an old 913 * agp_buffer_map. */ 914 dev->agp_buffer_map = NULL; 915 } 916 917 kfree(dev_priv->dma_pages); 918 919 return 0; 920} 921 922static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 923{ 924 drm_savage_init_t *init = data; 925 926 LOCK_TEST_WITH_RETURN(dev, file_priv); 927 928 switch (init->func) { 929 case SAVAGE_INIT_BCI: 930 return savage_do_init_bci(dev, init); 931 case SAVAGE_CLEANUP_BCI: 932 return savage_do_cleanup_bci(dev); 933 } 934 935 return -EINVAL; 936} 937 938static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) 939{ 940 drm_savage_private_t *dev_priv = dev->dev_private; 941 drm_savage_event_emit_t *event = data; 942 943 DRM_DEBUG("\n"); 944 945 LOCK_TEST_WITH_RETURN(dev, file_priv); 946 947 event->count = savage_bci_emit_event(dev_priv, event->flags); 948 event->count |= dev_priv->event_wrap << 16; 949 950 return 0; 951} 952 953static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) 954{ 955 drm_savage_private_t *dev_priv = dev->dev_private; 956 drm_savage_event_wait_t *event = data; 957 unsigned int event_e, hw_e; 958 unsigned int event_w, hw_w; 959 960 DRM_DEBUG("\n"); 961 962 UPDATE_EVENT_COUNTER(); 963 if (dev_priv->status_ptr) 964 hw_e = dev_priv->status_ptr[1] & 0xffff; 965 else 966 hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; 967 hw_w = dev_priv->event_wrap; 968 if (hw_e > dev_priv->event_counter) 969 hw_w--; /* hardware hasn't passed the last wrap yet */ 970 971 event_e = event->count & 0xffff; 972 event_w = event->count >> 16; 973 974 /* Don't need to wait if 975 * - event counter wrapped since the event was emitted or 976 * - the hardware has advanced up to or over the event to wait for. 977 */ 978 if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) 979 return 0; 980 else 981 return dev_priv->wait_evnt(dev_priv, event_e); 982} 983 984/* 985 * DMA buffer management 986 */ 987 988static int savage_bci_get_buffers(struct drm_device *dev, 989 struct drm_file *file_priv, 990 struct drm_dma *d) 991{ 992 struct drm_buf *buf; 993 int i; 994 995 for (i = d->granted_count; i < d->request_count; i++) { 996 buf = savage_freelist_get(dev); 997 if (!buf) 998 return -EAGAIN; 999 1000 buf->file_priv = file_priv; 1001 1002 if (copy_to_user(&d->request_indices[i], 1003 &buf->idx, sizeof(buf->idx))) 1004 return -EFAULT; 1005 if (copy_to_user(&d->request_sizes[i], 1006 &buf->total, sizeof(buf->total))) 1007 return -EFAULT; 1008 1009 d->granted_count++; 1010 } 1011 return 0; 1012} 1013 1014int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) 1015{ 1016 struct drm_device_dma *dma = dev->dma; 1017 struct drm_dma *d = data; 1018 int ret = 0; 1019 1020 LOCK_TEST_WITH_RETURN(dev, file_priv); 1021 1022 /* Please don't send us buffers. 1023 */ 1024 if (d->send_count != 0) { 1025 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1026 task_pid_nr(current), d->send_count); 1027 return -EINVAL; 1028 } 1029 1030 /* We'll send you buffers. 1031 */ 1032 if (d->request_count < 0 || d->request_count > dma->buf_count) { 1033 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1034 task_pid_nr(current), d->request_count, dma->buf_count); 1035 return -EINVAL; 1036 } 1037 1038 d->granted_count = 0; 1039 1040 if (d->request_count) { 1041 ret = savage_bci_get_buffers(dev, file_priv, d); 1042 } 1043 1044 return ret; 1045} 1046 1047void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) 1048{ 1049 struct drm_device_dma *dma = dev->dma; 1050 drm_savage_private_t *dev_priv = dev->dev_private; 1051 int release_idlelock = 0; 1052 int i; 1053 1054 if (!dma) 1055 return; 1056 if (!dev_priv) 1057 return; 1058 if (!dma->buflist) 1059 return; 1060 1061 if (file_priv->master && file_priv->master->lock.hw_lock) { 1062 drm_legacy_idlelock_take(&file_priv->master->lock); 1063 release_idlelock = 1; 1064 } 1065 1066 for (i = 0; i < dma->buf_count; i++) { 1067 struct drm_buf *buf = dma->buflist[i]; 1068 drm_savage_buf_priv_t *buf_priv = buf->dev_private; 1069 1070 if (buf->file_priv == file_priv && buf_priv && 1071 buf_priv->next == NULL && buf_priv->prev == NULL) { 1072 uint16_t event; 1073 DRM_DEBUG("reclaimed from client\n"); 1074 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); 1075 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); 1076 savage_freelist_put(dev, buf); 1077 } 1078 } 1079 1080 if (release_idlelock) 1081 drm_legacy_idlelock_release(&file_priv->master->lock); 1082} 1083 1084const struct drm_ioctl_desc savage_ioctls[] = { 1085 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1086 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), 1087 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), 1088 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), 1089}; 1090 1091int savage_max_ioctl = ARRAY_SIZE(savage_ioctls); 1092