/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"

/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt whose backing pages are to be freed.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);

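	/*
	 * Example (editorial sketch, not part of the TTM API): a driver's
	 * ttm_tt_create implementation typically allocates its TT object and
	 * hands it to ttm_tt_init() from ttm_tt.h. The mydrv_* name is
	 * hypothetical, and the exact ttm_tt_init() parameter list has varied
	 * between kernel versions, so treat this only as an illustration of
	 * the "allocate, init, return NULL on failure" pattern:
	 *
	 *	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
	 *						  uint32_t page_flags)
	 *	{
	 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	 *
	 *		if (!tt)
	 *			return NULL;
	 *		if (ttm_tt_init(tt, bo, page_flags)) {
	 *			kfree(tt);
	 *			return NULL;
	 *		}
	 *		return tt;
	 *	}
	 */
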
	/**
	 * ttm_tt_bind
	 *
	 * @bdev: Pointer to a ttm device
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_resource describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			   struct ttm_resource *bo_mem);

	/**
	 * ttm_tt_unbind
	 *
	 * @bdev: Pointer to a ttm device
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);

	/**
	 * ttm_tt_destroy
	 *
	 * @bdev: Pointer to a ttm device
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy(), so
	 * do not call ttm_tt_destroy() from this callback or an infinite loop
	 * will result.
	 */
	void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */
	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);

	/**
	 * Hook to notify the driver about a buffer move so it
	 * can do tiling things and book-keeping.
	 *
	 * @evict: whether this move is evicting the buffer from the graphics
	 * address space
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

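	/*
	 * Example (editorial sketch, not part of the TTM API): a driver can
	 * veto evictions it considers pointless and defer everything else to
	 * the generic helper ttm_bo_eviction_valuable() declared in
	 * ttm_bo_api.h. The mydrv_* names and the is_always_resident flag
	 * are hypothetical:
	 *
	 *	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
	 *					    const struct ttm_place *place)
	 *	{
	 *		if (to_mydrv_bo(bo)->is_always_resident)
	 *			return false;
	 *		return ttm_bo_eviction_valuable(bo, place);
	 *	}
	 */
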
	/**
	 * Driver callback for mapping IO memory (for bo_move_memcpy,
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. Calls to io_mem_reserve and
	 * io_mem_free are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
			      struct ttm_resource *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev,
			    struct ttm_resource *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * struct ttm_bo_driver member del_from_lru_notify
	 *
	 * @bo: the buffer object deleted from lru
	 *
	 * Notify the driver that a BO was removed from the LRU list.
	 */
	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};

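/*
 * Example (editorial sketch, not part of the TTM API): drivers usually fill
 * in a static ops table and pass it to ttm_bo_device_init() declared later
 * in this header. Every mydrv_* symbol is hypothetical; unset members keep
 * their documented defaults (for instance a NULL @move falls back to a
 * memcpy move, and a NULL @verify_access refuses mapping access):
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		.ttm_tt_create		= mydrv_ttm_tt_create,
 *		.ttm_tt_populate	= mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate	= mydrv_ttm_tt_unpopulate,
 *		.ttm_tt_destroy		= mydrv_ttm_tt_destroy,
 *		.eviction_valuable	= mydrv_eviction_valuable,
 *		.evict_flags		= mydrv_evict_flags,
 *		.move			= mydrv_bo_move,
 *		.verify_access		= mydrv_verify_access,
 *		.io_mem_reserve		= mydrv_io_mem_reserve,
 *		.io_mem_free		= mydrv_io_mem_free,
 *	};
 */
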
/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 * @bo_count: Number of buffer objects currently allocated.
 */

extern struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct page *dummy_read_page;
	spinlock_t lru_lock;

	/**
	 * Protected by ttm_global_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
} ttm_bo_glob;


#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct set up by the driver.
 * @man_drv: An array of resource managers, indexed by memory type and
 * accessed through ttm_manager_type().
 * @vma_manager: Address space manager (pointer).
 * @ddestroy: List of buffer objects with a pending delayed destroy,
 * protected by the global lru_lock.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 * @no_retry: Don't retry allocation if it fails
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_driver *driver;
	/*
	 * Access via ttm_manager_type().
	 */
	struct ttm_resource_manager sysman;
	struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager *vma_manager;

	/*
	 * Protected by the global lru_lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;

	bool no_retry;
};

static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
							    int mem_type)
{
	return bdev->man_drv[mem_type];
}

static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev,
					  int type,
					  struct ttm_resource_manager *manager)
{
	bdev->man_drv[type] = manager;
}

/**
 * struct ttm_lru_bulk_move_pos
 *
 * @first: first BO in the bulk move range
 * @last: last BO in the bulk move range
 *
 * Positions for a lru bulk move.
 */
struct ttm_lru_bulk_move_pos {
	struct ttm_buffer_object *first;
	struct ttm_buffer_object *last;
};

/**
 * struct ttm_lru_bulk_move
 *
 * @tt: first/last lru entry for BOs in the TT domain
 * @vram: first/last lru entry for BOs in the VRAM domain
 * @swap: first/last lru entry for BOs on the swap list
 *
 * Helper structure for bulk moves on the LRU list.
 */
struct ttm_lru_bulk_move {
	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
};

/*
 * ttm_bo.c
 */

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_resource.
 * @ctx: Operation context; controls interruptible sleeps and whether to
 * return immediately (no_wait_gpu) when the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only when no waiting is allowed).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource *mem,
		     struct ttm_operation_ctx *ctx);

int ttm_bo_device_release(struct ttm_bo_device *bdev);

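/*
 * Example (editorial sketch, not part of the TTM API): ttm_bo_mem_space() is
 * normally called from a driver's move/validation path with an operation
 * context describing how it may sleep. The mydrv_* wrapper below is
 * hypothetical and only illustrates the documented return codes:
 *
 *	static int mydrv_find_space(struct ttm_buffer_object *bo,
 *				    struct ttm_placement *placement,
 *				    struct ttm_resource *tmp_mem)
 *	{
 *		struct ttm_operation_ctx ctx = {
 *			.interruptible = true,
 *			.no_wait_gpu = false,
 *		};
 *		int ret;
 *
 *		ret = ttm_bo_mem_space(bo, placement, tmp_mem, &ctx);
 *		if (ret == -ERESTARTSYS)
 *			return ret;	// interrupted by a signal
 *		if (ret)
 *			return ret;	// -ENOMEM/-EBUSY: no space could be made
 *		// tmp_mem now describes the allocated space.
 *		return 0;
 *	}
 */
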
/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @need_dma32: Tell TTM that the device needs pages allocated below the
 * 4 GiB boundary (DMA32 zone).
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       struct drm_vma_offset_manager *vma_manager,
		       bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is provided.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;
		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: The ww_acquire_ctx used for the earlier, failed reservation
 * attempt.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}

static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	bo->mem = *new_mem;
	new_mem->mm_node = NULL;
}

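/*
 * Example (editorial sketch, not part of the TTM API): the reservation
 * helpers above are ww_mutex based, so the usual pattern is to retry via
 * the slowpath after an -EDEADLK back-off. mydrv_lock_bo() is hypothetical
 * and assumes the caller has already dropped every other reservation it
 * held before taking the slowpath:
 *
 *	static int mydrv_lock_bo(struct ttm_buffer_object *bo,
 *				 struct ww_acquire_ctx *ticket)
 *	{
 *		int ret = ttm_bo_reserve(bo, true, false, ticket);
 *
 *		if (ret == -EDEADLK)
 *			ret = ttm_bo_reserve_slowpath(bo, true, ticket);
 *		return ret;
 *	}
 *
 * The matching unlock is ttm_bo_unreserve(), declared below.
 */
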
/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from @new_mem to the memory of the buffer object @bo.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	struct ttm_resource *old_mem = &bo->mem;

	WARN_ON(old_mem->mm_node != NULL);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_resource *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context with interruptible and no_wait_gpu parameters.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context with interruptible and no_wait_gpu parameters.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

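/*
 * Example (editorial sketch, not part of the TTM API): a typical
 * &ttm_bo_driver.move implementation tries an accelerated copy first and
 * falls back to ttm_bo_move_memcpy() declared above when no suitable engine
 * is available. mydrv_copy_with_blit() is hypothetical:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_resource *new_mem)
 *	{
 *		int ret = mydrv_copy_with_blit(bo, ctx, new_mem);
 *
 *		if (ret != -ENODEV)
 *			return ret;
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */
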
/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);

/**
 * ttm_bo_pipeline_gutting.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting: strip a BO of its backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

/**
 * ttm_bo_tt_bind
 *
 * Bind the object tt to a memory resource.
 */
int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);

/**
 * ttm_bo_tt_unbind
 *
 * Unbind the object tt from a memory resource.
 */
void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);

/**
 * ttm_bo_tt_destroy.
 */
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

/**
 * ttm_range_man_init
 *
 * @bdev: ttm device
 * @type: memory manager type
 * @use_tt: if the memory manager uses tt
 * @p_size: size of area to be managed in pages.
 *
 * Initialise a generic range manager for the selected memory type.
 * The range manager is installed for this device in the type slot.
 */
int ttm_range_man_init(struct ttm_bo_device *bdev,
		       unsigned type, bool use_tt,
		       unsigned long p_size);

/**
 * ttm_range_man_fini
 *
 * @bdev: ttm device
 * @type: memory manager type
 *
 * Remove the generic range manager from a slot and tear it down.
 */
int ttm_range_man_fini(struct ttm_bo_device *bdev,
		       unsigned type);

#endif