Lines matching refs:entry (numbers are line numbers in the source file)
55 struct drm_map_list *entry;
57 list_for_each_entry(entry, &dev->maplist, head) {
66 if (!entry->map ||
67 map->type != entry->map->type ||
68 entry->master != dev->master)
74 return entry;
77 if ((entry->map->offset & 0xffffffff) ==
79 return entry;
83 if (entry->map->offset == map->offset)
84 return entry;
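The fragments at lines 55-84 come from a map-lookup helper: it walks dev->maplist with list_for_each_entry, skips entries whose map is missing, whose type differs, or whose master is not dev->master, and then compares offsets, masking to the low 32 bits for some map types (line 77). Below is a minimal userspace sketch of that pattern; the struct layouts are simplified stand-ins for the real drm_map/drm_map_list types, a plain singly linked list replaces list_head, and the master check at line 68 is omitted.

#include <stddef.h>
#include <stdint.h>

struct map {
    int      type;      /* stand-in for the DRM map type enum */
    uint64_t offset;
};

struct map_list {
    struct map      *map;
    struct map_list *next;  /* the kernel walks a list_head instead */
};

/* Return the first entry matching `wanted` by type and offset; when
 * compare_low_word is set, only the low 32 bits are compared,
 * mirroring the `& 0xffffffff` test at line 77. */
static struct map_list *find_matching_map(struct map_list *head,
                                          const struct map *wanted,
                                          int compare_low_word)
{
    for (struct map_list *entry = head; entry; entry = entry->next) {
        if (!entry->map || wanted->type != entry->map->type)
            continue;
        if (compare_low_word) {
            if ((entry->map->offset & 0xffffffff) ==
                (wanted->offset & 0xffffffff))
                return entry;
        } else if (entry->map->offset == wanted->offset) {
            return entry;
        }
    }
    return NULL;
}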
271 struct drm_agp_mem *entry;
300 list_for_each_entry(entry, &dev->agp->memory, head) {
301 if ((map->offset >= entry->bound) &&
302 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
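Lines 300-302 check whether a requested map lies entirely within one bound AGP region: the start must be at or after entry->bound, and the end at or before bound plus pages * PAGE_SIZE. A small self-contained sketch of that containment test, assuming a 4 KiB page size and simplified field types:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL   /* assumption for illustration */

struct agp_mem {
    uint64_t      bound;   /* start of the bound region */
    unsigned long pages;   /* region length in pages */
};

/* True if [offset, offset + size) fits inside the bound region. */
static bool map_inside_region(const struct agp_mem *entry,
                              uint64_t offset, uint64_t size)
{
    return offset >= entry->bound &&
           offset + size <= entry->bound + entry->pages * PAGE_SIZE;
}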
521 /* Find the list entry for the map and remove it */
669 * \param entry buffer entry where the error occurred.
671 * Frees any pages and buffers associated with the given entry.
674 struct drm_buf_entry *entry)
678 if (entry->seg_count) {
679 for (i = 0; i < entry->seg_count; i++) {
680 if (entry->seglist[i]) {
681 drm_pci_free(dev, entry->seglist[i]);
684 kfree(entry->seglist);
686 entry->seg_count = 0;
689 if (entry->buf_count) {
690 for (i = 0; i < entry->buf_count; i++) {
691 kfree(entry->buflist[i].dev_private);
693 kfree(entry->buflist);
695 entry->buf_count = 0;
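Lines 669-695 belong to drm_cleanup_buf_error(), the error-path helper the allocation routines below fall back on: it frees every DMA segment in seglist, then every buffer's dev_private, then the two arrays themselves, and zeroes the counters so the entry can be reused. A userspace sketch of the same shape, with free() standing in for drm_pci_free() and kfree():

#include <stdlib.h>

struct buf { void *dev_private; };

struct buf_entry {
    int          seg_count;
    void       **seglist;
    int          buf_count;
    struct buf  *buflist;
};

static void cleanup_buf_error(struct buf_entry *entry)
{
    if (entry->seg_count) {
        /* the kernel checks seglist[i] before drm_pci_free(); in
         * userspace free(NULL) is already a no-op */
        for (int i = 0; i < entry->seg_count; i++)
            free(entry->seglist[i]);
        free(entry->seglist);
        entry->seg_count = 0;   /* mark the entry empty */
    }
    if (entry->buf_count) {
        for (int i = 0; i < entry->buf_count; i++)
            free(entry->buflist[i].dev_private);
        free(entry->buflist);
        entry->buf_count = 0;
    }
}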
715 struct drm_buf_entry *entry;
778 entry = &dma->bufs[order];
779 if (entry->buf_count) {
791 entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
792 if (!entry->buflist) {
798 entry->buf_size = size;
799 entry->page_order = page_order;
803 while (entry->buf_count < count) {
804 buf = &entry->buflist[entry->buf_count];
805 buf->idx = dma->buf_count + entry->buf_count;
822 entry->buf_count = count;
823 drm_cleanup_buf_error(dev, entry);
829 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
832 entry->buf_count++;
839 (dma->buf_count + entry->buf_count) *
842 /* Free the entry because it isn't valid */
843 drm_cleanup_buf_error(dev, entry);
850 for (i = 0; i < entry->buf_count; i++) {
851 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
854 dma->buf_count += entry->buf_count;
855 dma->seg_count += entry->seg_count;
860 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
864 request->count = entry->buf_count;
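Lines 715-864 are the AGP addbufs path: allocate a zeroed buflist with kcalloc, initialize one buffer per loop iteration, and on a mid-loop failure set buf_count to the full count before calling drm_cleanup_buf_error() (lines 822-823) so cleanup walks the whole array; the untouched tail slots still hold NULL private pointers from the zeroed allocation, which kfree() tolerates. A simplified sketch of that rollback pattern, reusing the cleanup shape from the previous block:

#include <stdlib.h>

struct buf { int idx; void *dev_private; };

struct buf_entry {
    int         buf_count;
    struct buf *buflist;
};

static void cleanup_buf_error(struct buf_entry *entry)
{
    for (int i = 0; i < entry->buf_count; i++)
        free(entry->buflist[i].dev_private);  /* free(NULL) is a no-op */
    free(entry->buflist);
    entry->buf_count = 0;
}

/* Assumes *entry arrives zeroed, as dma->bufs[order] does. */
static int add_buffers(struct buf_entry *entry, int count, size_t priv_size)
{
    entry->buflist = calloc(count, sizeof(*entry->buflist));
    if (!entry->buflist)
        return -1;                    /* -ENOMEM in the kernel */

    while (entry->buf_count < count) {
        struct buf *buf = &entry->buflist[entry->buf_count];

        buf->idx = entry->buf_count;
        buf->dev_private = calloc(1, priv_size);
        if (!buf->dev_private) {
            /* Bump the count so cleanup covers every slot, exactly
             * as lines 822-823 do, then tear the entry down. */
            entry->buf_count = count;
            cleanup_buf_error(entry);
            return -1;
        }
        entry->buf_count++;
    }
    return 0;
}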
884 struct drm_buf_entry *entry;
928 entry = &dma->bufs[order];
929 if (entry->buf_count) {
941 entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
942 if (!entry->buflist) {
948 entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
949 if (!entry->seglist) {
950 kfree(entry->buflist);
963 kfree(entry->buflist);
964 kfree(entry->seglist);
974 entry->buf_size = size;
975 entry->page_order = page_order;
979 while (entry->buf_count < count) {
985 entry->buf_count = count;
986 entry->seg_count = count;
987 drm_cleanup_buf_error(dev, entry);
993 entry->seglist[entry->seg_count++] = dmah;
1002 offset + size <= total && entry->buf_count < count;
1003 offset += alignment, ++entry->buf_count) {
1004 buf = &entry->buflist[entry->buf_count];
1005 buf->idx = dma->buf_count + entry->buf_count;
1022 entry->buf_count = count;
1023 entry->seg_count = count;
1024 drm_cleanup_buf_error(dev, entry);
1032 entry->buf_count, buf->address);
1038 (dma->buf_count + entry->buf_count) *
1041 /* Free the entry because it isn't valid */
1042 drm_cleanup_buf_error(dev, entry);
1050 for (i = 0; i < entry->buf_count; i++) {
1051 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1062 dma->buf_count += entry->buf_count;
1063 dma->seg_count += entry->seg_count;
1064 dma->page_count += entry->seg_count << page_order;
1065 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
1069 request->count = entry->buf_count;
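The PCI path (lines 884-1069) differs from the AGP one in that it also keeps a seglist of DMA handles (line 993) and carves each allocated segment into several buffers: the for loop at lines 1002-1003 advances offset by alignment and buf_count by one until either the segment is exhausted or the request is satisfied. A standalone sketch of that carving step; the function name and the seg_base parameter are illustrative:

struct buf { int idx; unsigned long offset; };

/* Carve one `total`-byte segment starting at `seg_base` into
 * `size`-byte buffers at `alignment` strides; returns the updated
 * buffer count. */
static int carve_segment(struct buf *buflist, int buf_count, int count,
                         unsigned long seg_base, unsigned long total,
                         unsigned long size, unsigned long alignment)
{
    for (unsigned long offset = 0;
         offset + size <= total && buf_count < count;
         offset += alignment, ++buf_count) {
        struct buf *buf = &buflist[buf_count];

        buf->idx    = buf_count;
        buf->offset = seg_base + offset;  /* position inside the segment */
    }
    return buf_count;
}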
1085 struct drm_buf_entry *entry;
1140 entry = &dma->bufs[order];
1141 if (entry->buf_count) {
1153 entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
1154 if (!entry->buflist) {
1160 entry->buf_size = size;
1161 entry->page_order = page_order;
1165 while (entry->buf_count < count) {
1166 buf = &entry->buflist[entry->buf_count];
1167 buf->idx = dma->buf_count + entry->buf_count;
1185 entry->buf_count = count;
1186 drm_cleanup_buf_error(dev, entry);
1192 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1195 entry->buf_count++;
1202 (dma->buf_count + entry->buf_count) *
1205 /* Free the entry because it isn't valid */
1206 drm_cleanup_buf_error(dev, entry);
1213 for (i = 0; i < entry->buf_count; i++) {
1214 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1217 dma->buf_count += entry->buf_count;
1218 dma->seg_count += entry->seg_count;
1223 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1227 request->count = entry->buf_count;
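All three addbufs paths end the same way (e.g. lines 1038-1069 and 1202-1227): grow the device-wide dma->buflist with krealloc, append pointers to the new entry's buffers, and fold the entry's counts into the global totals (the PCI path also updates page_count and byte_count at lines 1064-1065). A sketch of that merge, using realloc() in place of krealloc and simplified bookkeeping:

#include <stdlib.h>

struct buf { int idx; };

struct buf_entry {
    int         buf_count;
    struct buf *buflist;
};

struct device_dma {
    int          buf_count;
    struct buf **buflist;   /* pointers into every entry's buflist */
};

/* Append entry's buffers to the global table; on allocation failure
 * the old table stays valid, and the real code tears the entry down
 * with drm_cleanup_buf_error() (lines 1041-1042). */
static int merge_entry(struct device_dma *dma, struct buf_entry *entry)
{
    struct buf **tmp = realloc(dma->buflist,
                               (dma->buf_count + entry->buf_count) *
                               sizeof(*dma->buflist));
    if (!tmp)
        return -1;

    dma->buflist = tmp;
    for (int i = 0; i < entry->buf_count; i++)
        dma->buflist[i + dma->buf_count] = &entry->buflist[i];
    dma->buf_count += entry->buf_count;
    return 0;
}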
1380 * updates the respective drm_device_dma::bufs entry low and high water mark.
1390 struct drm_buf_entry *entry;
1406 entry = &dma->bufs[order];
1408 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1410 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1413 entry->low_mark = request->low_mark;
1414 entry->high_mark = request->high_mark;
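Lines 1380-1414 implement the watermark update: look up the drm_buf_entry for the requested size order and store the low/high marks only after checking that each lies within [0, buf_count]. A minimal sketch of that validation, with the error value following the kernel's -EINVAL convention:

#include <errno.h>

struct buf_entry {
    int buf_count;
    int low_mark, high_mark;
};

static int mark_bufs(struct buf_entry *entry, int low, int high)
{
    if (low < 0 || low > entry->buf_count)
        return -EINVAL;
    if (high < 0 || high > entry->buf_count)
        return -EINVAL;

    entry->low_mark  = low;
    entry->high_mark = high;
    return 0;
}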
1598 struct drm_map_list *entry;
1600 list_for_each_entry(entry, &dev->maplist, head) {
1601 if (entry->map && entry->map->type == _DRM_SHM &&
1602 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1603 return entry->map;
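The final fragments (lines 1598-1603) scan the map list for the shared-memory map that carries the hardware lock: type _DRM_SHM with the _DRM_CONTAINS_LOCK flag set. A sketch of that scan over the same simplified list used earlier; the flag and type values here are placeholders, not the real DRM constants:

#include <stddef.h>

#define MAP_SHM        2     /* placeholder for _DRM_SHM */
#define CONTAINS_LOCK  0x1   /* placeholder for _DRM_CONTAINS_LOCK */

struct map {
    int          type;
    unsigned int flags;
};

struct map_list {
    struct map      *map;
    struct map_list *next;
};

static struct map *find_lock_map(struct map_list *head)
{
    for (struct map_list *e = head; e; e = e->next) {
        if (e->map && e->map->type == MAP_SHM &&
            (e->map->flags & CONTAINS_LOCK))
            return e->map;
    }
    return NULL;
}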