/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/highmem.h>

#include <drm/drm_cache.h>

#if defined(CONFIG_X86)
#include <asm/smp.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;
	const int size = boot_cpu_data.x86_clflush_size;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += size)
		clflushopt(page_virtual + i);
	kunmap_atomic(page_virtual);
}

static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb(); /* Full memory barrier used before so that CLFLUSH is ordered */
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb(); /* Also used after CLFLUSH so that all cache lines are flushed */
}
#endif

/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
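
/*
 * Illustrative usage sketch (not part of this file): a driver that has just
 * written to an object's backing pages through the CPU could flush them
 * before the GPU reads the memory, roughly as below. The my_flush_object(),
 * obj->pages and obj->num_pages names are hypothetical.
 *
 *	static void my_flush_object(struct my_object *obj)
 *	{
 *		drm_clflush_pages(obj->pages, obj->num_pages);
 *	}
 */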

/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
 * @st: struct sg_table.
 *
 * Flush every data cache line entry that points to an address in the
 * sg.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb(); /* CLFLUSH is only ordered with a full memory barrier */
		for_each_sgtable_page(st, &sg_iter, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb(); /* Make sure that every cache line entry is flushed */

		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
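
/*
 * Illustrative usage sketch (not part of this file): once an object's
 * sg_table has been populated, all of its pages can be flushed in one call
 * before the memory is handed to the device. The obj->sgt name is
 * hypothetical.
 *
 *	drm_clflush_sg(obj->sgt);
 */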

/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;

		addr = (void *)(((unsigned long)addr) & -size);
		mb(); /* CLFLUSH is only ordered with a full memory barrier */
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb(); /* Ensure that every data cache line entry is flushed */
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
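
/*
 * Illustrative usage sketch (not part of this file): flushing only the
 * CPU-written sub-range of a kernel mapping, e.g. a single relocation
 * entry patched in place. The vaddr and reloc_offset names are hypothetical.
 *
 *	drm_clflush_virt_range(vaddr + reloc_offset, sizeof(u64));
 */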