/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware-accelerated CRC-32 variants for Linux on z Systems
 *
 * Use the z/Architecture Vector Extension Facility to accelerate the
 * computation of bitreflected CRC-32 checksums for IEEE 802.3 Ethernet
 * and Castagnoli (CRC-32C).
 *
 * This CRC-32 implementation is bitreflected and processes the
 * least-significant bit first (little-endian).
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>

/* Vector register range containing CRC-32 constants */
#define CONST_PERM_LE2BE	%v9
#define CONST_R2R1		%v10
#define CONST_R4R3		%v11
#define CONST_R5		%v12
#define CONST_RU_POLY		%v13
#define CONST_CRC_POLY		%v14

	.data
	.balign	8

/*
 * The CRC-32 constant block contains reduction constants to fold and
 * process particular chunks of the input data stream in parallel.
 *
 * For the CRC-32 variants, the constants are precomputed according to
 * these definitions:
 *
 *	R1 = [(x^(4*128+32) mod P'(x) << 32)]' << 1
 *	R2 = [(x^(4*128-32) mod P'(x) << 32)]' << 1
 *	R3 = [(x^(128+32) mod P'(x) << 32)]'   << 1
 *	R4 = [(x^(128-32) mod P'(x) << 32)]'   << 1
 *	R5 = [(x^64 mod P'(x) << 32)]'	       << 1
 *	R6 = [(x^32 mod P'(x) << 32)]'	       << 1
 *
 *	The bitreflected Barrett reduction constant, u', is defined as
 *	the bit reversal of floor(x^64 / P(x)),
 *
 *	where P(x) is the polynomial in the normal domain and P'(x) is the
 *	polynomial in the reversed (bitreflected) domain.
 *
 * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
 *
 *	P(x)  = 0x04C11DB7
 *	P'(x) = 0xEDB88320
 *
 * CRC-32C (Castagnoli) polynomials:
 *
 *	P(x)  = 0x1EDC6F41
 *	P'(x) = 0x82F63B78
 */
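/*
 * For illustration only (not part of the kernel build): the constants
 * above can be regenerated offline from these definitions.  The C sketch
 * below shows the core GF(2) arithmetic; the helper name xnmodp() is
 * made up for this example.  It computes x^n mod P(x) by repeated
 * shift-and-reduce; applying the reflect and "<< 1" steps from the
 * definitions above to such results yields the table entries.
 *
 *	// x^n mod P(x) over GF(2).  poly holds P(x) without the implied
 *	// x^32 term, bit i = coefficient of x^i (e.g. 0x04C11DB7).
 *	static uint32_t xnmodp(unsigned int n, uint32_t poly)
 *	{
 *		uint32_t r = 1;			// start with x^0
 *
 *		while (n--) {
 *			int msb = r >> 31;	// coefficient of x^31
 *
 *			r <<= 1;		// multiply by x
 *			if (msb)
 *				r ^= poly;	// reduce the x^32 term
 *		}
 *		return r;
 *	}
 */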

SYM_DATA_START_LOCAL(constants_CRC_32_LE)
	.octa		0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad		0x1c6e41596, 0x154442bd4		# R2, R1
	.quad		0x0ccaa009e, 0x1751997d0		# R4, R3
	.octa		0x163cd6124				# R5
	.octa		0x1F7011641				# u'
	.octa		0x1DB710641				# P'(x) << 1
SYM_DATA_END(constants_CRC_32_LE)

SYM_DATA_START_LOCAL(constants_CRC_32C_LE)
	.octa		0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad		0x09e4addf8, 0x740eef02			# R2, R1
	.quad		0x14cd00bd6, 0xf20c0dfe			# R4, R3
	.octa		0x0dd45aab8				# R5
	.octa		0x0dea713f1				# u'
	.octa		0x105ec76f0				# P'(x) << 1
SYM_DATA_END(constants_CRC_32C_LE)

	.previous

	GEN_BR_THUNK %r14

	.text

/*
 * The CRC-32 functions use these calling conventions:
 *
 * Parameters:
 *
 *	%r2:	Initial CRC value, typically ~0; also used to return the
 *		final CRC value.
 *	%r3:	Input buffer pointer; performance may improve if the
 *		buffer is aligned on a doubleword boundary.
 *	%r4:	Length of the buffer, must be 64 bytes or greater.
 *
 * Register usage:
 *
 *	%r5:	CRC-32 constant pool base pointer.
 *	V0:	Initial CRC value and intermediate constants and results.
 *	V1..V4:	Data for CRC computation.
 *	V5..V8:	Next data chunks that are fetched from the input buffer.
 *	V9:	Constant for BE->LE conversion and shift operations.
 *
 *	V10..V14: CRC-32 constants.
 */
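/*
 * For reference, the C glue code is expected to declare these entry
 * points along these lines (a sketch; the authoritative declarations
 * live in the accompanying C module):
 *
 *	u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 *	u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 *
 * With the s390 ABI, crc then arrives in %r2, buf in %r3, and size in
 * %r4, matching the parameter description above.
 */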

SYM_FUNC_START(crc32_le_vgfm_16)
	larl	%r5,constants_CRC_32_LE
	j	crc32_le_vgfm_generic
SYM_FUNC_END(crc32_le_vgfm_16)

SYM_FUNC_START(crc32c_le_vgfm_16)
	larl	%r5,constants_CRC_32C_LE
	j	crc32_le_vgfm_generic
SYM_FUNC_END(crc32c_le_vgfm_16)

SYM_FUNC_START(crc32_le_vgfm_generic)
	/* Load CRC-32 constants */
	VLM	CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5

	/*
	 * Load the initial CRC value.
	 *
	 * The CRC value is loaded into the rightmost word of the
	 * vector register and is later XORed with the LSB portion
	 * of the loaded input data.
	 */
	VZERO	%v0			/* Clear V0 */
	VLVGF	%v0,%r2,3		/* Load CRC into rightmost word */

	/* Load a 64-byte data chunk and XOR with CRC */
	VLM	%v1,%v4,0,%r3		/* 64 bytes into V1..V4 */
	VPERM	%v1,%v1,%v1,CONST_PERM_LE2BE
	VPERM	%v2,%v2,%v2,CONST_PERM_LE2BE
	VPERM	%v3,%v3,%v3,CONST_PERM_LE2BE
	VPERM	%v4,%v4,%v4,CONST_PERM_LE2BE

	VX	%v1,%v0,%v1		/* V1 ^= CRC */
	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */

	cghi	%r4,64
	jl	.Lless_than_64bytes

.Lfold_64bytes_loop:
	/* Load the next 64-byte data chunk into V5 to V8 */
	VLM	%v5,%v8,0,%r3
	VPERM	%v5,%v5,%v5,CONST_PERM_LE2BE
	VPERM	%v6,%v6,%v6,CONST_PERM_LE2BE
	VPERM	%v7,%v7,%v7,CONST_PERM_LE2BE
	VPERM	%v8,%v8,%v8,CONST_PERM_LE2BE

	/*
	 * Perform a GF(2) multiplication of the doublewords in V1 with
	 * the R1 and R2 reduction constants in CONST_R2R1.  The
	 * intermediate result is then folded (XORed) with the next data
	 * chunk in V5 and stored in V1.  Repeat this step for the register
	 * contents in V2, V3, and V4 respectively.
	 */
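	/*
	 * Illustration only (a hedged C sketch, not kernel code): each
	 * VGFMAG below is, in effect, two 64x64 carry-less multiplications
	 * whose 128-bit products are XORed with each other and with the
	 * next data chunk, i.e. roughly
	 *
	 *	new_v1 = (v1.left (x) R2) ^ (v1.right (x) R1) ^ v5
	 *
	 * where (x) denotes a carry-less multiplication such as:
	 *
	 *	// carry-less (GF(2)) 64x64 -> 128-bit multiply;
	 *	// res[0] = low 64 bits, res[1] = high 64 bits
	 *	static void clmul64(uint64_t a, uint64_t b, uint64_t res[2])
	 *	{
	 *		res[0] = res[1] = 0;
	 *		for (int i = 0; i < 64; i++) {
	 *			if (b & (1ULL << i)) {
	 *				res[0] ^= a << i;
	 *				if (i)
	 *					res[1] ^= a >> (64 - i);
	 *			}
	 *		}
	 *	}
	 */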
	VGFMAG	%v1,CONST_R2R1,%v1,%v5
	VGFMAG	%v2,CONST_R2R1,%v2,%v6
	VGFMAG	%v3,CONST_R2R1,%v3,%v7
	VGFMAG	%v4,CONST_R2R1,%v4,%v8

	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */

	cghi	%r4,64
	jnl	.Lfold_64bytes_loop

.Lless_than_64bytes:
	/*
	 * Fold V1 to V4 into a single 128-bit value in V1: multiply V1 with
	 * the R3 and R4 constants and accumulate the next 128-bit chunk,
	 * repeating until only a single 128-bit value remains.
	 */
	VGFMAG	%v1,CONST_R4R3,%v1,%v2
	VGFMAG	%v1,CONST_R4R3,%v1,%v3
	VGFMAG	%v1,CONST_R4R3,%v1,%v4

	cghi	%r4,16
	jl	.Lfinal_fold

.Lfold_16bytes_loop:

	VL	%v2,0,,%r3		/* Load next data chunk */
	VPERM	%v2,%v2,%v2,CONST_PERM_LE2BE
	VGFMAG	%v1,CONST_R4R3,%v1,%v2	/* Fold next data chunk */

	aghi	%r3,16
	aghi	%r4,-16

	cghi	%r4,16
	jnl	.Lfold_16bytes_loop

.Lfinal_fold:
	/*
	 * Set up a vector register for byte shifts.  The shift value must
	 * be loaded in bits 1-4 in byte element 7 of a vector register.
	 * Shift by 8 bytes: 0x40
	 * Shift by 4 bytes: 0x20
	 */
	VLEIB	%v9,0x40,7

	/*
	 * Prepare V0 for the next GF(2) multiplication: shift CONST_R4R3
	 * right by 8 bytes into V0 to move R4 into the rightmost doubleword,
	 * and set the leftmost doubleword to 0x1.
	 */
	VSRLB	%v0,CONST_R4R3,%v9
	VLEIG	%v0,1,0

	/*
	 * Compute the GF(2) product of V1 and V0.  The rightmost doubleword
	 * of V1 is multiplied with R4.  The leftmost doubleword of V1 is
	 * multiplied by 0x1 and is then XORed with the rightmost product.
	 * Implicitly, the leftmost intermediate product is just that
	 * doubleword zero-padded to 128 bits before the XOR.
	 */
	VGFMG	%v1,%v0,%v1

	/*
	 * Now do the final 32-bit fold by multiplying the rightmost word
	 * in V1 with R5 and XORing the result with the remaining bits in V1.
	 *
	 * To achieve this with a single VGFMAG, right shift V1 by a word
	 * and store the result in V2, which is then accumulated.  Use the
	 * vector unpack instruction to split the rightmost doubleword of V1:
	 * its rightmost word is loaded into the rightmost doubleword element
	 * of V1 and the other word into the leftmost doubleword element.
	 * The vector register with CONST_R5 contains the R5 constant in the
	 * rightmost doubleword; the leftmost doubleword is zero so that the
	 * leftmost product of V1 is ignored.
	 */
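	/*
	 * Layout illustration: if V1 holds the word elements [a b c d],
	 * VUPLLF %v1,%v1 zero-extends the two rightmost words, leaving c
	 * in the leftmost and d in the rightmost doubleword element, each
	 * with its upper 32 bits cleared.
	 */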
	VLEIB	%v9,0x20,7		  /* Shift by words */
	VSRLB	%v2,%v1,%v9		  /* Store remaining bits in V2 */
	VUPLLF	%v1,%v1			  /* Split rightmost doubleword */
	VGFMAG	%v1,CONST_R5,%v1,%v2	  /* V1 = (V1 * R5) XOR V2 */

	/*
	 * Apply a Barrett reduction to compute the final 32-bit CRC value.
	 *
	 * The input values to the Barrett reduction are the degree-63
	 * polynomial in V1 (R(x)), the degree-32 generator polynomial,
	 * and the reduction constant u.  The Barrett reduction result is
	 * the CRC value of R(x) mod P(x).
	 *
	 * The Barrett reduction algorithm is defined as:
	 *
	 *    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
	 *    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
	 *    3. C(x)  = R(x) XOR T2(x) mod x^32
	 *
	 *  Note: The leftmost doubleword of the vector register containing
	 *  CONST_RU_POLY is zero and, thus, the intermediate GF(2) product
	 *  is zero and does not contribute to the final result.
	 */
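	/*
	 * Illustration only (a hedged C sketch of the three steps above in
	 * the plain, non-reflected domain; the vector code below performs
	 * the same steps on bitreflected values):
	 *
	 *	// carry-less multiply; the product must fit in 64 bits,
	 *	// which holds for the operands used here
	 *	static uint64_t clmul(uint64_t a, uint64_t b)
	 *	{
	 *		uint64_t r = 0;
	 *
	 *		for (int i = 0; i < 64; i++)
	 *			if ((b >> i) & 1)
	 *				r ^= a << i;
	 *		return r;
	 *	}
	 *
	 *	// Barrett reduction of a 64-bit remainder r to a 32-bit CRC;
	 *	// u = floor(x^64 / P(x)), p = P(x), both 33-bit values
	 *	static uint32_t barrett(uint64_t r, uint64_t u, uint64_t p)
	 *	{
	 *		uint64_t t1 = clmul(r >> 32, u);	// step 1
	 *		uint64_t t2 = clmul(t1 >> 32, p);	// step 2
	 *		return (uint32_t)(r ^ t2);		// step 3
	 *	}
	 */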

	/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
	VUPLLF	%v2,%v1
	VGFMG	%v2,CONST_RU_POLY,%v2

	/*
	 * Compute the GF(2) product of the CRC polynomial with T1(x) in
	 * V2 and XOR the intermediate result, T2(x), with the value in V1.
	 * The final result is stored in word element 2 of V2.
	 */
	VUPLLF	%v2,%v2
	VGFMAG	%v2,CONST_CRC_POLY,%v2,%v1

.Ldone:
	VLGVF	%r2,%v2,2
	BR_EX	%r14
SYM_FUNC_END(crc32_le_vgfm_generic)

.previous
