/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware-accelerated CRC-32 variants for Linux on z Systems
 *
 * Use the z/Architecture Vector Extension Facility to accelerate the
 * computing of CRC-32 checksums.
 *
 * This CRC-32 implementation processes the most-significant bit first
 * (big-endian).
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>

/* Vector register range containing CRC-32 constants */
#define CONST_R1R2		%v9
#define CONST_R3R4		%v10
#define CONST_R5		%v11
#define CONST_R6		%v12
#define CONST_RU_POLY		%v13
#define CONST_CRC_POLY		%v14

	.data
	.balign	8

/*
 * The CRC-32 constant block contains reduction constants to fold and
 * process particular chunks of the input data stream in parallel.
 *
 * For the CRC-32 variants, the constants are precomputed according to
 * these definitions:
 *
 *	R1 = x^(4*128+64) mod P(x)
 *	R2 = x^(4*128)    mod P(x)
 *	R3 = x^(128+64)   mod P(x)
 *	R4 = x^128        mod P(x)
 *	R5 = x^96         mod P(x)
 *	R6 = x^64         mod P(x)
 *
 *	The Barrett reduction constant, u, is defined as floor(x^64 / P(x)).
 *
 *	where P(x) is the polynomial in the normal domain and P'(x) is the
 *	polynomial in the reversed (bitreflected) domain.
 *
 * Note that the constant definitions below are extended in order to compute
 * intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
 * The rightmost doubleword can be 0 to prevent contribution to the result or
 * can be multiplied by 1 to perform an XOR without the need for a separate
 * VECTOR EXCLUSIVE OR instruction.
 *
 * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
 *
 *	P(x)  = 0x04C11DB7
 *	P'(x) = 0xEDB88320
 */

SYM_DATA_START_LOCAL(constants_CRC_32_BE)
	.quad		0x08833794c, 0x0e6228b11	# R1, R2
	.quad		0x0c5b9cd4c, 0x0e8a45605	# R3, R4
	.quad		0x0f200aa66, 1 << 32		# R5, x32
	.quad		0x0490d678d, 1			# R6, 1
	.quad		0x104d101df, 0			# u
	.quad		0x104C11DB7, 0			# P(x)
SYM_DATA_END(constants_CRC_32_BE)
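
/*
 * The values above can be regenerated with a small helper program.  The
 * following is only an illustrative user-space sketch (not part of the
 * kernel build), assuming P(x) = 0x104C11DB7, i.e. the generator
 * polynomial including its implicit x^32 term:
 *
 *	#include <stdint.h>
 *
 *	// x^n mod P(x) in GF(2)[x]
 *	static uint32_t xpow_mod_p(unsigned int n)
 *	{
 *		uint64_t r = 1;			// start with the polynomial "1"
 *
 *		while (n--) {
 *			r <<= 1;		// multiply by x
 *			if (r & (1ULL << 32))	// degree 32 reached: reduce
 *				r ^= 0x104C11DB7ULL;
 *		}
 *		return (uint32_t)r;
 *	}
 *
 *	// R1 = xpow_mod_p(4*128+64)	R2 = xpow_mod_p(4*128)
 *	// R3 = xpow_mod_p(128+64)	R4 = xpow_mod_p(128)
 *	// R5 = xpow_mod_p(96)		R6 = xpow_mod_p(64)
 *
 * The Barrett constant u = floor(x^64 / P(x)) follows from an analogous
 * 64-step polynomial long division by the same P(x).
 */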

	.previous

	GEN_BR_THUNK %r14

	.text
/*
 * The CRC-32 function(s) use these calling conventions:
 *
 * Parameters:
 *
 *	%r2:	Initial CRC value, typically ~0; and final CRC (return) value.
 *	%r3:	Input buffer pointer; performance might improve if the
 *		buffer is aligned on a doubleword boundary.
 *	%r4:	Length of the buffer, must be 64 bytes or greater.
 *
 * Register usage:
 *
 *	%r5:	CRC-32 constant pool base pointer.
 *	V0:	Initial CRC value and intermediate constants and results.
 *	V1..V4:	Data for CRC computation.
 *	V5..V8:	Next data chunks that are fetched from the input buffer.
 *
 *	V9..V14: CRC-32 constants.
 */
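/*
 * Under the s390x C ABI the register assignment above corresponds to a
 * plain C function call, so glue code can be expected to declare and use
 * the entry point roughly as sketched below.  This is an illustration
 * only; crc32_be_base() stands in for whatever software fallback the
 * caller uses for short buffers and trailing bytes:
 *
 *	u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 *
 *	u32 crc32_be(u32 crc, const u8 *data, size_t len)
 *	{
 *		if (len >= 64) {
 *			// hand over only whole 16-byte chunks
 *			size_t vx_len = len & ~(size_t)15;
 *
 *			crc = crc32_be_vgfm_16(crc, data, vx_len);
 *			data += vx_len;
 *			len -= vx_len;
 *		}
 *		return crc32_be_base(crc, data, len);	// hypothetical fallback
 *	}
 */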
SYM_FUNC_START(crc32_be_vgfm_16)
	/* Load CRC-32 constants */
	larl	%r5,constants_CRC_32_BE
	VLM	CONST_R1R2,CONST_CRC_POLY,0,%r5

	/* Load the initial CRC value into the leftmost word of V0. */
	VZERO	%v0
	VLVGF	%v0,%r2,0

	/* Load a 64-byte data chunk and XOR with CRC */
	VLM	%v1,%v4,0,%r3		/* 64-bytes into V1..V4 */
	VX	%v1,%v0,%v1		/* V1 ^= CRC */
	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */

	/* Check remaining buffer size and jump to proper folding method */
	cghi	%r4,64
	jl	.Lless_than_64bytes

.Lfold_64bytes_loop:
	/* Load the next 64-byte data chunk into V5 to V8 */
	VLM	%v5,%v8,0,%r3

	/*
	 * Perform a GF(2) multiplication of the doublewords in V1 with
	 * the R1 and R2 reduction constants in CONST_R1R2.  The
	 * intermediate result is then folded (accumulated) with the next
	 * data chunk in V5 and stored in V1.  Repeat this step for the
	 * register contents in V2, V3, and V4 respectively.
	 */
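	/*
	 * In C-like notation (a sketch only; clmul64() stands for a
	 * 64x64 -> 128 bit carry-less multiplication, which standard C
	 * does not provide), each VGFMAG below computes
	 *
	 *	V1 = clmul64(V1.dw[0], R1) ^ clmul64(V1.dw[1], R2) ^ V5;
	 *
	 * i.e. the previous accumulator is advanced by 512 bits so that
	 * it lines up with, and is XORed into, the data loaded 64 bytes
	 * further on.
	 */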
	VGFMAG	%v1,CONST_R1R2,%v1,%v5
	VGFMAG	%v2,CONST_R1R2,%v2,%v6
	VGFMAG	%v3,CONST_R1R2,%v3,%v7
	VGFMAG	%v4,CONST_R1R2,%v4,%v8

	/* Adjust buffer pointer and length for next loop */
	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */

	cghi	%r4,64
	jnl	.Lfold_64bytes_loop

.Lless_than_64bytes:
	/* Fold V1 to V4 into a single 128-bit value in V1 */
	VGFMAG	%v1,CONST_R3R4,%v1,%v2
	VGFMAG	%v1,CONST_R3R4,%v1,%v3
	VGFMAG	%v1,CONST_R3R4,%v1,%v4

	/* Check whether to continue with 16-byte folding */
	cghi	%r4,16
	jl	.Lfinal_fold

.Lfold_16bytes_loop:

	VL	%v2,0,,%r3		/* Load next data chunk */
	VGFMAG	%v1,CONST_R3R4,%v1,%v2	/* Fold next data chunk */

	/* Adjust buffer pointer and size for folding next data chunk */
	aghi	%r3,16
	aghi	%r4,-16

	/* Process remaining data chunks */
	cghi	%r4,16
	jnl	.Lfold_16bytes_loop

.Lfinal_fold:
	/*
	 * The R5 constant is used to fold a 128-bit value into a 96-bit value
	 * that is XORed with the next 96-bit input data chunk.  To use a single
	 * VGFMG instruction, multiply the rightmost 64 bits with x^32 (1<<32) to
	 * form an intermediate 96-bit value (with appended zeros) which is then
	 * XORed with the intermediate reduction result.
	 */
	VGFMG	%v1,CONST_R5,%v1

	/*
	 * Further reduce the remaining 96-bit value to a 64-bit value using a
	 * single VGFMG: the rightmost doubleword is multiplied with 0x1, and
	 * the intermediate result is then XORed with the product of the
	 * leftmost doubleword with R6.  The result is a 64-bit value and is
	 * subject to the Barrett reduction.
	 */
	VGFMG	%v1,CONST_R6,%v1

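	/*
	 * Expressed with the same hypothetical clmul64() notation as in the
	 * sketch above, the two reduction steps just performed are
	 *
	 *	V1 = clmul64(V1.dw[0], R5) ^ clmul64(V1.dw[1], 1ULL << 32);
	 *	V1 = clmul64(V1.dw[0], R6) ^ clmul64(V1.dw[1], 1);
	 *
	 * The multiplications by 1<<32 and by 1 merely reposition and XOR in
	 * the rightmost doubleword, which is why no separate VECTOR EXCLUSIVE
	 * OR instruction is needed (see the constant block comment above).
	 */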
	/*
	 * The input values to the Barrett reduction are the degree-63 polynomial
	 * in V1 (R(x)), the degree-32 generator polynomial P(x), and the
	 * reduction constant u.  The Barrett reduction result is the CRC value
	 * of R(x) mod P(x).
	 *
	 * The Barrett reduction algorithm is defined as:
	 *
	 *    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
	 *    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
	 *    3. C(x)  = R(x) XOR T2(x) mod x^32
	 *
	 * Note: To compensate for the division by x^32, use the vector unpack
	 * instruction to move the leftmost word into the leftmost doubleword
	 * of the vector register.  The rightmost doubleword is multiplied
	 * by zero so that it does not contribute to the intermediate results.
	 */
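	/*
	 * A scalar C model of these three steps (a sketch only, not kernel
	 * code) could look as follows.  clmul64_lo() is a hypothetical helper
	 * returning the low 64 bits of a carry-less 64x64 bit multiplication;
	 * both products fit in 64 bits here because each factor pair has a
	 * combined degree of at most 63:
	 *
	 *	static uint64_t clmul64_lo(uint64_t a, uint64_t b)
	 *	{
	 *		uint64_t p = 0;
	 *		int i;
	 *
	 *		for (i = 0; i < 64; i++)
	 *			if ((b >> i) & 1)
	 *				p ^= a << i;
	 *		return p;
	 *	}
	 *
	 *	// r holds R(x), the 64-bit value left in V1 after the R6 fold
	 *	static uint32_t crc32_be_barrett(uint64_t r)
	 *	{
	 *		uint64_t t1 = clmul64_lo(r >> 32, 0x104d101dfULL);  // step 1
	 *		uint64_t t2 = clmul64_lo(t1 >> 32, 0x104c11db7ULL); // step 2
	 *
	 *		return (uint32_t)(r ^ t2);			     // step 3
	 *	}
	 */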

	/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
	VUPLLF	%v2,%v1
	VGFMG	%v2,CONST_RU_POLY,%v2

	/*
	 * Compute the GF(2) product of the CRC polynomial in CONST_CRC_POLY
	 * with T1(x) in V2 and XOR the intermediate result, T2(x), with the
	 * value in V1.  The final result is in the rightmost word of V2.
	 */
	VUPLLF	%v2,%v2
	VGFMAG	%v2,CONST_CRC_POLY,%v2,%v1

.Ldone:
	VLGVF	%r2,%v2,3
	BR_EX	%r14
SYM_FUNC_END(crc32_be_vgfm_16)

.previous
