/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using the hardware-provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE)
 * PCLMULQDQ is Intel's carry-less multiplication instruction (part of the
 * CLMUL extension); the reference can be found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *	      Alexander Boyko <Alexander_Boyko@xyratex.com>
 */
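
/*
 * Overview: the routine folds the buffer with carry-less multiplications,
 * along the lines of Intel's white paper "Fast CRC Computation for Generic
 * Polynomials Using PCLMULQDQ Instruction" (Gopal et al., 2009):
 *
 *   1. While at least 64 bytes remain, fold a full cache line per
 *      iteration, keeping four 128-bit running remainders.
 *   2. Fold the four remainders into one, then consume any remaining
 *      16-byte blocks.
 *   3. Reduce 128 -> 64 -> 32 bits with two final folds and a
 *      bit-reflected Barrett reduction.
 */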

#include <linux/linkage.h>


.section .rodata
.align 16
/*
 * [(x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
 * #define CONSTANT_R1  0x154442bd4LL
 *
 * [(x4*128-32 mod P(x) << 32)]' << 1   = 0x1c6e41596
 * #define CONSTANT_R2  0x1c6e41596LL
 */
.Lconstant_R2R1:
	.octa 0x00000001c6e415960000000154442bd4
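/*
 * Note on layout: .octa emits the value little-endian, so the low qword of
 * each 16-byte constant pair holds the first-listed constant (here R1) and
 * the high qword the second (R2).  pclmulqdq's immediate selects the
 * qwords: $0x00 multiplies the two low qwords, $0x11 the two high qwords.
 * The same layout applies to the constants below.
 */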
/*
 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
 * #define CONSTANT_R3  0x1751997d0LL
 *
 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
 * #define CONSTANT_R4  0x0ccaa009eLL
 */
.Lconstant_R4R3:
	.octa 0x00000000ccaa009e00000001751997d0
/*
 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
 * #define CONSTANT_R5  0x163cd6124LL
 */
.Lconstant_R5:
	.octa 0x00000000000000000000000163cd6124
.Lconstant_mask32:
	.octa 0x000000000000000000000000FFFFFFFF
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
 * #define CONSTANT_RU  0x1F7011641LL
 */
.Lconstant_RUpoly:
	.octa 0x00000001F701164100000001DB710641
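
/*
 * The R constants above are residues x^n mod P(x) in GF(2)[x], bit-reflected
 * and shifted left by one per the comments.  A minimal C sketch of the
 * residue computation (xnmodp is a hypothetical helper for illustration;
 * the reflection/shift steps are not shown):
 *
 *	// x^n mod P(x) over GF(2), with P = 0x104C11DB7 (degree 32)
 *	static u64 xnmodp(unsigned int n)
 *	{
 *		u64 r = 1;			// start from x^0
 *		while (n--) {
 *			r <<= 1;		// multiply by x
 *			if (r & (1ULL << 32))	// degree reached 32?
 *				r ^= 0x104C11DB7ULL;	// reduce mod P
 *		}
 *		return r;
 *	}
 */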

#define CONSTANT %xmm0

#ifdef __x86_64__
#define BUF     %rdi
#define LEN     %rsi
#define CRC     %edx
#else
#define BUF     %eax
#define LEN     %edx
#define CRC     %ecx
#endif
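/*
 * Argument registers follow the kernel calling conventions: the x86-64
 * SysV ABI passes the first three arguments in %rdi/%rsi/%rdx, while the
 * 32-bit kernel is built with -mregparm=3, so arguments arrive in
 * %eax/%edx/%ecx.
 */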


.text
/**
 *      Calculate crc32
 *      BUF - buffer (16-byte aligned)
 *      LEN - buffer size in bytes (16-byte aligned); must be greater than 63
 *      CRC - initial crc32 value
 *      returns the crc32 in %eax
 *      uint crc32_pclmul_le_16(unsigned char const *buffer,
 *                              size_t len, uint crc32)
 */
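
/*
 * Semantics, as a bit-at-a-time C reference (a sketch for checking the
 * vector path against; crc32_ref is a hypothetical name, not a kernel API):
 *
 *	static u32 crc32_ref(u32 crc, const u8 *p, size_t len)
 *	{
 *		while (len--) {
 *			int i;
 *
 *			crc ^= *p++;
 *			for (i = 0; i < 8; i++)	// one bit per step, LSB first
 *				crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
 *		}
 *		return crc;
 *	}
 *
 * The pre/post inversion conventional for CRC32 is left to callers that
 * need it; this routine only advances the raw remainder.
 */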

SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16-byte aligned */
	movdqa  (BUF), %xmm1
	movdqa  0x10(BUF), %xmm2
	movdqa  0x20(BUF), %xmm3
	movdqa  0x30(BUF), %xmm4
	movd    CRC, CONSTANT
	pxor    CONSTANT, %xmm1
	sub     $0x40, LEN
	add     $0x40, BUF
	cmp     $0x40, LEN
	jb      less_64
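	/*
	 * At this point the first 64 bytes are loaded into %xmm1-%xmm4 and
	 * the initial CRC has been XORed into the low 32 bits of the first
	 * block (movd zero-extends CRC into the low dword of CONSTANT).
	 */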

#ifdef __x86_64__
	movdqa .Lconstant_R2R1(%rip), CONSTANT
#else
	movdqa .Lconstant_R2R1, CONSTANT
#endif

loop_64:/* 64-byte (full cache line) folding */
	prefetchnta    0x40(BUF)
	movdqa  %xmm1, %xmm5
	movdqa  %xmm2, %xmm6
	movdqa  %xmm3, %xmm7
#ifdef __x86_64__
	movdqa  %xmm4, %xmm8
#endif
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x00, CONSTANT, %xmm2
	pclmulqdq $0x00, CONSTANT, %xmm3
#ifdef __x86_64__
	pclmulqdq $0x00, CONSTANT, %xmm4
#endif
	pclmulqdq $0x11, CONSTANT, %xmm5
	pclmulqdq $0x11, CONSTANT, %xmm6
	pclmulqdq $0x11, CONSTANT, %xmm7
#ifdef __x86_64__
	pclmulqdq $0x11, CONSTANT, %xmm8
#endif
	pxor    %xmm5, %xmm1
	pxor    %xmm6, %xmm2
	pxor    %xmm7, %xmm3
#ifdef __x86_64__
	pxor    %xmm8, %xmm4
#else
	/* %xmm8 is not available in 32-bit mode */
	movdqa  %xmm4, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm4
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm4
#endif

	pxor    (BUF), %xmm1
	pxor    0x10(BUF), %xmm2
	pxor    0x20(BUF), %xmm3
	pxor    0x30(BUF), %xmm4

	sub     $0x40, LEN
	add     $0x40, BUF
	cmp     $0x40, LEN
	jge     loop_64
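	/*
	 * One lane of the fold above computes, carry-less:
	 *   acc = acc.low * R1  xor  acc.high * R2  xor  next_data
	 * so each iteration folds the four accumulators forward by 64
	 * bytes of input.  As a C intrinsics sketch (fold_128 is a
	 * hypothetical helper, not part of this file):
	 *
	 *	#include <wmmintrin.h>
	 *
	 *	static __m128i fold_128(__m128i acc, __m128i next, __m128i k)
	 *	{
	 *		__m128i lo = _mm_clmulepi64_si128(acc, k, 0x00);
	 *		__m128i hi = _mm_clmulepi64_si128(acc, k, 0x11);
	 *		return _mm_xor_si128(_mm_xor_si128(lo, hi), next);
	 *	}
	 */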
less_64:/* fold the four 128-bit accumulators into one */
#ifdef __x86_64__
	movdqa  .Lconstant_R4R3(%rip), CONSTANT
#else
	movdqa  .Lconstant_R4R3, CONSTANT
#endif
	prefetchnta     (BUF)

	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm2, %xmm1

	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm3, %xmm1

	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm4, %xmm1

	cmp     $0x10, LEN
	jb      fold_64
loop_16:/* fold the rest of the buffer into the 128-bit accumulator */
	movdqa  %xmm1, %xmm5
	pclmulqdq $0x00, CONSTANT, %xmm1
	pclmulqdq $0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    (BUF), %xmm1
	sub     $0x10, LEN
	add     $0x10, BUF
	cmp     $0x10, LEN
	jge     loop_16
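	/*
	 * loop_16 is the same fold step as above but at 16-byte distance,
	 * using the R4/R3 constants (the x128 +/- 32 residues), advancing
	 * the remainder by 128 bits per iteration.
	 */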

fold_64:
	/*
	 * Perform the last 64-bit fold; this also appends 32 zero bits to
	 * the input stream.
	 */
	pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
	psrldq  $0x08, %xmm1
	pxor    CONSTANT, %xmm1

	/* final 32-bit fold */
	movdqa  %xmm1, %xmm2
#ifdef __x86_64__
	movdqa  .Lconstant_R5(%rip), CONSTANT
	movdqa  .Lconstant_mask32(%rip), %xmm3
#else
	movdqa  .Lconstant_R5, CONSTANT
	movdqa  .Lconstant_mask32, %xmm3
#endif
	psrldq  $0x04, %xmm2
	pand    %xmm3, %xmm1
	pclmulqdq $0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1

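	/*
	 * Sketch of the math behind the final step: with R the remaining
	 * 64-bit value, Barrett reduction takes q = (R mod x^32) * RU as a
	 * quotient estimate; then R xor ((q mod x^32) * P) has its low
	 * 32 bits cancelled, leaving the final CRC in bits 63:32, which
	 * pextrd extracts below.
	 */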
	/* Finish up with the bit-reflected Barrett reduction, 64 -> 32 bits */
#ifdef __x86_64__
	movdqa  .Lconstant_RUpoly(%rip), CONSTANT
#else
	movdqa  .Lconstant_RUpoly, CONSTANT
#endif
	movdqa  %xmm1, %xmm2
	pand    %xmm3, %xmm1
	pclmulqdq $0x10, CONSTANT, %xmm1
	pand    %xmm3, %xmm1
	pclmulqdq $0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1
	pextrd  $0x01, %xmm1, %eax

	RET
SYM_FUNC_END(crc32_pclmul_le_16)