/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarchregs.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#define INVTLB_ADDR_GFALSE_AND_ASID	5

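/*
 * Number of index bits at each page-table level: every level is
 * one page of 8-byte entries, hence PAGE_SHIFT - 3 bits per level.
 */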
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)

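/*
 * Build a trampoline that saves the full register state, records
 * the faulting address in pt_regs, and calls
 * do_page_fault(regs, write, badvaddr).
 */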
	.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
	.endm

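	/* Instantiate the read (0) and write (1) page fault trampolines. */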
	tlb_do_page_fault 0
	tlb_do_page_fault 1

SYM_CODE_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)

SYM_CODE_START(handle_tlb_load)
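	/* Stash t0/t1/ra in kernel scratch CSRs; no stack is usable here. */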
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
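	/*
	 * A negative BADV is a sign-extended kernel (vmalloc) address,
	 * translated via swapper_pg_dir; user addresses fall through
	 * and walk the page table rooted at CSR.PGDL.
	 */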
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
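	/* t1 now points at the last-level directory slot; load the PMD entry. */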
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an
	 * address but the TLB PTE itself. Check the PAGE_HUGE bit
	 * and see if we need to jump to huge TLB processing.
	 */
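	/*
	 * The rotate puts the HUGE bit into the sign bit so a single
	 * bltz can test it; the second rotri.d below undoes the
	 * rotation before the value is used as a page-table base.
	 */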
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

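	/*
	 * Set _PAGE_VALID in the PTE. On SMP this must be a ll/sc
	 * pair so a racing update restarts the sequence; on UP a
	 * plain load/store is enough.
	 */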
#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
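	/*
	 * tlbsrch finds the matching TLB entry. Clearing bit 3 of
	 * the PTE pointer aligns it to the even entry of the pair,
	 * so the two consecutive PTEs feed TLBELO0/TLBELO1.
	 */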
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point for huge pages. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	ori		t0, ra, _PAGE_VALID
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
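	/*
	 * Any stale base-size entry for this address must go before
	 * the huge mapping is installed: invtlb op 5 drops non-global
	 * entries matching this ASID and VA.
	 */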
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured
	 * huge page size. This is twice the size of the TLB entry
	 * we intend to use, so an entry half the size of the huge
	 * page is written into entrylo0 and entrylo1 to cover the
	 * contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
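	/*
	 * xori clears the HUGE bit, which occupies the hardware
	 * Global bit position; HGLOBAL is then shifted down into
	 * that position.
	 */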
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
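	/* entrylo1 maps the upper half: advance the PA by half the huge page. */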
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
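	/*
	 * csrxchg writes only the bits selected by the mask in t0,
	 * so just the PS (page size) field of TLBIDX changes here.
	 */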
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

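	/* Reset default page size */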
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
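	/*
	 * Synchronization barrier before leaving the fast path;
	 * cores that do not implement the 0x700 hint are expected
	 * to treat it as a full dbar 0.
	 */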
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load)

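/*
 * With the hardware page table walker (PTW) enabled, hardware
 * refills the TLB itself, so these handlers only have to route
 * the fault to the C page fault path.
 */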
SYM_CODE_START(handle_tlb_load_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la.abs		t0, tlb_do_page_fault_0
	jirl		zero, t0, 0
SYM_CODE_END(handle_tlb_load_ptw)

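/*
 * handle_tlb_store mirrors handle_tlb_load, except that the PTE
 * must be both present and writable, and the fast path sets
 * _PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED in one go.
 */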
SYM_CODE_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an
	 * address but the TLB PTE itself. Check the PAGE_HUGE bit
	 * and see if we need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
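	/* The andi/xori pair leaves zero iff both PRESENT and WRITE are set. */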
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point for huge pages. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured
	 * huge page size. This is twice the size of the TLB entry
	 * we intend to use, so an entry half the size of the huge
	 * page is written into entrylo0 and entrylo1 to cover the
	 * contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store)

SYM_CODE_START(handle_tlb_store_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la.abs		t0, tlb_do_page_fault_1
	jirl		zero, t0, 0
SYM_CODE_END(handle_tlb_store_ptw)

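/*
 * handle_tlb_modify handles a write hitting a TLB entry whose
 * dirty bit is clear: the PTE only needs to be writable, then
 * the dirty bits are set as in the store path.
 */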
SYM_CODE_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry doesn't contain an
	 * address but the TLB PTE itself. Check the PAGE_HUGE bit
	 * and see if we need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point for huge pages. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured
	 * huge page size. This is twice the size of the TLB entry
	 * we intend to use, so an entry half the size of the huge
	 * page is written into entrylo0 and entrylo1 to cover the
	 * contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify)

SYM_CODE_START(handle_tlb_modify_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la.abs		t0, tlb_do_page_fault_1
	jirl		zero, t0, 0
SYM_CODE_END(handle_tlb_modify_ptw)

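/*
 * TLB refill is a hardware-assisted walk: lddir descends one
 * directory level per step and ldpte loads the even/odd PTE pair
 * into the TLBRELO registers before tlbfill. Only t0 needs
 * saving, in the dedicated TLBRSAVE scratch CSR.
 */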
SYM_CODE_START(handle_tlb_refill)
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_CODE_END(handle_tlb_refill)