/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
extern int defer_free_memblock(void *unused);
#endif

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
    MEMBLOCK_NONE = 0x0,    /* No special request */
    MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
    MEMBLOCK_MIRROR = 0x2,  /* mirrored region */
    MEMBLOCK_NOMAP = 0x4,   /* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
    phys_addr_t base;
    phys_addr_t size;
    enum memblock_flags flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
    int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
    unsigned long cnt;
    unsigned long max;
    phys_addr_t total_size;
    struct memblock_region *regions;
    char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
    bool bottom_up; /* is bottom up direction? */
    phys_addr_t current_limit;
    struct memblock_type memory;
    struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void)
{
}
#endif

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
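
/*
 * Example: an illustrative early-boot sketch of the basic registration and
 * reservation calls above, in the spirit of architecture setup code. The
 * addresses, sizes and the setup_machine_memory() wrapper are hypothetical
 * placeholders, not real platform values.
 *
 *    void __init setup_machine_memory(void)
 *    {
 *        memblock_add(0x80000000, SZ_512M);              // make a bank of RAM known to memblock
 *        memblock_reserve(__pa(_text), _end - _text);    // keep the kernel image out of the allocator
 *        memblock_free(0x80100000, SZ_1M);               // drop a reservation that is no longer needed
 *    }
 */
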
127
128/* Low level functions */
129void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_a,
130                      struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid);
131
132void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_a,
133                          struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid);
134
135void __memblock_free_late(phys_addr_t base, phys_addr_t size);
136
137#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
138static inline void _next_physmem_range(u64 *idx, struct memblock_type *type, phys_addr_t *out_start,
139                                       phys_addr_t *out_end)
140{
141    extern struct memblock_type physmem;
142
143    __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type, out_start, out_end, NULL);
144}
145
146/**
147 * for_each_physmem_range - iterate through physmem areas not included in type.
148 * @i: u64 used as loop variable
149 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
150 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
151 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
152 */
153#define for_each_physmem_range(i, type, p_start, p_end)                                                                \
154    for (i = 0, _next_physmem_range(&i, type, p_start, p_end); i != (u64)ULLONG_MAX;                                   \
155         _next_physmem_range(&i, type, p_start, p_end))
156#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
157
158/**
159 * _for_each_mem_range - iterate through memblock areas from type_a and not
160 * included in type_b. Or just type_a if type_b is NULL.
161 * @i: u64 used as loop variable
162 * @type_a: ptr to memblock_type to iterate
163 * @type_b: ptr to memblock_type which excludes from the iteration
164 * @nid: node selector, %NUMA_NO_NODE for all nodes
165 * @flags: pick from blocks based on memory attributes
166 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
167 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
168 * @p_nid: ptr to int for nid of the range, can be %NULL
169 */
170#define _for_each_mem_range(i, type_a, type_b, nid, flags, p_start, p_end, p_nid)                                      \
171    for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, p_start, p_end, p_nid); i != (u64)ULLONG_MAX;         \
172         __next_mem_range(&i, nid, flags, type_a, type_b, p_start, p_end, p_nid))
173
174/**
175 * _for_each_mem_range_rev - reverse iterate through memblock areas from
176 * type_a and not included in type_b. Or just type_a if type_b is NULL.
177 * @i: u64 used as loop variable
178 * @type_a: ptr to memblock_type to iterate
179 * @type_b: ptr to memblock_type which excludes from the iteration
180 * @nid: node selector, %NUMA_NO_NODE for all nodes
181 * @flags: pick from blocks based on memory attributes
182 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
183 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
184 * @p_nid: ptr to int for nid of the range, can be %NULL
185 */
186#define _for_each_mem_range_rev(i, type_a, type_b, nid, flags, p_start, p_end, p_nid)                                  \
187    for (i = (u64)ULLONG_MAX, __next_mem_range_rev(&i, nid, flags, type_a, type_b, p_start, p_end, p_nid);             \
188         i != (u64)ULLONG_MAX; __next_mem_range_rev(&i, nid, flags, type_a, type_b, p_start, p_end, p_nid))
189
190/**
191 * for_each_mem_range - iterate through memory areas.
192 * @i: u64 used as loop variable
193 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
194 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
195 */
196#define for_each_mem_range(i, p_start, p_end)                                                                          \
197    _for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
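
/*
 * Example: a minimal sketch of walking every registered memory range, as an
 * early-boot caller might do to print or map RAM. Variable names are
 * placeholders; the end address reported by the iterator is exclusive.
 *
 *    phys_addr_t start, end;
 *    u64 i;
 *
 *    for_each_mem_range(i, &start, &end)
 *        pr_info("memory: [%pa-%pa)\n", &start, &end);
 */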

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end) \
    _for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, MEMBLOCK_HOTPLUG, p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end) \
    _for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
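
/*
 * Example: an illustrative walk over the reserved type, similar in spirit to
 * what memblock_dump_all() reports for reserved regions.
 *
 *    phys_addr_t start, end;
 *    u64 i;
 *
 *    for_each_reserved_mem_range(i, &start, &end)
 *        pr_debug("reserved: [%pa-%pa)\n", &start, &end);
 */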

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
    return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
    return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
    return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
    for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); i >= 0; \
         __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
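
/*
 * Example: a sketch of counting present pages from the early pfn ranges,
 * roughly what the zone sizing code does. Passing MAX_NUMNODES visits all
 * nodes; "present" is a hypothetical local counter.
 *
 *    unsigned long start_pfn, end_pfn, present = 0;
 *    int i, nid;
 *
 *    for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *        present += end_pfn - start_pfn;
 */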

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, unsigned long *out_spfn, unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \
    for (i = 0, __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); i != U64_MAX; \
         __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
    for (; i != U64_MAX; __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
    _for_each_mem_range(i, &memblock.memory, &memblock.reserved, nid, flags, p_start, p_end, p_nid)
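
/*
 * Example: a minimal sketch that sums everything memblock still considers
 * free (memory && !reserved) on any node, ignoring the per-range node id.
 *
 *    phys_addr_t start, end, free_bytes = 0;
 *    u64 i;
 *
 *    for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *        free_bytes += end - start;
 */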

/**
 * for_each_free_mem_range_reverse - reverse iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, p_nid) \
    _for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid);

#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
    r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
    return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
    return 0;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
#define MEMBLOCK_ALLOC_KASAN 1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid,
                                     bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
{
    return memblock_phys_alloc_range(size, align, 0, MEMBLOCK_ALLOC_ACCESSIBLE);
}
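
/*
 * Example: a hedged sketch of allocating a page-sized, page-aligned block by
 * physical address, e.g. for an early page table. A return value of 0 means
 * the allocation failed and must be handled by the caller.
 *
 *    phys_addr_t pt_phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 *    if (!pt_phys)
 *        panic("%s: failed to allocate page table page\n", __func__);
 */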

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr,
                                   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
    return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
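
/*
 * Example: the common early-boot pattern for a zeroed, virtually addressed
 * buffer from memblock_alloc(). "my_table" and "size" are hypothetical.
 *
 *    my_table = memblock_alloc(size, SMP_CACHE_BYTES);
 *    if (!my_table)
 *        panic("%s: failed to allocate %lu bytes\n", __func__, (unsigned long)size);
 */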

static inline void *memblock_alloc_raw(phys_addr_t size, phys_addr_t align)
{
    return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
{
    return memblock_alloc_try_nid(size, align, min_addr, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size, phys_addr_t align)
{
    return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size, phys_addr_t align, int nid)
{
    return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void memblock_free_early(phys_addr_t base, phys_addr_t size)
{
    memblock_free(base, size);
}

static inline void memblock_free_early_nid(phys_addr_t base, phys_addr_t size, int nid)
{
    memblock_free(base, size);
}

static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
    __memblock_free_late(base, size);
}
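
/*
 * Example: an illustrative hand-back of an early reservation. Before the page
 * allocator is up the memory goes back to memblock; afterwards it must be
 * released through __memblock_free_late() so managed page counts stay
 * correct. "scratch_base" and "scratch_size" are placeholders.
 *
 *    if (slab_is_available())
 *        memblock_free_late(scratch_base, scratch_size);
 *    else
 *        memblock_free_early(scratch_base, scratch_size);
 */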

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init void memblock_set_bottom_up(bool enable)
{
    memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline __init bool memblock_bottom_up(void)
{
    return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
    return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
    return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
    return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
    return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region) \
    for (region = memblock.memory.regions; region < (memblock.memory.regions + memblock.memory.cnt); region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region) \
    for (region = memblock.reserved.regions; region < (memblock.reserved.regions + memblock.reserved.cnt); region++)
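
/*
 * Example: a sketch of walking the raw region array, e.g. to skip NOMAP
 * ranges while building the direct map. The map_range() helper is
 * hypothetical and stands in for arch-specific mapping code.
 *
 *    struct memblock_region *r;
 *
 *    for_each_mem_region(r) {
 *        if (memblock_is_nomap(r))
 *            continue;
 *        map_range(r->base, r->size);
 *    }
 */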

extern void *alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries,
                                     int scale, int flags, unsigned int *_hash_shift, unsigned int *_hash_mask,
                                     unsigned long low_limit, unsigned long high_limit);

#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min shift passed via *_hash_shift */
#define HASH_ZERO 0x00000004  /* Zero allocated hash table */
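
/*
 * Example: an illustrative boot-time hash table allocation modelled on the
 * callers in fs/. The table name and the variables receiving the pointer,
 * shift and mask are placeholders; passing 0 entries lets the kernel size
 * the table from the amount of memory.
 *
 *    my_hashtable = alloc_large_system_hash("my-cache",
 *                                           sizeof(struct hlist_head),
 *                                           0, 14, HASH_EARLY | HASH_ZERO,
 *                                           &my_hash_shift, &my_hash_mask,
 *                                           0, 0);
 */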

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist; /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */