Lines matching references to mem (struct memory_block handling)
107 struct memory_block *mem = to_memory_block(dev);
109 WARN_ON(mem->altmap);
110 kfree(mem);
123 struct memory_block *mem = to_memory_block(dev);
125 return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
144 struct memory_block *mem = to_memory_block(dev);
151 switch (mem->state) {
163 return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
175 static unsigned long memblk_nr_poison(struct memory_block *mem);
177 static inline unsigned long memblk_nr_poison(struct memory_block *mem)
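Lines 175 and 177 above are the two halves of the kernel's usual config-stub idiom: with the relevant config option enabled, the counter is only forward-declared here and defined further down the file; with it disabled, an inline stub returning 0 keeps every caller free of #ifdefs. A minimal sketch of the idiom, using a made-up CONFIG_FOO symbol and an opaque type:

	struct obj;	/* opaque; stands in for struct memory_block */

	#ifdef CONFIG_FOO
	/* feature enabled: declared here, defined elsewhere in the file */
	unsigned long foo_count(struct obj *o);
	#else
	/* feature disabled: inline stub so callers need no #ifdef */
	static inline unsigned long foo_count(struct obj *o)
	{
		return 0;
	}
	#endif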
186 static int memory_block_online(struct memory_block *mem)
188 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
194 if (memblk_nr_poison(mem))
197 zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
207 if (mem->altmap)
208 nr_vmemmap_pages = mem->altmap->free;
218 nr_pages - nr_vmemmap_pages, zone, mem->group);
230 adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
233 mem->zone = zone;
242 static int memory_block_offline(struct memory_block *mem)
244 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
249 if (!mem->zone)
256 if (mem->altmap)
257 nr_vmemmap_pages = mem->altmap->free;
261 adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
265 nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
270 mem->group, nr_vmemmap_pages);
277 mem->zone = NULL;
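Lines 186-277 above come from the online/offline handlers. The altmap checks at lines 207/208 and 256/257 implement self-hosted memmap accounting: when a block carries its own vmemmap (mem->altmap), those pages are subtracted from the range actually handed to the online/offline core, as at lines 218 and 265. A tiny arithmetic sketch with purely illustrative values (a 128 MiB block, 4 KiB pages, 64-byte struct page):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical 128 MiB block with 4 KiB pages */
		unsigned long nr_pages = 32768;
		/* memmap for those pages at 64 bytes per struct page is
		 * 2 MiB, i.e. 512 pages; stands in for mem->altmap->free */
		unsigned long nr_vmemmap_pages = 512;

		printf("pages handed to the online/offline core: %lu\n",
		       nr_pages - nr_vmemmap_pages);
		return 0;
	}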
288 memory_block_action(struct memory_block *mem, unsigned long action)
294 ret = memory_block_online(mem);
297 ret = memory_block_offline(mem);
301 "%ld\n", __func__, mem->start_section_nr, action, action);
308 static int memory_block_change_state(struct memory_block *mem,
313 if (mem->state != from_state_req)
317 mem->state = MEM_GOING_OFFLINE;
319 ret = memory_block_action(mem, to_state);
320 mem->state = ret ? from_state_req : to_state;
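Lines 308-320 show memory_block_change_state(): refuse a mismatched starting state, park the block in the transient MEM_GOING_OFFLINE state while the action runs, then commit the target state only if memory_block_action() succeeded. A minimal userspace sketch of the same compare-act-commit pattern (all names illustrative, not the kernel's):

	#include <errno.h>
	#include <stdio.h>

	enum blk_state { ST_OFFLINE, ST_GOING_OFFLINE, ST_ONLINE };

	struct blk { enum blk_state state; };

	static int do_action(struct blk *b, enum blk_state to)
	{
		return 0;	/* pretend the (un)plug work succeeded */
	}

	static int change_state(struct blk *b, enum blk_state to,
				enum blk_state from_req)
	{
		int ret;

		if (b->state != from_req)
			return -EINVAL;	/* wrong starting state */

		if (to == ST_OFFLINE)
			b->state = ST_GOING_OFFLINE;	/* transient marker */

		ret = do_action(b, to);
		b->state = ret ? from_req : to;	/* commit or roll back */
		return ret;
	}

	int main(void)
	{
		struct blk b = { .state = ST_ONLINE };

		printf("offline: %d\n", change_state(&b, ST_OFFLINE, ST_ONLINE));
		return 0;
	}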
328 struct memory_block *mem = to_memory_block(dev);
331 if (mem->state == MEM_ONLINE)
338 if (mem->online_type == MMOP_OFFLINE)
339 mem->online_type = MMOP_ONLINE;
341 ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
342 mem->online_type = MMOP_OFFLINE;
349 struct memory_block *mem = to_memory_block(dev);
351 if (mem->state == MEM_OFFLINE)
354 return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
361 struct memory_block *mem = to_memory_block(dev);
375 /* mem->online_type is protected by device_hotplug_lock */
376 mem->online_type = online_type;
377 ret = device_online(&mem->dev);
380 ret = device_offline(&mem->dev);
406 struct memory_block *mem = to_memory_block(dev);
407 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
431 struct memory_block *mem = to_memory_block(dev);
432 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
434 struct memory_group *group = mem->group;
436 int nid = mem->nid;
443 if (mem->state == MEM_ONLINE) {
445 * If !mem->zone, the memory block spans multiple zones and
448 default_zone = mem->zone;
620 struct memory_block *mem;
622 mem = xa_load(&memory_blocks, block_id);
623 if (mem)
624 get_device(&mem->dev);
625 return mem;
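Lines 620-625 show find_memory_block_by_id(): the xarray lookup takes a device reference before returning, so the block cannot be released while the caller uses it, and the caller must later drop that reference with put_device(). A userspace analogue of the lookup-takes-a-reference pattern (hypothetical names, a plain array standing in for the xarray):

	#include <stdio.h>
	#include <stdlib.h>

	struct block {
		unsigned long id;
		int refcount;
	};

	static struct block *table[16];	/* stands in for the xarray */

	static struct block *lookup_block(unsigned long id)
	{
		struct block *b = table[id % 16];

		if (b)
			b->refcount++;	/* mirrors get_device(&mem->dev) */
		return b;
	}

	static void put_block(struct block *b)
	{
		if (b && --b->refcount == 0)	/* mirrors put_device() */
			free(b);
	}

	int main(void)
	{
		struct block *b = calloc(1, sizeof(*b));

		b->id = 3;
		b->refcount = 1;	/* creation reference */
		table[3] = b;

		struct block *found = lookup_block(3);
		if (found) {
			printf("found block %lu\n", found->id);
			put_block(found);	/* drop the lookup reference */
		}
		return 0;
	}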
681 static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
684 const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
719 * @mem: The memory block device.
726 * set/adjust mem->zone based on the zone ranges of the given node.
728 void memory_block_add_nid(struct memory_block *mem, int nid,
731 if (context == MEMINIT_EARLY && mem->nid != nid) {
741 if (mem->nid == NUMA_NO_NODE)
742 mem->zone = early_node_zone_for_memory_block(mem, nid);
744 mem->zone = NULL;
753 mem->nid = nid;
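Lines 728-753 show memory_block_add_nid() coping with blocks that may span nodes during early init: the first node to claim a block determines mem->zone, while a second, different node makes the zone ambiguous, so it is cleared to NULL. A sketch of that adopt-or-invalidate pattern (illustrative names; a string stands in for struct zone *):

	#include <stdio.h>

	#define NO_NODE (-1)

	struct block {
		int nid;
		const char *zone;	/* stands in for struct zone * */
	};

	static void block_add_nid(struct block *b, int nid)
	{
		if (b->nid == NO_NODE)
			b->zone = "Normal";	/* derive zone from first node */
		else if (b->nid != nid)
			b->zone = NULL;		/* spans nodes: zone ambiguous */
		b->nid = nid;
	}

	int main(void)
	{
		struct block b = { .nid = NO_NODE, .zone = NULL };

		block_add_nid(&b, 0);
		block_add_nid(&b, 1);	/* second node clears the zone */
		printf("nid=%d zone=%s\n", b.nid, b.zone ? b.zone : "(multiple)");
		return 0;
	}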
761 struct memory_block *mem;
764 mem = find_memory_block_by_id(block_id);
765 if (mem) {
766 put_device(&mem->dev);
769 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
770 if (!mem)
773 mem->start_section_nr = block_id * sections_per_block;
774 mem->state = state;
775 mem->nid = NUMA_NO_NODE;
776 mem->altmap = altmap;
777 INIT_LIST_HEAD(&mem->group_next);
787 mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
790 ret = __add_memory_block(mem);
795 mem->group = group;
796 list_add(&mem->group_next, &group->memory_blocks);
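Lines 761-796 show the block-creation path: a lookup first rejects duplicates (dropping the reference the lookup took), then a zeroed allocation is populated field by field before being registered and linked into its group. A userspace sketch of the create-unless-exists shape (hypothetical names; calloc() in place of kzalloc()):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct block {
		unsigned long id;
		int state;
		int nid;
	};

	static struct block *table[16];

	static int create_block(unsigned long id, int state)
	{
		if (table[id % 16])
			return -EEXIST;	/* duplicate: mirrors the find-then-bail check */

		struct block *b = calloc(1, sizeof(*b));	/* zeroed, like kzalloc() */
		if (!b)
			return -ENOMEM;

		b->id = id;
		b->state = state;
		b->nid = -1;	/* like NUMA_NO_NODE */

		table[id % 16] = b;	/* "register" the block */
		return 0;
	}

	int main(void)
	{
		printf("first:  %d\n", create_block(7, 0));
		printf("second: %d\n", create_block(7, 0));	/* -EEXIST */
		return 0;
	}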
855 struct memory_block *mem;
872 mem = find_memory_block_by_id(block_id);
873 if (WARN_ON_ONCE(!mem))
875 remove_memory_block(mem);
892 struct memory_block *mem;
900 mem = find_memory_block_by_id(block_id);
901 if (WARN_ON_ONCE(!mem))
903 num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem));
904 unregister_memory_block_under_nodes(mem);
905 remove_memory_block(mem);
991 struct memory_block *mem;
999 mem = find_memory_block_by_id(block_id);
1000 if (!mem)
1003 ret = func(mem, arg);
1004 put_device(&mem->dev);
1018 struct memory_block *mem = to_memory_block(dev);
1021 return cb_data->func(mem, cb_data->arg);
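Lines 991-1021 show the two iteration helpers: one walks block ids, looks each block up, calls func(mem, arg), and drops the reference; the other routes a callback through the driver core via a cb_data wrapper. A minimal sketch of the callback-walk pattern itself (illustrative names; a fixed-size array instead of a device lookup):

	#include <stdio.h>

	struct block { unsigned long id; };

	static struct block blocks[4] = { {0}, {1}, {2}, {3} };

	typedef int (*walk_fn)(struct block *blk, void *arg);

	static int walk_blocks(walk_fn func, void *arg)
	{
		int ret = 0;

		/* stop on the first nonzero return, like the kernel walker */
		for (unsigned long i = 0; i < 4 && !ret; i++)
			ret = func(&blocks[i], arg);	/* like func(mem, arg) */
		return ret;
	}

	static int print_block(struct block *blk, void *arg)
	{
		printf("%s block %lu\n", (const char *)arg, blk->id);
		return 0;
	}

	int main(void)
	{
		return walk_blocks(print_block, "visiting");
	}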
1212 struct memory_block *mem = find_memory_block_by_id(block_id);
1214 if (mem)
1215 atomic_long_inc(&mem->nr_hwpoison);
1221 struct memory_block *mem = find_memory_block_by_id(block_id);
1223 if (mem)
1224 atomic_long_sub(i, &mem->nr_hwpoison);
1227 static unsigned long memblk_nr_poison(struct memory_block *mem)
1229 return atomic_long_read(&mem->nr_hwpoison);
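Lines 1212-1229 show the per-block hardware-poison accounting: a lock-free counter bumped when a page is poisoned, decremented when pages are unpoisoned, and read before onlining (line 194) and at removal (line 903). A sketch with C11 atomics standing in for the kernel's atomic_long_t (illustrative names):

	#include <stdatomic.h>
	#include <stdio.h>

	struct blk { atomic_long nr_hwpoison; };

	int main(void)
	{
		struct blk b = { .nr_hwpoison = 0 };

		atomic_fetch_add(&b.nr_hwpoison, 1);	/* like atomic_long_inc() */
		atomic_fetch_sub(&b.nr_hwpoison, 1);	/* like atomic_long_sub(i, ...) */
		printf("poisoned pages: %ld\n",
		       atomic_load(&b.nr_hwpoison));	/* like atomic_long_read() */
		return 0;
	}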