Lines Matching defs:new (mm/memcontrol.c)

149 * register_event() callback will be used to add new userspace
1184 * new one might jump in right at the end of
2934 * If the slab is brand new and nobody can yet access its
3406 * grab some new pages. The stock's nr_bytes will be flushed to
3519 * Make sure that the new limit (memsw or memory limit) doesn't
4331 struct mem_cgroup_threshold_ary *new;
4351 /* Check if a threshold was crossed before adding a new one */
4357 /* Allocate memory for new array of thresholds */
4358 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4359 if (!new) {
4363 new->size = size;
4365 /* Copy thresholds (if any) to new array */
4367 memcpy(new->entries, thresholds->primary->entries,
4368 flex_array_size(new, entries, size - 1));
4370 /* Add new threshold */
4371 new->entries[size - 1].eventfd = eventfd;
4372 new->entries[size - 1].threshold = threshold;
4374 /* Sort thresholds. Registering a new threshold isn't time-critical */
4375 sort(new->entries, size, sizeof(*new->entries),
4379 new->current_threshold = -1;
4381 if (new->entries[i].threshold <= usage) {
4383 * new->current_threshold will not be used until
4387 ++new->current_threshold;
4396 rcu_assign_pointer(thresholds->primary, new);
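The cluster above is the threshold-registration path in mm/memcontrol.c: the live array is never edited in place. A sorted copy with one extra slot is built, current_threshold is precomputed against the current usage, and the copy is published with rcu_assign_pointer(). Below is a minimal userspace sketch of that copy-sort-publish pattern; the names (struct threshold_ary, register_threshold) are hypothetical, and qsort() plus a C11 release store stand in for the kernel's sort() and rcu_assign_pointer().

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct threshold {
	unsigned long value;		/* usage level that fires an event */
};

struct threshold_ary {
	int current_threshold;		/* last entry at or below current usage */
	size_t size;
	struct threshold entries[];	/* kept sorted ascending */
};

static int cmp_thresholds(const void *a, const void *b)
{
	unsigned long x = ((const struct threshold *)a)->value;
	unsigned long y = ((const struct threshold *)b)->value;

	return (x > y) - (x < y);
}

/* Build a larger sorted copy and publish it; readers never see a partial array. */
static int register_threshold(_Atomic(struct threshold_ary *) *primary,
			      unsigned long value, unsigned long usage)
{
	struct threshold_ary *old = atomic_load(primary);
	size_t size = (old ? old->size : 0) + 1;
	struct threshold_ary *new;

	/* Allocate memory for the new array of thresholds. */
	new = malloc(sizeof(*new) + size * sizeof(new->entries[0]));
	if (!new)
		return -1;
	new->size = size;

	/* Copy thresholds (if any) to the new array, then append the new one. */
	if (old)
		memcpy(new->entries, old->entries,
		       (size - 1) * sizeof(new->entries[0]));
	new->entries[size - 1].value = value;

	/* Registering a new threshold isn't time-critical; a full sort is fine. */
	qsort(new->entries, size, sizeof(new->entries[0]), cmp_thresholds);

	/* Precompute the cursor the event path will start from. */
	new->current_threshold = -1;
	for (size_t i = 0; i < size && new->entries[i].value <= usage; i++)
		++new->current_threshold;

	/* The release store plays the role of rcu_assign_pointer(). */
	atomic_store_explicit(primary, new, memory_order_release);
	/* The real code keeps 'old' as a spare and frees nothing until an
	 * RCU grace period guarantees no reader still uses it. */
	return 0;
}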
4423 struct mem_cgroup_threshold_ary *new;
4444 /* Calculate the new number of thresholds */
4453 new = thresholds->spare;
4461 kfree(new);
4462 new = NULL;
4466 new->size = size;
4469 new->current_threshold = -1;
4474 new->entries[j] = thresholds->primary->entries[i];
4475 if (new->entries[j].threshold <= usage) {
4477 * new->current_threshold will not be used
4481 ++new->current_threshold;
4490 rcu_assign_pointer(thresholds->primary, new);
4496 if (!new) {
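The matching unregister path avoids allocation entirely: surviving entries are filtered into the pre-allocated spare array, that array is published, and the displaced primary becomes the next spare; when nothing survives, NULL is published and both buffers go away. Continuing the sketch above under the same assumptions (unregister_threshold and the spare pointer are illustrative, not kernel API):

/* Remove every entry matching 'value' by filtering into the spare buffer. */
static void unregister_threshold(_Atomic(struct threshold_ary *) *primary,
				 struct threshold_ary **spare,
				 unsigned long value, unsigned long usage)
{
	struct threshold_ary *old = atomic_load(primary);
	struct threshold_ary *new = *spare;
	size_t size = 0;

	/* Calculate the new number of thresholds. */
	for (size_t i = 0; old && i < old->size; i++)
		if (old->entries[i].value != value)
			size++;

	if (!size) {
		/* No thresholds left: publish NULL instead of an array. */
		free(new);
		new = NULL;
	} else {
		/* The spare has room: it was the primary one registration
		 * ago, and at least one entry is filtered out here. */
		new->size = size;
		new->current_threshold = -1;
		for (size_t i = 0, j = 0; i < old->size; i++) {
			if (old->entries[i].value == value)
				continue;
			new->entries[j] = old->entries[i];
			if (new->entries[j].value <= usage)
				++new->current_threshold;
			j++;
		}
	}

	atomic_store_explicit(primary, new, memory_order_release);
	/* The real code waits out an RCU grace period before touching 'old'. */
	*spare = old;
	if (!new) {
		/* All events unregistered: free the displaced primary too. */
		free(*spare);
		*spare = NULL;
	}
}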
4764 * off again. Also, don't start a new one if there's
4882 * Parse input and register a new cgroup event handler.
5186 * relatively small ID space and prevent the creation of new cgroups
5831 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
5909 * All state has been migrated; let's switch to the new memcg.
5918 * new memcg that isn't locked, the above state can change
5992 * prevent new faults against pagecache and swapcache,
7146 * new swapcache page, finish the transfer by uncharging the swap
7296 * @new: Replacement folio.
7298 * Charge @new as a replacement folio for @old. @old will
7301 * Both folios must be locked, @new->mapping must be set up.
7303 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7306 long nr_pages = folio_nr_pages(new);
7310 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7311 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7312 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7317 /* Page cache replacement: new folio already charged? */
7318 if (folio_memcg(new))
7326 /* Force-charge the new page. The old one will be freed soon */
7334 commit_charge(new, memcg);
7338 memcg_check_events(memcg, folio_nid(new));
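The final cluster is mem_cgroup_migrate() in mm/memcontrol.c, which hands a memcg charge from a folio being replaced to its replacement: if the new folio is already charged (page cache replacement), nothing happens; otherwise the new folio is force-charged against the old folio's memcg, since the old folio releases its own charge when it is freed shortly afterwards. A userspace model of that control flow follows; struct memcg, the folio fields shown, and migrate_charge() are hypothetical simplifications, not the kernel types.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for struct mem_cgroup and struct folio. */
struct memcg {
	atomic_long usage;		/* pages charged to this group */
};

struct folio {
	struct memcg *memcg;		/* owning group, NULL if uncharged */
	long nr_pages;
	bool locked;
};

/*
 * Model of mem_cgroup_migrate(): charge 'new' as a replacement for 'old'.
 * 'old' keeps its charge until it is freed, so usage transiently counts
 * both folios; that is what "force-charge" means in the kernel comment.
 */
static void migrate_charge(struct folio *old, struct folio *new)
{
	/* Both folios must be locked and must describe the same data. */
	assert(old->locked && new->locked);
	assert(old->nr_pages == new->nr_pages);

	/* Page cache replacement: the new folio may already be charged. */
	if (new->memcg)
		return;

	struct memcg *memcg = old->memcg;
	if (!memcg)			/* old folio was never charged */
		return;

	/* Force-charge the new folio: bump usage without reclaim... */
	atomic_fetch_add(&memcg->usage, new->nr_pages);
	/* ...and commit, so the new folio points at its owning group. */
	new->memcg = memcg;
}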