xref: /kernel/linux/linux-5.10/fs/afs/cell.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* AFS cell and server record management
3 *
4 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7
8#include <linux/slab.h>
9#include <linux/key.h>
10#include <linux/ctype.h>
11#include <linux/dns_resolver.h>
12#include <linux/sched.h>
13#include <linux/inet.h>
14#include <linux/namei.h>
15#include <keys/rxrpc-type.h>
16#include "internal.h"
17
18static unsigned __read_mostly afs_cell_gc_delay = 10;
19static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
20static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
21static atomic_t cell_debug_id;
22
23static void afs_queue_cell_manager(struct afs_net *);
24static void afs_manage_cell_work(struct work_struct *);
25
26static void afs_dec_cells_outstanding(struct afs_net *net)
27{
28	if (atomic_dec_and_test(&net->cells_outstanding))
29		wake_up_var(&net->cells_outstanding);
30}
31
32/*
33 * Set the cell timer to fire after a given delay, assuming it's not already
34 * set for an earlier time.
35 */
36static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
37{
38	if (net->live) {
39		atomic_inc(&net->cells_outstanding);
40		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
41			afs_dec_cells_outstanding(net);
42	} else {
43		afs_queue_cell_manager(net);
44	}
45}
46
/*
 * Look up and get an activation reference on a cell record.  The caller must
 * hold net->cells_lock at least read-locked.
 *
 * A NULL @name selects the workstation ("root") cell; otherwise the cell tree
 * is searched for @name case-insensitively.  Returns the cell with its active
 * count raised via afs_use_cell(), or a negative errno pointer.
 */
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	/* Reject an empty name and names too long to be a cell name. */
	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	/* No name means "use the workstation cell", if one has been set. */
	if (!name) {
		cell = net->ws_cell;
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	/* Binary-search the tree.  This comparator must stay identical to
	 * the one used at insertion time in afs_lookup_cell(): compare the
	 * common prefix case-insensitively, then break ties on length.
	 */
	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	return afs_use_cell(cell, reason);
}
94
95/*
96 * Look up and get an activation reference on a cell record.
97 */
98struct afs_cell *afs_find_cell(struct afs_net *net,
99			       const char *name, unsigned int namesz,
100			       enum afs_cell_trace reason)
101{
102	struct afs_cell *cell;
103
104	down_read(&net->cells_lock);
105	cell = afs_find_cell_locked(net, name, namesz, reason);
106	up_read(&net->cells_lock);
107	return cell;
108}
109
/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 *
 * @addresses is an optional colon-separated list of numeric VL server
 * addresses; if NULL, an empty, already-expired list is attached so that the
 * manager will perform a DNS lookup.  Returns a new cell (ref 1, active 0)
 * or a negative errno pointer; the caller owns the returned ref.
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.  This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->name = kmalloc(namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	/* Store the name lowercased so lookups can be case-insensitive. */
	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i] = 0;

	atomic_set(&cell->ref, 1);
	atomic_set(&cell->active, 0);
	INIT_WORK(&cell->manager, afs_manage_cell_work);
	cell->volumes = RB_ROOT;
	INIT_HLIST_HEAD(&cell->proc_volumes);
	seqlock_init(&cell->volume_lock);
	cell->fs_servers = RB_ROOT;
	seqlock_init(&cell->fs_lock);
	rwlock_init(&cell->vl_servers_lock);
	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		/* Explicitly-configured addresses never expire. */
		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		/* Empty list, expiring now: forces an immediate DNS lookup. */
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
	/* Balanced by afs_cell_destroy() -> afs_dec_cells_outstanding(). */
	atomic_inc(&net->cells_outstanding);
	cell->debug_id = atomic_inc_return(&cell_debug_id);
	trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell->name);
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
218
/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 *
 * Returns the cell with an activation ref held, or a negative errno pointer
 * (-EEXIST if @excl and the cell already exists; cell->error if the manager
 * failed to construct it).
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		/* Fast path: the cell may already exist. */
		cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		/* Same comparator as afs_find_cell_locked(). */
		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	/* active == 2: one count for the caller, one for being in the tree. */
	atomic_set(&cell->active, 2);
	trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 2, afs_cell_trace_insert);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

	/* Hand the new cell to the manager work item for activation. */
	afs_queue_cell(cell, afs_cell_trace_get_queue_new);

wait_for_cell:
	trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), atomic_read(&cell->active),
		       afs_cell_trace_wait);
	_debug("wait_for_cell");
	/* Wait for the manager to finish bringing the cell up (ACTIVE) or
	 * tearing it down again (REMOVED) if construction failed.
	 */
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
		       }));

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_REMOVED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_use_cell(cursor, afs_cell_trace_use_lookup);
		ret = 0;
	}
	up_write(&net->cells_lock);
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
335
/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 *
 * @rootcell has the form "cellname[:vl-addr[:vl-addr]...]".  The named cell
 * is looked up (creating it if necessary), pinned against garbage collection
 * and installed as net->ws_cell in place of any previous root cell.
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	/* Split "cellname:addresses" at the first colon, if any. */
	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	/* Pin the cell against GC; only take the extra activation the first
	 * time the NO_GC flag transitions to set.
	 */
	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* install the new cell */
	down_write(&net->cells_lock);
	afs_see_cell(new_root, afs_cell_trace_see_ws);
	old_root = net->ws_cell;
	net->ws_cell = new_root;
	up_write(&net->cells_lock);

	/* Drop the previous root cell's activation (NULL-safe). */
	afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}
388
/*
 * Update a cell's VL server address list from the DNS.
 *
 * On lookup failure (other than ENOMEM), an empty list carrying an
 * appropriate DNS status is substituted so that the failure is cached.  The
 * record expiry is clamped to [afs_cell_min_ttl, afs_cell_max_ttl] from now.
 * Always bumps dns_lookup_count and wakes anyone waiting on it, so waiters
 * can observe that a lookup pass completed even if it failed.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Substitute an empty list that records why the lookup
		 * failed.
		 */
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			if (ret >= 0)
				ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	/* Clamp the expiry time into the configured TTL window. */
	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			/* Transient failure: retry again in 10s. */
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	} else {
		/* Fix a leak: if we keep the old list because the new one is
		 * empty, the new list must be discarded, not just dropped on
		 * the floor.
		 */
		old = vllist;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	/* Publish source/status before bumping the counter that waiters read
	 * with smp_load_acquire().
	 */
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}
485
/*
 * Destroy a cell record
 *
 * Runs from RCU context after the last ref was put.  Releases everything the
 * cell owns and balances the count that afs_alloc_cell() took on
 * net->cells_outstanding.
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int u;

	_enter("%p{%s}", cell, cell->name);

	/* The refcount must have reached zero for us to be called. */
	u = atomic_read(&cell->ref);
	ASSERTCMP(u, ==, 0);
	trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), afs_cell_trace_free);

	/* No readers remain, so the VL server list can be accessed without
	 * rcu_dereference().
	 */
	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	kfree(cell->name);
	kfree(cell);

	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}
510
511/*
512 * Queue the cell manager.
513 */
514static void afs_queue_cell_manager(struct afs_net *net)
515{
516	int outstanding = atomic_inc_return(&net->cells_outstanding);
517
518	_enter("%d", outstanding);
519
520	if (!queue_work(afs_wq, &net->cells_manager))
521		afs_dec_cells_outstanding(net);
522}
523
524/*
525 * Cell management timer.  We have an increment on cells_outstanding that we
526 * need to pass along to the work item.
527 */
528void afs_cells_timer(struct timer_list *timer)
529{
530	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);
531
532	_enter("");
533	if (!queue_work(afs_wq, &net->cells_manager))
534		afs_dec_cells_outstanding(net);
535}
536
537/*
538 * Get a reference on a cell record.
539 */
540struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
541{
542	int u;
543
544	if (atomic_read(&cell->ref) <= 0)
545		BUG();
546
547	u = atomic_inc_return(&cell->ref);
548	trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), reason);
549	return cell;
550}
551
/*
 * Drop a reference on a cell record.  NULL-safe.  On the final put the cell
 * must no longer be active and is freed via RCU.
 */
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		/* Snapshot for tracing before the decrement: once ref hits
		 * zero another CPU's RCU callback may free the cell.
		 */
		unsigned int debug_id = cell->debug_id;
		unsigned int u, a;

		a = atomic_read(&cell->active);
		u = atomic_dec_return(&cell->ref);
		trace_afs_cell(debug_id, u, a, reason);
		if (u == 0) {
			/* We hold the last ref; nobody should still be
			 * actively using the cell at this point.
			 */
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			call_rcu(&cell->rcu, afs_cell_destroy);
		}
	}
}
571
572/*
573 * Note a cell becoming more active.
574 */
575struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
576{
577	int u, a;
578
579	if (atomic_read(&cell->ref) <= 0)
580		BUG();
581
582	u = atomic_read(&cell->ref);
583	a = atomic_inc_return(&cell->active);
584	trace_afs_cell(cell->debug_id, u, a, reason);
585	return cell;
586}
587
/*
 * Record a cell becoming less active.  When the active counter reaches 1, it
 * is scheduled for destruction, but may get reactivated.
 */
void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	int u, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	/* Note when the cell went idle and work out how long to let it
	 * linger: a cell with VL servers gets the GC grace period; one that
	 * never resolved can be collected immediately.
	 *
	 * NOTE(review): vl_servers is peeked here without the vl_servers_lock
	 * or rcu_dereference(); presumably racing with an update is benign
	 * for a heuristic delay — confirm against upstream.
	 */
	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	/* Snapshot debug_id before the decrement for safe tracing. */
	debug_id = cell->debug_id;
	u = atomic_read(&cell->ref);
	a = atomic_dec_return(&cell->active);
	trace_afs_cell(debug_id, u, a, reason);
	WARN_ON(a == 0);
	if (a == 1)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(net, expire_delay);
}
618
619/*
620 * Note that a cell has been seen.
621 */
622void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
623{
624	int u, a;
625
626	u = atomic_read(&cell->ref);
627	a = atomic_read(&cell->active);
628	trace_afs_cell(cell->debug_id, u, a, reason);
629}
630
631/*
632 * Queue a cell for management, giving the workqueue a ref to hold.
633 */
634void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
635{
636	afs_get_cell(cell, reason);
637	if (!queue_work(afs_wq, &cell->manager))
638		afs_put_cell(cell, afs_cell_trace_put_queue_fail);
639}
640
641/*
642 * Allocate a key to use as a placeholder for anonymous user security.
643 */
644static int afs_alloc_anon_key(struct afs_cell *cell)
645{
646	struct key *key;
647	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;
648
649	/* Create a key to represent an anonymous user. */
650	memcpy(keyname, "afs@", 4);
651	dp = keyname + 4;
652	cp = cell->name;
653	do {
654		*dp++ = tolower(*cp);
655	} while (*cp++);
656
657	key = rxrpc_get_null_key(keyname);
658	if (IS_ERR(key))
659		return PTR_ERR(key);
660
661	cell->anonymous_key = key;
662
663	_debug("anon key %p{%x}",
664	       cell->anonymous_key, key_serial(cell->anonymous_key));
665	return 0;
666}
667
/*
 * Activate a cell.
 *
 * Allocates the anonymous key if necessary, acquires the cache cookie, sets
 * up the procfs entries and splices the cell into the namespace's
 * name-sorted proc list and the dynamic root.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif
	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);
	/* Find the insertion point that keeps proc_cells sorted by name. */
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	/* Open-coded RCU hlist insertion at the chosen point.
	 * NOTE(review): publishing &cell->proc_link.next rather than
	 * &cell->proc_link looks odd for an hlist link — verify against the
	 * upstream tree before touching this.
	 */
	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}
711
/*
 * Deactivate a cell.
 *
 * The reverse of afs_activate_cell(): remove the procfs entries, unhook the
 * cell from the proc list and the dynamic root, and relinquish the cache
 * cookie.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	/* Drop the cookie without retiring the cached data. */
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}
733
/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 *
 * This is the cell state machine, run from the cell's work item.  It walks
 * the cell through activation (UNSET -> ACTIVATING -> ACTIVE) and, once the
 * active count has dropped back to 1 (the tree's own count), through
 * teardown (ACTIVE -> DEACTIVATING -> INACTIVE -> REMOVED), waking waiters
 * on cell->state at every transition.
 */
static void afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	int ret, active;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		down_write(&net->cells_lock);
		/* Try to swap the tree's own active count (1 -> 0); if it
		 * succeeds nobody reactivated the cell, so unlink it.
		 */
		active = 1;
		if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
			rb_erase(&cell->net_node, &net->cells);
			trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 0,
				       afs_cell_trace_unuse_delete);
			smp_store_release(&cell->state, AFS_CELL_REMOVED);
		}
		up_write(&net->cells_lock);
		if (cell->state == AFS_CELL_REMOVED) {
			wake_up_var(&cell->state);
			goto final_destruction;
		}
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		/* The cell was reactivated whilst we were looking: restart
		 * the state machine from scratch.
		 */
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->active) > 1) {
			/* Still in use: just refresh the DNS if asked to. */
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		if (atomic_read(&cell->active) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_REMOVED:
		goto done;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	/* Publish the error before the state that waiters load-acquire. */
	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	/* Somebody started using the cell again before we tore it down. */
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	/* The root volume is pinning the cell */
	afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;
	afs_put_cell(cell, afs_cell_trace_put_destroy);
}
837
838static void afs_manage_cell_work(struct work_struct *work)
839{
840	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
841
842	afs_manage_cell(cell);
843	afs_put_cell(cell, afs_cell_trace_put_queue_work);
844}
845
/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	down_read(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned active;
		bool sched_cell = false;

		active = atomic_read(&cell->active);
		trace_afs_cell(cell->debug_id, atomic_read(&cell->ref),
			       active, afs_cell_trace_manage);

		/* A cell in the tree always holds at least its own count. */
		ASSERTCMP(active, >=, 1);

		if (purging) {
			/* On teardown, drop the pin taken for NO_GC cells
			 * (e.g. the root cell) so they become collectable.
			 */
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
				active = atomic_dec_return(&cell->active);
				trace_afs_cell(cell->debug_id, atomic_read(&cell->ref),
					       active, afs_cell_trace_unuse_pin);
			}
		}

		if (active == 1) {
			/* Unused cell: schedule it if its GC grace period has
			 * elapsed, otherwise note when that will happen.
			 */
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			/* Kick the cell's manager if a DNS refresh is due. */
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
	}

	up_read(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
940
/*
 * Purge in-memory cell database.
 *
 * Called at namespace teardown: drops the workstation cell, cancels the
 * management timer, kicks the manager so every remaining cell gets GC'd and
 * then waits for cells_outstanding to drain to zero.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	/* Unhook the workstation cell and drop its activation. */
	down_write(&net->cells_lock);
	ws = net->ws_cell;
	net->ws_cell = NULL;
	up_write(&net->cells_lock);
	afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);

	_debug("del timer");
	/* A pending timer carries a count on cells_outstanding; reclaim it
	 * if we successfully cancelled the timer.
	 */
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}
968