Lines Matching refs:clp
(every hit references clp, a struct nfs4_client *, in the NFSv4 server's client and state management code; the line numbers are consistent with fs/nfsd/nfs4state.c)

161 static bool is_client_expired(struct nfs4_client *clp)
163 return clp->cl_time == 0;
167 struct nfs4_client *clp)
169 if (clp->cl_state != NFSD4_ACTIVE)
173 static __be32 get_client_locked(struct nfs4_client *clp)
175 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
179 if (is_client_expired(clp))
181 atomic_inc(&clp->cl_rpc_users);
182 nfsd4_dec_courtesy_client_count(nn, clp);
183 clp->cl_state = NFSD4_ACTIVE;
189 renew_client_locked(struct nfs4_client *clp)
191 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
193 if (is_client_expired(clp)) {
197 clp->cl_clientid.cl_boot,
198 clp->cl_clientid.cl_id);
202 list_move_tail(&clp->cl_lru, &nn->client_lru);
203 clp->cl_time = ktime_get_boottime_seconds();
204 nfsd4_dec_courtesy_client_count(nn, clp);
205 clp->cl_state = NFSD4_ACTIVE;
208 static void put_client_renew_locked(struct nfs4_client *clp)
210 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
214 if (!atomic_dec_and_test(&clp->cl_rpc_users))
216 if (!is_client_expired(clp))
217 renew_client_locked(clp);
222 static void put_client_renew(struct nfs4_client *clp)
224 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
226 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
228 if (!is_client_expired(clp))
229 renew_client_locked(clp);
250 struct nfs4_client *clp = ses->se_client;
251 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
257 put_client_renew_locked(clp);
262 struct nfs4_client *clp = ses->se_client;
263 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
335 struct nfs4_client *clp = lo->lo_owner.so_client;
336 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
543 struct nfs4_client *clp)
547 lockdep_assert_held(&clp->cl_lock);
549 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
561 struct nfs4_client *clp)
565 spin_lock(&clp->cl_lock);
566 oo = find_openstateowner_str_locked(hashval, open, clp);
567 spin_unlock(&clp->cl_lock);
754 struct nfs4_client *clp;
767 clp = st->st_stid.sc_client;
768 if (try_to_expire_client(clp))
774 clp = stp->st_stid.sc_client;
775 nn = net_generic(clp->net, nfsd_net_id);
865 alloc_clnt_odstate(struct nfs4_client *clp)
871 co->co_client = clp;
1058 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
1062 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
1158 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1170 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
1202 struct nfs4_client *clp = s->sc_client;
1204 might_lock(&clp->cl_lock);
1206 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1210 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1211 nfs4_free_cpntf_statelist(clp->net, s);
1212 spin_unlock(&clp->cl_lock);
1268 * @clp: a pointer to the nfs4_client we're granting a delegation to
1276 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1286 if (clp == searchclp) {
1309 struct nfs4_client *clp = dp->dl_stid.sc_client;
1314 if (nfs4_delegation_exists(clp, fp))
1319 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1362 struct nfs4_client *clp = dp->dl_stid.sc_client;
1368 if (clp->cl_minorversion) {
1369 spin_lock(&clp->cl_lock);
1372 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1373 spin_unlock(&clp->cl_lock);
1451 struct nfs4_client *clp = sop->so_client;
1453 might_lock(&clp->cl_lock);
1455 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1458 spin_unlock(&clp->cl_lock);
1520 struct nfs4_client *clp = s->sc_client;
1522 lockdep_assert_held(&clp->cl_lock);
1531 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1548 struct nfs4_client *clp = stp->st_stid.sc_client;
1551 spin_lock(&clp->cl_lock);
1553 spin_unlock(&clp->cl_lock);
1560 struct nfs4_client *clp = lo->lo_owner.so_client;
1562 lockdep_assert_held(&clp->cl_lock);
1629 struct nfs4_client *clp = oo->oo_owner.so_client;
1631 lockdep_assert_held(&clp->cl_lock);
1657 struct nfs4_client *clp = oo->oo_owner.so_client;
1662 spin_lock(&clp->cl_lock);
1670 spin_unlock(&clp->cl_lock);
1724 struct nfs4_client *clp = ses->se_client;
1728 sid->clientid = clp->cl_clientid;
1867 struct nfs4_client *clp = c->cn_session->se_client;
1869 trace_nfsd_cb_lost(clp);
1871 spin_lock(&clp->cl_lock);
1876 nfsd4_probe_callback(clp);
1877 spin_unlock(&clp->cl_lock);
1902 struct nfs4_client *clp = ses->se_client;
1904 spin_lock(&clp->cl_lock);
1906 spin_unlock(&clp->cl_lock);
1940 struct nfs4_client *clp = s->se_client;
1943 spin_lock(&clp->cl_lock);
1947 spin_unlock(&clp->cl_lock);
1952 spin_lock(&clp->cl_lock);
1954 spin_unlock(&clp->cl_lock);
1970 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1975 new->se_client = clp;
1987 spin_lock(&clp->cl_lock);
1988 list_add(&new->se_perclnt, &clp->cl_sessions);
1989 spin_unlock(&clp->cl_lock);
2000 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2001 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2051 struct nfs4_client *clp = ses->se_client;
2052 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2085 struct nfs4_client *clp;
2092 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2093 if (clp == NULL)
2095 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2096 if (clp->cl_name.data == NULL)
2098 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2101 if (!clp->cl_ownerstr_hashtbl)
2104 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2105 INIT_LIST_HEAD(&clp->cl_sessions);
2106 idr_init(&clp->cl_stateids);
2107 atomic_set(&clp->cl_rpc_users, 0);
2108 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2109 clp->cl_state = NFSD4_ACTIVE;
2111 atomic_set(&clp->cl_delegs_in_recall, 0);
2112 INIT_LIST_HEAD(&clp->cl_idhash);
2113 INIT_LIST_HEAD(&clp->cl_openowners);
2114 INIT_LIST_HEAD(&clp->cl_delegations);
2115 INIT_LIST_HEAD(&clp->cl_lru);
2116 INIT_LIST_HEAD(&clp->cl_revoked);
2118 INIT_LIST_HEAD(&clp->cl_lo_states);
2120 INIT_LIST_HEAD(&clp->async_copies);
2121 spin_lock_init(&clp->async_lock);
2122 spin_lock_init(&clp->cl_lock);
2123 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2124 return clp;
2126 kfree(clp->cl_name.data);
2128 kmem_cache_free(client_slab, clp);
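
alloc_client() (lines 2085-2128) zero-allocates the client from a slab, duplicates the caller's name, allocates the per-client owner-string hash table, initialises the embedded lists, idr, counters and locks, and unwinds in reverse order on failure. A simplified user-space analogue of that allocate-and-unwind shape is sketched below; the OWNER_HASH_SIZE value and the tiny list_head stand-in are assumptions made for the sketch.

/* Simplified analogue of alloc_client(): allocate, duplicate the name,
 * allocate the owner-string hash table, initialise, and unwind on error. */
#include <stdlib.h>
#include <string.h>

#define OWNER_HASH_SIZE 64   /* assumed value, for illustration only */

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

struct client {
        char *cl_name;
        struct list_head *cl_ownerstr_hashtbl;
        struct list_head cl_sessions, cl_lru, cl_openowners, cl_delegations;
        int cl_rpc_users;          /* echoes lines 2107-2109 */
        int cl_state;
};

static struct client *alloc_client(const char *name)
{
        struct client *clp;
        int i;

        clp = calloc(1, sizeof(*clp));            /* kmem_cache_zalloc() analogue */
        if (!clp)
                return NULL;
        clp->cl_name = strdup(name);              /* xdr_netobj_dup() analogue */
        if (!clp->cl_name)
                goto err_no_name;
        clp->cl_ownerstr_hashtbl = malloc(OWNER_HASH_SIZE *
                                          sizeof(*clp->cl_ownerstr_hashtbl));
        if (!clp->cl_ownerstr_hashtbl)
                goto err_no_hashtbl;
        for (i = 0; i < OWNER_HASH_SIZE; i++)
                INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
        INIT_LIST_HEAD(&clp->cl_sessions);
        INIT_LIST_HEAD(&clp->cl_lru);
        INIT_LIST_HEAD(&clp->cl_openowners);
        INIT_LIST_HEAD(&clp->cl_delegations);
        return clp;

err_no_hashtbl:
        free(clp->cl_name);                       /* unwind in reverse order */
err_no_name:
        free(clp);
        return NULL;
}

int main(void)
{
        struct client *clp = alloc_client("client-1");

        if (!clp)
                return 1;
        free(clp->cl_ownerstr_hashtbl);
        free(clp->cl_name);
        free(clp);
        return 0;
}
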
2135 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2137 free_svc_cred(&clp->cl_cred);
2138 kfree(clp->cl_ownerstr_hashtbl);
2139 kfree(clp->cl_name.data);
2140 kfree(clp->cl_nii_domain.data);
2141 kfree(clp->cl_nii_name.data);
2142 idr_destroy(&clp->cl_stateids);
2143 kfree(clp->cl_ra);
2144 kmem_cache_free(client_slab, clp);
2147 static void drop_client(struct nfs4_client *clp)
2149 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2153 free_client(struct nfs4_client *clp)
2155 while (!list_empty(&clp->cl_sessions)) {
2157 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2163 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2164 if (clp->cl_nfsd_dentry) {
2165 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2166 clp->cl_nfsd_dentry = NULL;
2169 drop_client(clp);
2174 unhash_client_locked(struct nfs4_client *clp)
2176 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2182 clp->cl_time = 0;
2184 if (!list_empty(&clp->cl_idhash)) {
2185 list_del_init(&clp->cl_idhash);
2186 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2187 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2189 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2191 list_del_init(&clp->cl_lru);
2192 spin_lock(&clp->cl_lock);
2193 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2195 spin_unlock(&clp->cl_lock);
2199 unhash_client(struct nfs4_client *clp)
2201 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2204 unhash_client_locked(clp);
2208 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2210 if (atomic_read(&clp->cl_rpc_users))
2212 unhash_client_locked(clp);
2217 __destroy_client(struct nfs4_client *clp)
2219 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2227 while (!list_empty(&clp->cl_delegations)) {
2228 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2238 while (!list_empty(&clp->cl_revoked)) {
2239 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2243 while (!list_empty(&clp->cl_openowners)) {
2244 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2251 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2258 nfsd4_return_all_client_layouts(clp);
2259 nfsd4_shutdown_copy(clp);
2260 nfsd4_shutdown_callback(clp);
2261 if (clp->cl_cb_conn.cb_xprt)
2262 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2264 nfsd4_dec_courtesy_client_count(nn, clp);
2265 free_client(clp);
2270 destroy_client(struct nfs4_client *clp)
2272 unhash_client(clp);
2273 __destroy_client(clp);
2276 static void inc_reclaim_complete(struct nfs4_client *clp)
2278 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2282 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2287 clp->net->ns.inum);
2292 static void expire_client(struct nfs4_client *clp)
2294 unhash_client(clp);
2295 nfsd4_client_record_remove(clp);
2296 __destroy_client(clp);
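
Lines 2174-2296 are the teardown path: unhash_client_locked() zeroes cl_time (the same field is_client_expired() tests) and removes the client from the id hash, the LRU and the confirmed/unconfirmed name tree; mark_client_expired_locked() refuses to do so while cl_rpc_users is non-zero; expire_client() unhashes, removes the stable-storage record and then runs __destroy_client(). A reduced logic model, with booleans standing in for the hash/rbtree membership and the real error codes, might look like this:

/* Sketch of the unhash/expire flow at lines 2174-2296. This is a logic
 * model only -- the id hash, name rbtree and per-net locking are reduced
 * to plain fields. */
#include <stdbool.h>
#include <stdio.h>

struct client {
        int rpc_users;     /* clp->cl_rpc_users */
        long cl_time;      /* 0 == expired/unhashed */
        bool hashed;       /* stands in for cl_idhash / name-tree membership */
};

static void unhash_client_locked(struct client *clp)
{
        clp->cl_time = 0;
        clp->hashed = false;
}

/* Kernel version returns nfserr_jukebox while references remain. */
static bool mark_client_expired_locked(struct client *clp)
{
        if (clp->rpc_users)
                return false;
        unhash_client_locked(clp);
        return true;
}

static void expire_client(struct client *clp)
{
        unhash_client_locked(clp);
        /* nfsd4_client_record_remove() and __destroy_client() would follow. */
}

int main(void)
{
        struct client c = { .rpc_users = 1, .cl_time = 12345, .hashed = true };

        printf("busy expire: %s\n",
               mark_client_expired_locked(&c) ? "ok" : "refused");
        c.rpc_users = 0;
        expire_client(&c);
        printf("after expire: cl_time=%ld hashed=%d\n", c.cl_time, (int)c.hashed);
        return 0;
}
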
2429 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2439 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2442 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2444 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2445 clp->cl_clientid.cl_id = nn->clientid_counter++;
2446 gen_confirm(clp, nn);
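
gen_clid() (lines 2442-2446) stamps the new client with the server boot time and a per-net counter, then gen_confirm() copies a freshly generated verifier into cl_confirm (line 2439). A rough user-space sketch follows; rand() is an explicitly non-authoritative stand-in for however the kernel actually derives the verifier bytes.

/* Sketch of gen_clid()/gen_confirm(): clientid = (boot time, counter++),
 * confirm verifier = 8 freshly generated bytes. */
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

struct clientid { uint32_t cl_boot; uint32_t cl_id; };

struct net_state {
        time_t boot_time;          /* nn->boot_time */
        uint32_t clientid_counter; /* nn->clientid_counter */
};

struct client {
        struct clientid cl_clientid;
        unsigned char cl_confirm[8];   /* NFSv4 verifiers are 8 bytes */
};

static void gen_confirm(struct client *clp)
{
        for (size_t i = 0; i < sizeof(clp->cl_confirm); i++)
                clp->cl_confirm[i] = (unsigned char)rand();   /* stand-in RNG */
}

static void gen_clid(struct client *clp, struct net_state *nn)
{
        clp->cl_clientid.cl_boot = (uint32_t)nn->boot_time;
        clp->cl_clientid.cl_id = nn->clientid_counter++;
        gen_confirm(clp);
}

int main(void)
{
        struct net_state nn = { .boot_time = time(NULL), .clientid_counter = 1 };
        struct client clp = { 0 };

        srand((unsigned)nn.boot_time);
        gen_clid(&clp, &nn);
        return clp.cl_clientid.cl_id == 1 ? 0 : 1;
}
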
2511 struct nfs4_client *clp;
2514 clp = get_nfsdfs_clp(inode);
2515 if (!clp)
2517 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2519 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2521 if (clp->cl_state == NFSD4_COURTESY)
2523 else if (clp->cl_state == NFSD4_EXPIRABLE)
2525 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2530 ktime_get_boottime_seconds() - clp->cl_time);
2532 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2533 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2534 if (clp->cl_nii_domain.data) {
2536 seq_quote_mem(m, clp->cl_nii_domain.data,
2537 clp->cl_nii_domain.len);
2539 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2541 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2543 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2544 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2545 drop_client(clp);
2553 __acquires(&clp->cl_lock)
2555 struct nfs4_client *clp = s->private;
2559 spin_lock(&clp->cl_lock);
2560 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2567 struct nfs4_client *clp = s->private;
2573 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2579 __releases(&clp->cl_lock)
2581 struct nfs4_client *clp = s->private;
2583 spin_unlock(&clp->cl_lock);
2778 struct nfs4_client *clp;
2781 clp = get_nfsdfs_clp(inode);
2782 if (!clp)
2789 s->private = clp;
2796 struct nfs4_client *clp = m->private;
2799 drop_client(clp);
2816 static void force_expire_client(struct nfs4_client *clp)
2818 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2821 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2824 clp->cl_time = 0;
2827 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2829 already_expired = list_empty(&clp->cl_lru);
2831 unhash_client_locked(clp);
2835 expire_client(clp);
2837 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2844 struct nfs4_client *clp;
2851 clp = get_nfsdfs_clp(file_inode(file));
2852 if (!clp)
2854 force_expire_client(clp);
2855 drop_client(clp);
2888 struct nfs4_client *clp = cb->cb_clp;
2889 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2892 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
2893 put_client_renew_locked(clp);
2905 struct nfs4_client *clp;
2912 clp = alloc_client(name, nn);
2913 if (clp == NULL)
2916 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2918 free_client(clp);
2921 gen_clid(clp, nn);
2922 kref_init(&clp->cl_nfsdfs.cl_ref);
2923 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2924 clp->cl_time = ktime_get_boottime_seconds();
2925 clear_bit(0, &clp->cl_cb_slot_busy);
2926 copy_verf(clp, verf);
2927 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2928 clp->cl_cb_session = NULL;
2929 clp->net = net;
2930 clp->cl_nfsd_dentry = nfsd_client_mkdir(
2931 nn, &clp->cl_nfsdfs,
2932 clp->cl_clientid.cl_id - nn->clientid_base,
2934 clp->cl_nfsd_info_dentry = dentries[0];
2935 if (!clp->cl_nfsd_dentry) {
2936 free_client(clp);
2939 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
2940 if (!clp->cl_ra) {
2941 free_client(clp);
2944 clp->cl_ra_time = 0;
2945 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
2947 return clp;
2954 struct nfs4_client *clp;
2957 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2960 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2975 struct nfs4_client *clp;
2978 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2979 cmp = compare_blob(&clp->cl_name, name);
2985 return clp;
2991 add_to_unconfirmed(struct nfs4_client *clp)
2994 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2998 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2999 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
3000 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3001 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3002 renew_client_locked(clp);
3006 move_to_confirmed(struct nfs4_client *clp)
3008 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3009 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3013 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
3014 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
3015 add_clp_to_name_tree(clp, &nn->conf_name_tree);
3016 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3017 trace_nfsd_clid_confirmed(&clp->cl_clientid);
3018 renew_client_locked(clp);
3024 struct nfs4_client *clp;
3027 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3028 if (same_clid(&clp->cl_clientid, clid)) {
3029 if ((bool)clp->cl_minorversion != sessions)
3031 renew_client_locked(clp);
3032 return clp;
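
Lines 2991-3032 show the two client tables: a new client is added to the unconfirmed id hash and name tree with NFSD4_CLIENT_CONFIRMED cleared, and confirmation moves it into the confirmed structures, sets the flag and renews it; the id-table lookup also checks that the client's minor version matches whether sessions are expected. The sketch below collapses the clientid hash buckets and the name rbtrees into one linked list per table, purely to show the add/move/find flow.

/* Model of the unconfirmed/confirmed client tables (lines 2991-3032). */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct client {
        uint32_t cl_id;
        bool confirmed;          /* NFSD4_CLIENT_CONFIRMED */
        struct client *next;
};

struct tables {
        struct client *unconf;   /* nn->unconf_id_hashtbl / unconf_name_tree */
        struct client *conf;     /* nn->conf_id_hashtbl / conf_name_tree */
};

static void add_to_unconfirmed(struct tables *nn, struct client *clp)
{
        clp->confirmed = false;
        clp->next = nn->unconf;
        nn->unconf = clp;
}

static void move_to_confirmed(struct tables *nn, struct client *clp)
{
        struct client **pp;

        for (pp = &nn->unconf; *pp; pp = &(*pp)->next) {
                if (*pp == clp) {
                        *pp = clp->next;      /* drop from the unconfirmed table */
                        break;
                }
        }
        clp->confirmed = true;
        clp->next = nn->conf;                 /* insert into the confirmed table */
        nn->conf = clp;
}

static struct client *find_client(struct client *tbl, uint32_t id)
{
        for (; tbl; tbl = tbl->next)
                if (tbl->cl_id == id)
                        return tbl;
        return NULL;
}

int main(void)
{
        struct tables nn = { 0 };
        struct client c = { .cl_id = 7 };

        add_to_unconfirmed(&nn, &c);
        move_to_confirmed(&nn, &c);
        return find_client(nn.conf, 7) == &c ? 0 : 1;
}
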
3056 static bool clp_used_exchangeid(struct nfs4_client *clp)
3058 return clp->cl_exchange_flags != 0;
3076 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3078 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3093 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3107 trace_nfsd_cb_args(clp, conn);
3112 trace_nfsd_cb_nodelegs(clp);
3233 static bool client_has_openowners(struct nfs4_client *clp)
3237 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3244 static bool client_has_state(struct nfs4_client *clp)
3246 return client_has_openowners(clp)
3248 || !list_empty(&clp->cl_lo_states)
3250 || !list_empty(&clp->cl_delegations)
3251 || !list_empty(&clp->cl_sessions)
3252 || !list_empty(&clp->async_copies);
3255 static __be32 copy_impl_id(struct nfs4_client *clp,
3260 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3261 if (!clp->cl_nii_domain.data)
3263 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3264 if (!clp->cl_nii_name.data)
3266 clp->cl_nii_time = exid->nii_time;
3747 struct nfs4_client *clp = session->se_client;
3753 spin_lock(&clp->cl_lock);
3767 spin_unlock(&clp->cl_lock);
3872 struct nfs4_client *clp = ses->se_client;
3877 spin_lock(&clp->cl_lock);
3882 if (clp->cl_mach_cred)
3885 spin_unlock(&clp->cl_lock);
3892 spin_unlock(&clp->cl_lock);
3953 struct nfs4_client *clp;
3976 clp = session->se_client;
4009 cstate->clp = clp;
4044 cstate->clp = clp;
4047 switch (clp->cl_cb_state) {
4057 if (!list_empty(&clp->cl_revoked))
4081 } else if (cs->clp)
4082 put_client_renew(cs->clp);
4092 struct nfs4_client *clp = NULL;
4109 clp = conf;
4111 clp = unconf;
4116 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4117 clp = NULL;
4121 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4122 unhash_client_locked(clp);
4125 if (clp)
4126 expire_client(clp);
4135 struct nfs4_client *clp = cstate->clp;
4149 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4153 if (is_client_expired(clp))
4164 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4165 nfsd4_client_record_create(clp);
4166 inc_reclaim_complete(clp);
4472 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4487 sop->so_client = clp;
4493 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4495 lockdep_assert_held(&clp->cl_lock);
4498 &clp->cl_ownerstr_hashtbl[strhashval]);
4499 list_add(&oo->oo_perclient, &clp->cl_openowners);
4593 struct nfs4_client *clp = cstate->clp;
4596 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4608 spin_lock(&clp->cl_lock);
4609 ret = find_openstateowner_str_locked(strhashval, open, clp);
4611 hash_openowner(oo, clp, strhashval);
4616 spin_unlock(&clp->cl_lock);
4928 struct nfs4_client *clp = dp->dl_stid.sc_client;
4934 atomic_inc(&clp->cl_delegs_in_recall);
4935 if (try_to_expire_client(clp)) {
4936 nn = net_generic(clp->net, nfsd_net_id);
4964 struct nfs4_client *clp;
4972 clp = *(rqst->rq_lease_breaker);
4973 return dl->dl_stid.sc_client == clp;
4981 struct nfs4_client *clp = dp->dl_stid.sc_client;
4985 atomic_dec(&clp->cl_delegs_in_recall);
5025 if (cstate->clp) {
5026 if (!same_clid(&cstate->clp->cl_clientid, clid))
5034 * set cstate->clp), so session = false:
5036 cstate->clp = lookup_clientid(clid, false, nn);
5037 if (!cstate->clp)
5047 struct nfs4_client *clp = NULL;
5063 clp = cstate->clp;
5066 oo = find_openstateowner_str(strhashval, open, clp);
5087 open->op_stp = nfs4_alloc_open_stateid(clp);
5093 open->op_odstate = alloc_clnt_odstate(clp);
5319 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5321 if (clp->cl_cb_state == NFSD4_CB_UP)
5328 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5349 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5393 st->st_stid.sc_client != clp) {
5458 struct nfs4_client *clp = stp->st_stid.sc_client;
5505 if (nfs4_delegation_exists(clp, fp))
5525 dp = alloc_init_deleg(clp, fp, odstate, dl_type);
5545 status = nfsd4_check_conflicting_opens(clp, fp);
5632 struct nfs4_client *clp = stp->st_stid.sc_client;
5653 if (locks_in_grace(clp->net))
5658 !clp->cl_minorversion)
5856 struct nfs4_client *clp;
5864 clp = cstate->clp;
5865 if (!list_empty(&clp->cl_delegations)
5866 && clp->cl_cb_state != NFSD4_CB_UP)
6028 nfs4_anylock_blockers(struct nfs4_client *clp)
6034 if (atomic_read(&clp->cl_delegs_in_recall))
6036 spin_lock(&clp->cl_lock);
6038 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
6044 spin_unlock(&clp->cl_lock);
6049 spin_unlock(&clp->cl_lock);
6059 struct nfs4_client *clp;
6066 clp = list_entry(pos, struct nfs4_client, cl_lru);
6067 if (clp->cl_state == NFSD4_EXPIRABLE)
6069 if (!state_expired(lt, clp->cl_time))
6071 if (!atomic_read(&clp->cl_rpc_users)) {
6072 if (clp->cl_state == NFSD4_ACTIVE)
6074 clp->cl_state = NFSD4_COURTESY;
6076 if (!client_has_state(clp))
6078 if (!nfs4_anylock_blockers(clp))
6082 if (!mark_client_expired_locked(clp)) {
6083 list_add(&clp->cl_lru, reaplist);
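
Lines 6028-6084 are the laundromat's courtesy-client logic: nfs4_anylock_blockers() reports whether anything is waiting on the client's locks or recalled delegations, and the reaper walks client_lru, downgrades an idle, lease-expired client from NFSD4_ACTIVE to NFSD4_COURTESY, keeps it as a courtesy client while it holds state and blocks nobody, and otherwise marks it expired and queues it on the reaplist. The decision logic, compressed into boolean inputs (the kernel also applies a longer courtesy-expiry timeout not modelled here), can be sketched as:

/* Decision logic of the courtesy-client reaper, sketched from lines 6028-6084. */
#include <stdbool.h>

enum client_state { NFSD4_ACTIVE, NFSD4_COURTESY, NFSD4_EXPIRABLE };
enum reap_action  { KEEP, KEEP_AS_COURTESY, REAP };

static enum reap_action laundromat_decision(enum client_state *state,
                                            bool lease_expired,
                                            bool has_rpc_users,
                                            bool has_state,
                                            bool anylock_blockers)
{
        if (*state == NFSD4_EXPIRABLE)
                return REAP;                     /* already condemned */
        if (!lease_expired)
                return KEEP;                     /* lease still valid */
        if (!has_rpc_users)
                *state = NFSD4_COURTESY;         /* downgrade the idle client */
        if (!has_state)
                return REAP;                     /* nothing worth preserving */
        if (!anylock_blockers)
                return KEEP_AS_COURTESY;         /* harmless, keep it around */
        return REAP;                             /* it is blocking someone */
}

int main(void)
{
        enum client_state st = NFSD4_ACTIVE;

        /* Idle client, lease expired, holds state, blocks nobody: kept as courtesy. */
        return laundromat_decision(&st, true, false, true, false) == KEEP_AS_COURTESY
                && st == NFSD4_COURTESY ? 0 : 1;
}
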
6096 struct nfs4_client *clp;
6103 clp = list_entry(pos, struct nfs4_client, cl_lru);
6104 if (clp->cl_state == NFSD4_ACTIVE)
6108 if (!mark_client_expired_locked(clp)) {
6109 list_add(&clp->cl_lru, reaplist);
6120 struct nfs4_client *clp;
6123 clp = list_entry(pos, struct nfs4_client, cl_lru);
6124 trace_nfsd_clid_purged(&clp->cl_clientid);
6125 list_del_init(&clp->cl_lru);
6126 expire_client(clp);
6258 struct nfs4_client *clp;
6264 clp = list_entry(pos, struct nfs4_client, cl_lru);
6265 if (clp->cl_state != NFSD4_ACTIVE ||
6266 list_empty(&clp->cl_delegations) ||
6267 atomic_read(&clp->cl_delegs_in_recall) ||
6268 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6270 clp->cl_ra_time < 5)) {
6273 list_add(&clp->cl_ra_cblist, &cblist);
6276 atomic_inc(&clp->cl_rpc_users);
6277 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6278 clp->cl_ra_time = ktime_get_boottime_seconds();
6283 clp = list_first_entry(&cblist, struct nfs4_client,
6285 list_del_init(&clp->cl_ra_cblist);
6286 clp->cl_ra->ra_keep = 0;
6287 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
6288 trace_nfsd_cb_recall_any(clp->cl_ra);
6289 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6459 stid = find_stateid_by_type(cstate->clp, stateid, typemask);
6550 struct nfs4_client *clp,
6567 if (!clp)
6576 if (!clp && state)
6706 struct nfs4_client *cl = cstate->clp;
6752 struct nfs4_client *cl = cstate->clp;
6994 struct nfs4_client *clp = s->st_stid.sc_client;
6999 spin_lock(&clp->cl_lock);
7002 if (clp->cl_minorversion) {
7005 spin_unlock(&clp->cl_lock);
7007 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
7010 spin_unlock(&clp->cl_lock);
7013 move_to_close_lru(s, clp->net);
7151 struct nfs4_client *clp = lo->lo_owner.so_client;
7154 if (try_to_expire_client(clp)) {
7155 nn = net_generic(clp->net, nfsd_net_id);
7233 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7238 lockdep_assert_held(&clp->cl_lock);
7240 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7251 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7255 spin_lock(&clp->cl_lock);
7256 lo = find_lockowner_str_locked(clp, owner);
7257 spin_unlock(&clp->cl_lock);
7286 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7292 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7300 spin_lock(&clp->cl_lock);
7301 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7304 &clp->cl_ownerstr_hashtbl[strhashval]);
7309 spin_unlock(&clp->cl_lock);
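
find_lockowner_str()/alloc_init_lock_stateowner() (lines 7233-7309, and the same shape for openowners at lines 4593-4616) use the classic "allocate outside the lock, re-check under the lock" pattern: the stateowner is built without clp->cl_lock held, then either hashed into cl_ownerstr_hashtbl or discarded if a racing thread already inserted one with the same owner name. Below is a self-contained sketch of that race handling, with a pthread mutex and a single list bucket standing in for cl_lock and the hash table.

/* Sketch of alloc-then-recheck stateowner insertion (lines 7286-7309). */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct owner {
        char *name;
        struct owner *next;
};

struct client {
        pthread_mutex_t cl_lock;
        struct owner *owners;      /* one bucket of cl_ownerstr_hashtbl */
};

/* Caller must hold clp->cl_lock (find_lockowner_str_locked() analogue). */
static struct owner *find_owner_locked(struct client *clp, const char *name)
{
        struct owner *o;

        for (o = clp->owners; o; o = o->next)
                if (strcmp(o->name, name) == 0)
                        return o;
        return NULL;
}

static struct owner *alloc_init_owner(struct client *clp, const char *name)
{
        struct owner *lo, *ret;

        lo = calloc(1, sizeof(*lo));          /* allocated outside the lock */
        if (!lo || !(lo->name = strdup(name))) {
                free(lo);
                return NULL;
        }

        pthread_mutex_lock(&clp->cl_lock);
        ret = find_owner_locked(clp, name);   /* did someone beat us to it? */
        if (!ret) {
                lo->next = clp->owners;       /* hash_lockowner() analogue */
                clp->owners = lo;
                ret = lo;
        } else {
                free(lo->name);               /* lost the race: drop our copy */
                free(lo);
        }
        pthread_mutex_unlock(&clp->cl_lock);
        return ret;
}

int main(void)
{
        struct client clp = { .owners = NULL };
        struct owner *a, *b;

        pthread_mutex_init(&clp.cl_lock, NULL);
        a = alloc_init_owner(&clp, "owner-1");
        b = alloc_init_owner(&clp, "owner-1");
        return (a && a == b) ? 0 : 1;   /* second call finds the existing owner */
}
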
7337 struct nfs4_client *clp = lo->lo_owner.so_client;
7343 spin_lock(&clp->cl_lock);
7362 spin_unlock(&clp->cl_lock);
7365 spin_unlock(&clp->cl_lock);
7374 spin_unlock(&clp->cl_lock);
7387 struct nfs4_client *clp = oo->oo_owner.so_client;
7390 spin_lock(&clp->cl_lock);
7392 spin_unlock(&clp->cl_lock);
7398 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7515 &cstate->clp->cl_clientid,
7785 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7947 struct nfs4_client *clp;
7957 clp = cstate->clp;
7959 spin_lock(&clp->cl_lock);
7960 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
7962 spin_unlock(&clp->cl_lock);
7968 spin_unlock(&clp->cl_lock);
7981 spin_unlock(&clp->cl_lock);
8076 nfs4_check_open_reclaim(struct nfs4_client *clp)
8078 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
8081 if (nfsd4_client_record_check(clp))
8178 struct nfs4_client *clp = NULL;
8183 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8184 destroy_client(clp);
8192 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8193 destroy_client(clp);