// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/rtnetlink.h>
#include <linux/file.h>		/* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*
 * The ATM queuing discipline provides a framework for invoking classifiers
 * (aka "filters"), which in turn select classes of this queuing discipline.
 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
 * may share the same VC.
 *
 * When creating a class, VCs are specified by passing the number of the open
 * socket descriptor by which the calling process references the VC. The kernel
 * keeps the VC open at least until all classes using it are removed.
 *
 * In this file, most functions are named atm_tc_* to avoid confusion with all
 * the atm_* in net/atm. This naming convention differs from what's used in the
 * rest of net/sched.
 *
 * Known bugs:
 *  - sometimes messes up the IP stack
 *  - any manipulations besides the few operations described in the README are
 *    untested and likely to crash the system
 *  - should lock the flow while there is data in the queue (?)
 */
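/*
 * Class creation sketch (a summary of atm_tc_change() below, not a formal
 * interface description): the TCA_OPTIONS nest must carry TCA_ATM_FD, the
 * descriptor of an open PF_ATMSVC or PF_ATMPVC socket. TCA_ATM_HDR may
 * supply a raw header to prepend to every packet (default: LLC/SNAP for IP),
 * and TCA_ATM_EXCESS may name the class that handles excess traffic; when it
 * is omitted, excess packets get the ATM CLP bit set instead (see the
 * TC_ACT_RECLASSIFY handling in atm_tc_enqueue()). If no classid is given, a
 * free minor number in the 0x8000+ range is picked automatically.
 */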

#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))

struct atm_flow_data {
	struct Qdisc_class_common common;
	struct Qdisc		*q;	/* FIFO, TBF, etc. */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;
	struct atm_vcc		*vcc;	/* VCC; NULL if VCC is closed */
	void			(*old_pop)(struct atm_vcc *vcc,
					   struct sk_buff *skb); /* chaining */
	struct atm_qdisc_data	*parent;	/* parent qdisc */
	struct socket		*sock;		/* for closing */
	int			ref;		/* reference count */
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue	qstats;
	struct list_head	list;
	struct atm_flow_data	*excess;	/* flow for excess traffic;
						   NULL to set CLP instead */
	int			hdr_len;
	unsigned char		hdr[];		/* header data; MUST BE LAST */
};

struct atm_qdisc_data {
	struct atm_flow_data	link;		/* unclassified skbs go here */
	struct list_head	flows;		/* NB: "link" is also on this
						   list */
	struct tasklet_struct	task;		/* dequeue tasklet */
};

/* ------------------------- Class/flow operations ------------------------- */

static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	list_for_each_entry(flow, &p->flows, list) {
		if (flow->common.classid == classid)
			return flow;
	}
	return NULL;
}

static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
		sch, p, flow, new, old);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (!new)
		new = &noop_qdisc;
	*old = flow->q;
	flow->q = new;
	if (*old)
		qdisc_reset(*old);
	return 0;
}

static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
	return flow ? flow->q : NULL;
}

static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	flow = lookup_flow(sch, classid);
	pr_debug("%s: flow %p\n", __func__, flow);
	return (unsigned long)flow;
}

static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	flow = lookup_flow(sch, classid);
	if (flow)
		flow->ref++;
	pr_debug("%s: flow %p\n", __func__, flow);
	return (unsigned long)flow;
}

/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	pr_debug("atm_tc_put: destroying\n");
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_put(flow->q);
	tcf_block_put(flow->block);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			file_count(flow->sock->file));
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}

static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;

	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
	VCC2FLOW(vcc)->old_pop(vcc, skb);
	tasklet_schedule(&p->task);
}

static const u8 llc_oui_ip[] = {
	0xaa,			/* DSAP: non-ISO */
	0xaa,			/* SSAP: non-ISO */
	0x03,			/* Ctrl: Unnumbered Information Command PDU */
	0x00,			/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};				/* Ethertype IP (0800) */
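/*
 * Default encapsulation copied into flow->hdr when no TCA_ATM_HDR attribute
 * is supplied: the 8-byte RFC 1483/2684 LLC/SNAP header for EtherType 0x0800
 * (IPv4). sch_atm_dequeue() prepends flow->hdr to every packet it hands to
 * the VCC.
 */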

static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};

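/*
 * Create a new class: look up the ATM socket named by TCA_ATM_FD, allocate
 * an atm_flow_data with the (optional) header stored inline behind it, give
 * the flow its own default pfifo child qdisc, and hook the VCC's pop
 * callback so that completed transmissions reschedule the dequeue tasklet.
 * The socket reference taken here is dropped again in atm_tc_put().
 */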
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		"flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. To change properties of the ATM
	 * connection, the socket needs to be modified directly (via the
	 * native ATM API). To send a flow to a different VC, the old class
	 * needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;

	error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
					    NULL);
	if (error < 0)
		return error;

	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		excess = (struct atm_flow_data *)
			atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_find(sch, classid);
			if (!cl)
				break;
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}

	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
			      extack);
	if (error) {
		kfree(flow);
		goto err_out;
	}

	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
				    extack);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->common.classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	sockfd_put(sock);
	return error;
}

static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
		return -EBUSY;
	/*
	 * Reference count must be 2: one for "keepalive" (set at class
	 * creation), and one for the reference held when calling delete.
	 */
	if (flow->ref < 2) {
		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
		return -EINVAL;
	}
	if (flow->ref > 2)
		return -EBUSY;	/* catch references via excess, etc. */
	atm_tc_put(sch, arg);
	return 0;
}

static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
	if (walker->stop)
		return;
	list_for_each_entry(flow, &p->flows, list) {
		if (walker->count >= walker->skip &&
		    walker->fn(sch, (unsigned long)flow, walker) < 0) {
			walker->stop = 1;
			break;
		}
		walker->count++;
	}
}

static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	return flow ? flow->block : p->link.block;
}

/* --------------------------- Qdisc operations ---------------------------- */

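/*
 * Classification: skb->priority is tried as a classid first, then the
 * per-flow filter lists. Unclassified packets stay on the p->link flow and
 * are handed back to the device via atm_tc_dequeue(); classified packets are
 * queued on their flow's child qdisc and the dequeue tasklet is scheduled,
 * so to the outer qdisc they look as if they had been dropped (see the
 * comment further down).
 */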
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_ACT_OK;	/* be nice to gcc */
	flow = NULL;
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
		struct tcf_proto *fl;

		list_for_each_entry(flow, &p->flows, list) {
			fl = rcu_dereference_bh(flow->filter_list);
			if (fl) {
				result = tcf_classify(skb, fl, &res, true);
				if (result < 0)
					continue;
				if (result == TC_ACT_SHOT)
					goto done;

				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;
done:
		;
	}
	if (!flow) {
		flow = &p->link;
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			__qdisc_drop(skb, to_free);
			goto drop;
		case TC_ACT_RECLASSIFY:
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q, to_free);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qlen needs to reflect whether
	 * there is a packet eligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP, LANE, or
 * non-ATM interfaces.
 */

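/*
 * Tasklet body: for every flow except p->link, peek at the child qdisc and
 * stop as soon as atm_may_send() reports that the VCC's send buffer is full;
 * otherwise dequeue the packet, make room for and prepend flow->hdr, charge
 * the truesize to the ATM socket's write memory and hand the skb to
 * vcc->send().
 */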
static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			qdisc_bstats_update(sch, skb);
			bstats_update(&flow->bstats, skb);
			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			refcount_add(skb->truesize,
				     &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}

static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
	tasklet_schedule(&p->task);
	skb = qdisc_dequeue_peeked(p->link.q);
	if (skb)
		sch->q.qlen--;
	return skb;
}

static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->link.q->ops->peek(p->link.q);
}

static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle, extack);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
	p->link.vcc = NULL;
	p->link.sock = NULL;
	p->link.common.classid = sch->handle;
	p->link.ref = 1;

	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
			    extack);
	if (err)
		return err;

	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}

static void atm_tc_reset(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		qdisc_reset(flow->q);
}

static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		tcf_block_put(flow->block);
		flow->block = NULL;
	}

	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}

static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;
	tcm->tcm_handle = flow->common.classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
		goto nla_put_failure;
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		memset(&pvc, 0, sizeof(pvc));
		pvc.sap_family = AF_ATMPVC;
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
			goto nla_put_failure;
		state = ATM_VF2VS(flow->vcc->flags);
		if (nla_put_u32(skb, TCA_ATM_STATE, state))
			goto nla_put_failure;
	}
	if (flow->excess) {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
			goto nla_put_failure;
	} else {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
			struct gnet_dump *d)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &flow->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
		return -1;

	return 0;
}

static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}

static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.find		= atm_tc_find,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_block	= atm_tc_tcf_block,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};

static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};

static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");