/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 *
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>
/*
 * kernel internal service curve representation:
 * coordinates are given by 64 bit unsigned integers.
 * x-axis: unit is clock count.
 * y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * the inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
/* runtime service curve */
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
enum hfsc_class_flags

	u32		classid;	/* class id */
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	spinlock_t	*stats_lock;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node	el_node;	/* qdisc's eligible tree member */
	struct rb_root	vt_tree;	/* active children sorted by cl_vt */
	struct rb_node	vt_node;	/* parent's vt_tree member */
	struct rb_root	cf_tree;	/* active children sorted by cl_f */
	struct rb_node	cf_node;	/* parent's cf_heap member */
	struct list_head hlist;		/* hash list member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
#define	HFSC_HSIZE	16

	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct list_head clhash[HFSC_HSIZE];	/* class hash */
	struct rb_root	eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct sk_buff_head requeue;		/* requeued packet */
	struct timer_list wd_timer;		/* watchdog timer */
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp)						\
	do_gettimeofday(&tv);						\
	(stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec;	\

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
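
/*
 * Illustrative note, not part of the original source: because the eligible
 * tree is an rbtree keyed on cl_e, its leftmost node (rb_first()) is always
 * the class with the smallest eligible time, which is what
 * eltree_get_minel() below relies on:
 *
 *	n  = rb_first(&q->eligible);
 *	cl = rb_entry(n, struct hfsc_class, el_node);   <- smallest cl_e
 */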
eltree_insert(struct hfsc_class *cl)
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
			p = &parent->rb_left;

	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);

eltree_remove(struct hfsc_class *cl)
	rb_erase(&cl->el_node, &cl->sched->eligible);

eltree_update(struct hfsc_class *cl)
/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
	struct hfsc_class *p, *cl = NULL;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
		if (cl == NULL || p->cl_d < cl->cl_d)

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
	n = rb_first(&q->eligible);

	return rb_entry(n, struct hfsc_class, el_node);
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
vttree_insert(struct hfsc_class *cl)
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
			p = &parent->rb_left;

	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);

vttree_remove(struct hfsc_class *cl)
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);

vttree_update(struct hfsc_class *cl)

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
	struct hfsc_class *p;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;

cftree_insert(struct hfsc_class *cl)
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
			p = &parent->rb_left;

	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);

cftree_remove(struct hfsc_class *cl)
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);

cftree_update(struct hfsc_class *cl)
/*
 * service curve support functions
 *
 * external service curve parameters
 * internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *
 * Clock source resolution (CONFIG_NET_SCH_CLK_*)
 *  JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
 *  CPU: resolution is between 0.5us and 1us.
 *  GETTIMEOFDAY: resolution is exactly 1us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 * Note: We can afford the additional accuracy (altq hfsc keeps at most
 * 3 effective digits) thanks to the fact that linux clock is bounded
 * much more tightly.
 *
 *  bits/sec      100Kbps     1Mbps       10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/0.5us   6.25e-3     62.5e-3     625e-3     6250e-3    62500e-3
 *  bytes/us      12.5e-3     125e-3      1250e-3    12500e-3   125000e-3
 *  bytes/1.27us  15.875e-3   158.75e-3   1587.5e-3  15875e-3   158750e-3
 *
 *  0.5us/byte    160         16          1.6        0.16       0.016
 *  us/byte       80          8           0.8        0.08       0.008
 *  1.27us/byte   63          6.3         0.63       0.063      0.0063
 */
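
/*
 * Worked example, added for illustration and not part of the original
 * source (exact values depend on the SM_SHIFT/ISM_SHIFT definitions in
 * this file): a 1 Mbit/s rate corresponds to 125e-3 bytes/us per the
 * table above.  Stored directly in an integer this would truncate to 0,
 * so m2sm() keeps the slope multiplied by 2^SM_SHIFT and m2ism() keeps
 * the inverse slope (8 us/byte at 1 Mbit/s) multiplied by 2^ISM_SHIFT.
 * With a 1us clock,
 *
 *	sm = 0.125 * 2^SM_SHIFT                  (stored as an integer)
 *	seg_x2y(80, sm) = 80 * sm >> SM_SHIFT    -> 10 bytes in 80 ticks
 *
 * so bytes and times are recovered with shifts only, no 64-bit divide.
 */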
#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

seg_x2y(u64 x, u64 sm)
	/*
	 * y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
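	/*
	 * Illustrative note, not part of the original source: the split
	 * above is exact, because the first term is a multiple of the low
	 * SM_SHIFT bits being shifted away:
	 *
	 *	(x * sm) >> SM_SHIFT
	 *	  == (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT)
	 *
	 * and it never forms the full x * sm product, which could exceed
	 * 64 bits for large values of x.
	 */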
seg_y2x(u64 y, u64 ism)
	else if (ism == HT_INFINITY)
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

/* Convert m (bps) into sm (bytes/psched us) */
	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_JIFFIE2US(HZ) - 1;
	do_div(sm, PSCHED_JIFFIE2US(HZ));

/* convert m (bps) into ism (psched us/byte) */
	ism = ((u64)PSCHED_JIFFIE2US(HZ) << ISM_SHIFT);

/* convert d (us) into dx (psched us) */
	dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);

/* convert sm (bytes/psched us) into m (bps) */
	m = (sm * PSCHED_JIFFIE2US(HZ)) >> SM_SHIFT;

/* convert dx (psched us) into d (us) */
	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_JIFFIE2US(HZ));
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		x = rtsc->x + rtsc->dx;
		x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	/* x belongs to the 2nd segment */
	x = rtsc->x + rtsc->dx
	    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);

rtsc_x2y(struct runtime_sc *rtsc, u64 x)
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	/* y belongs to the 2nd segment */
	y = rtsc->y + rtsc->dy
	    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
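
/*
 * Illustrative note, not part of the original source: rtsc_y2x() answers
 * "at what time does the curve reach y bytes of service", rtsc_x2y()
 * answers "how many bytes does the curve allow by time x".  init_ed() and
 * update_ed() below use rtsc_y2x() on the eligible and deadline curves to
 * compute cl_e and cl_d, while rtsc_min() uses rtsc_x2y() to compare two
 * curves at a given point in time.
 */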
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
			/* the current rtsc is smaller */

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 */
	y1 = rtsc_x2y(rtsc, x);
		/* rtsc is below isc, no change to rtsc */

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;

	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);
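	/*
	 * Illustrative derivation, not part of the original source: with
	 * seg_x2y(dx, sm) = dx * sm >> SM_SHIFT, the identity quoted above
	 * requires
	 *
	 *	dx * sm1 >> SM_SHIFT == (dx * sm2 >> SM_SHIFT) + (y1 - y)
	 *
	 * i.e. dx * (sm1 - sm2) == (y1 - y) << SM_SHIFT, so
	 * dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2); the code above
	 * prepares exactly that numerator (dx) and denominator (dsm).
	 */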
init_ed(struct hfsc_class *cl, unsigned int next_len)
	PSCHED_GET_TIME(cur_time);

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
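	/*
	 * Illustrative note, not part of the original source: cl_e orders
	 * the class in the eligible tree and determines when it becomes
	 * eligible, while cl_d is the deadline hfsc_dequeue() uses (via
	 * eltree_get_mindl()) to pick which eligible class to serve next
	 * under the real-time criterion.
	 */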
update_ed(struct hfsc_class *cl, unsigned int next_len)
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

update_d(struct hfsc_class *cl, unsigned int next_len)
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

update_cfmin(struct hfsc_class *cl)
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;

init_vf(struct hfsc_class *cl, unsigned int len)
	struct hfsc_class *max_cl;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)

		n = rb_last(&cl->cl_parent->vt_tree);
		max_cl = rb_entry(n, struct hfsc_class, vt_node);
		/*
		 * set vt to the average of the min and max
		 * classes. if the parent's period didn't
		 * change, don't decrease vt of the class.
		 */
		if (cl->cl_parent->cl_cvtmin != 0)
			vt = (cl->cl_parent->cl_cvtmin + vt)/2;

		if (cl->cl_parent->cl_vtperiod !=
		    cl->cl_parentperiod || vt > cl->cl_vt)

		/*
		 * first child for a new parent backlog period.
		 * add parent's cvtmax to cvtoff to make a new
		 * vt (vtoff + vt) larger than the vt in the
		 * last period for all children.
		 */
		vt = cl->cl_parent->cl_cvtmax;
		cl->cl_parent->cl_cvtoff += vt;
		cl->cl_parent->cl_cvtmax = 0;
		cl->cl_parent->cl_cvtmin = 0;

		cl->cl_vtoff = cl->cl_parent->cl_cvtoff -

		/* update the virtual curve */
		vt = cl->cl_vt + cl->cl_vtoff;
		rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
		if (cl->cl_virtual.x == vt) {
			cl->cl_virtual.x -= cl->cl_vtoff;

		cl->cl_vtperiod++;  /* increment vt period */
		cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
		if (cl->cl_parent->cl_nactive == 0)
			cl->cl_parentperiod++;

		if (cl->cl_flags & HFSC_USC) {
			/* class has upper limit curve */
			PSCHED_GET_TIME(cur_time);

			/* update the ulimit curve */
			rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,

			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,

		f = max(cl->cl_myf, cl->cl_cfmin);

		update_cfmin(cl->cl_parent);
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
	u64 f; /* , myf_bound, delta; */

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)

		if (go_passive && --cl->cl_nactive == 0)

			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */

			update_cfmin(cl->cl_parent);

		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;

		/* update the vt tree */

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;

		f = max(cl->cl_myf, cl->cl_cfmin);

		update_cfmin(cl->cl_parent);
set_active(struct hfsc_class *cl, unsigned int len)
	if (cl->cl_flags & HFSC_RSC)
	if (cl->cl_flags & HFSC_FSC)

	list_add_tail(&cl->dlist, &cl->sched->droplist);

set_passive(struct hfsc_class *cl)
	if (cl->cl_flags & HFSC_RSC)

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */

/*
 * hack to get length of first packet in queue.
 */
qdisc_peek_len(struct Qdisc *sch)
	skb = sch->dequeue(sch);
		printk("qdisc_peek_len: non work-conserving qdisc ?\n");
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		printk("qdisc_peek_len: failed to requeue\n");
		qdisc_tree_decrease_qlen(sch, 1);
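	/*
	 * Illustrative note, not part of the original source: this
	 * dequeue/requeue round trip is how the scheduler peeks at the head
	 * packet's length before computing deadlines, e.g.
	 * update_ed(cl, qdisc_peek_len(cl->qdisc)) as done elsewhere in this
	 * file.  If the inner qdisc cannot take the packet back, the queue
	 * length accounting is corrected via qdisc_tree_decrease_qlen() above.
	 */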
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);

hfsc_adjust_levels(struct hfsc_class *cl)
	struct hfsc_class *p;

		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;

	} while ((cl = cl->cl_parent) != NULL);

static inline unsigned int
	return h & (HFSC_HSIZE - 1);

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
		if (cl->classid == classid)

hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	cl->cl_flags |= HFSC_RSC;

hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;

hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct rtattr **tca, unsigned long *arg)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_HFSC_MAX];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))

	if (tb[TCA_HFSC_RSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
		rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
		if (rsc->m1 == 0 && rsc->m2 == 0)

	if (tb[TCA_HFSC_FSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
		fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
		if (fsc->m1 == 0 && fsc->m2 == 0)

	if (tb[TCA_HFSC_USC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
		usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
		if (usc->m1 == 0 && usc->m2 == 0)

		if (cl->cl_parent && cl->cl_parent->classid != parentid)
		if (cl->cl_parent == NULL && parentid != TC_H_ROOT)

		PSCHED_GET_TIME(cur_time);

		hfsc_change_rsc(cl, rsc, cur_time);
		hfsc_change_fsc(cl, fsc);
		hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);

		sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      cl->stats_lock, tca[TCA_RATE-1]);

	if (parentid == TC_H_ROOT)
		parent = hfsc_find_class(parentid, sch);

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
	if (hfsc_find_class(classid, sch))

	if (rsc == NULL && fsc == NULL)

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);

	hfsc_change_rsc(cl, rsc, 0);
	hfsc_change_fsc(cl, fsc);
	hfsc_change_usc(cl, usc, 0);

	cl->classid = classid;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	cl->stats_lock = &sch->dev->queue_lock;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  cl->stats_lock, tca[TCA_RATE-1]);

	*arg = (unsigned long)cl;
hfsc_destroy_filters(struct tcf_proto **fl)
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {

hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
	struct hfsc_sched *q = qdisc_priv(sch);

	hfsc_destroy_filters(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&cl->bstats, &cl->rate_est);

hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	list_del(&cl->hlist);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)

	*qerr = NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			*qerr = NET_XMIT_SUCCESS;
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)

		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */

			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	struct hfsc_class *cl = (struct hfsc_class *)arg;

		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,

	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)

hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	return (unsigned long)cl;

hfsc_put_class(struct Qdisc *sch, unsigned long arg)
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

		if (p != NULL && p->level <= cl->level)

	return (unsigned long)cl;

hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
	struct hfsc_class *cl = (struct hfsc_class *)arg;

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	return &cl->filter_list;
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	RTA_PUT(skb, attr, sizeof(tsc), &tsc);

hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto rtattr_failure;

hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	unsigned char *b = skb->tail;
	struct rtattr *rta = (struct rtattr *)b;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
		tcm->tcm_info = cl->qdisc->handle;

	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (hfsc_dump_curves(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb->tail - b;

	skb_trim(skb, b - skb->data);

hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist) {
			if (arg->count < arg->skip) {

			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {

hfsc_watchdog(unsigned long arg)
	struct Qdisc *sch = (struct Qdisc *)arg;

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);

hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;

	WARN_ON(next_time == 0);
	delay = next_time - cur_time;
	delay = PSCHED_US2JIFFIE(delay);

	sch->flags |= TCQ_F_THROTTLED;
	mod_timer(&q->wd_timer, jiffies + delay);
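	/*
	 * Illustrative note, not part of the original source: the wakeup
	 * time is the earlier of the smallest eligible time in the eltree
	 * and the root's cl_cfmin; its distance from cur_time (in psched us)
	 * is converted to jiffies for mod_timer().  When the timer fires,
	 * hfsc_watchdog() above clears TCQ_F_THROTTLED and reschedules the
	 * device so that hfsc_dequeue() runs again.
	 */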
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
	qopt = RTA_DATA(opt);

	sch->stats_lock = &sch->dev->queue_lock;

	q->defcls = qopt->defcls;
	for (i = 0; i < HFSC_HSIZE; i++)
		INIT_LIST_HEAD(&q->clhash[i]);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.classid = sch->handle;
	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	q->root.stats_lock = &sch->dev->queue_lock;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

	init_timer(&q->wd_timer);
	q->wd_timer.function = hfsc_watchdog;
	q->wd_timer.data = (unsigned long)sch;

hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
	qopt = RTA_DATA(opt);

	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);
hfsc_reset_class(struct hfsc_class *cl)
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);

hfsc_reset_qdisc(struct Qdisc *sch)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);

	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	del_timer(&q->wd_timer);
	sch->flags &= ~TCQ_F_THROTTLED;

hfsc_destroy_qdisc(struct Qdisc *sch)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl, *next;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);

	__skb_queue_purge(&q->requeue);
	del_timer(&q->wd_timer);

hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	skb_trim(skb, b - skb->data);
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	struct hfsc_class *cl;

	cl = hfsc_classify(skb, sch, &err);
		if (err == NET_XMIT_BYPASS)
			sch->qstats.drops++;

	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		sch->qstats.drops++;

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;

	return NET_XMIT_SUCCESS;

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	unsigned int next_len;

	if (sch->q.qlen == 0)
	if ((skb = __skb_dequeue(&q->requeue)))

	PSCHED_GET_TIME(cur_time);

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {

		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch, cur_time);

	skb = cl->qdisc->dequeue(cl->qdisc);
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");

	update_vf(cl, skb->len, cur_time);
	cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			next_len = qdisc_peek_len(cl->qdisc);
				update_ed(cl, next_len);
				update_d(cl, next_len);

		/* the class becomes passive */

	sch->flags &= ~TCQ_F_THROTTLED;

hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;

hfsc_drop(struct Qdisc *sch)
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);

			list_move_tail(&cl->dlist, &q->droplist);

			sch->qstats.drops++;
static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,

static struct Qdisc_ops hfsc_qdisc_ops = {
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE

	return register_qdisc(&hfsc_qdisc_ops);

	unregister_qdisc(&hfsc_qdisc_ops);

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);