Lines matching references to identifier: table (cross-reference listing, net/sched/sch_gred.c)

62 static inline int gred_wred_mode(struct gred_sched *table)  in gred_wred_mode()  argument
64 return test_bit(GRED_WRED_MODE, &table->flags); in gred_wred_mode()
67 static inline void gred_enable_wred_mode(struct gred_sched *table) in gred_enable_wred_mode() argument
69 __set_bit(GRED_WRED_MODE, &table->flags); in gred_enable_wred_mode()
72 static inline void gred_disable_wred_mode(struct gred_sched *table) in gred_disable_wred_mode() argument
74 __clear_bit(GRED_WRED_MODE, &table->flags); in gred_disable_wred_mode()
77 static inline int gred_rio_mode(struct gred_sched *table) in gred_rio_mode() argument
79 return test_bit(GRED_RIO_MODE, &table->flags); in gred_rio_mode()
82 static inline void gred_enable_rio_mode(struct gred_sched *table) in gred_enable_rio_mode() argument
84 __set_bit(GRED_RIO_MODE, &table->flags); in gred_enable_rio_mode()
87 static inline void gred_disable_rio_mode(struct gred_sched *table) in gred_disable_rio_mode() argument
89 __clear_bit(GRED_RIO_MODE, &table->flags); in gred_disable_rio_mode()
94 struct gred_sched *table = qdisc_priv(sch); in gred_wred_mode_check() local
98 for (i = 0; i < table->DPs; i++) { in gred_wred_mode_check()
99 struct gred_sched_data *q = table->tab[i]; in gred_wred_mode_check()
105 for (n = i + 1; n < table->DPs; n++) in gred_wred_mode_check()
106 if (table->tab[n] && table->tab[n]->prio == q->prio) in gred_wred_mode_check()
113 static inline unsigned int gred_backlog(struct gred_sched *table, in gred_backlog() argument
117 if (gred_wred_mode(table)) in gred_backlog()
128 static inline void gred_load_wred_set(const struct gred_sched *table, in gred_load_wred_set() argument
131 q->vars.qavg = table->wred_set.qavg; in gred_load_wred_set()
132 q->vars.qidlestart = table->wred_set.qidlestart; in gred_load_wred_set()
135 static inline void gred_store_wred_set(struct gred_sched *table, in gred_store_wred_set() argument
138 table->wred_set.qavg = q->vars.qavg; in gred_store_wred_set()
139 table->wred_set.qidlestart = q->vars.qidlestart; in gred_store_wred_set()
152 static bool gred_per_vq_red_flags_used(struct gred_sched *table) in gred_per_vq_red_flags_used() argument
157 if (table->red_flags) in gred_per_vq_red_flags_used()
160 if (table->tab[i] && table->tab[i]->red_flags) in gred_per_vq_red_flags_used()
313 struct gred_sched *table = qdisc_priv(sch); in gred_offload() local
315 struct tc_gred_qopt_offload *opt = table->opt; in gred_offload()
328 opt->set.grio_on = gred_rio_mode(table); in gred_offload()
329 opt->set.wred_on = gred_wred_mode(table); in gred_offload()
330 opt->set.dp_cnt = table->DPs; in gred_offload()
331 opt->set.dp_def = table->def; in gred_offload()
333 for (i = 0; i < table->DPs; i++) { in gred_offload()
334 struct gred_sched_data *q = table->tab[i]; in gred_offload()
356 struct gred_sched *table = qdisc_priv(sch); in gred_offload_dump_stats() local
372 if (table->tab[i]) in gred_offload_dump_stats()
373 hw_stats->stats.xstats[i] = &table->tab[i]->stats; in gred_offload_dump_stats()
381 if (!table->tab[i]) in gred_offload_dump_stats()
383 table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets); in gred_offload_dump_stats()
384 table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes); in gred_offload_dump_stats()
385 table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog; in gred_offload_dump_stats()
409 struct gred_sched *table = qdisc_priv(sch); in gred_change_table_def() local
432 if (sopt->flags && gred_per_vq_red_flags_used(table)) { in gred_change_table_def()
438 table->DPs = sopt->DPs; in gred_change_table_def()
439 table->def = sopt->def_DP; in gred_change_table_def()
440 red_flags_changed = table->red_flags != sopt->flags; in gred_change_table_def()
441 table->red_flags = sopt->flags; in gred_change_table_def()
451 gred_enable_rio_mode(table); in gred_change_table_def()
452 gred_disable_wred_mode(table); in gred_change_table_def()
454 gred_enable_wred_mode(table); in gred_change_table_def()
456 gred_disable_rio_mode(table); in gred_change_table_def()
457 gred_disable_wred_mode(table); in gred_change_table_def()
461 for (i = 0; i < table->DPs; i++) in gred_change_table_def()
462 if (table->tab[i]) in gred_change_table_def()
463 table->tab[i]->red_flags = in gred_change_table_def()
464 table->red_flags & GRED_VQ_RED_FLAGS; in gred_change_table_def()
466 for (i = table->DPs; i < MAX_DPs; i++) { in gred_change_table_def()
467 if (table->tab[i]) { in gred_change_table_def()
470 gred_destroy_vq(table->tab[i]); in gred_change_table_def()
471 table->tab[i] = NULL; in gred_change_table_def()
485 struct gred_sched *table = qdisc_priv(sch); in gred_change_vq() local
486 struct gred_sched_data *q = table->tab[dp]; in gred_change_vq()
494 table->tab[dp] = q = *prealloc; in gred_change_vq()
498 q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS; in gred_change_vq()
536 static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry) in gred_vq_apply() argument
547 table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]); in gred_vq_apply()
550 static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs) in gred_vqs_apply() argument
558 gred_vq_apply(table, attr); in gred_vqs_apply()
564 static int gred_vq_validate(struct gred_sched *table, u32 cdp, in gred_vq_validate() argument
582 if (dp >= table->DPs) { in gred_vq_validate()
586 if (dp != cdp && !table->tab[dp]) { in gred_vq_validate()
594 if (table->red_flags && table->red_flags != red_flags) { in gred_vq_validate()
608 static int gred_vqs_validate(struct gred_sched *table, u32 cdp, in gred_vqs_validate() argument
622 err = gred_vq_validate(table, cdp, attr, extack); in gred_vqs_validate()
643 struct gred_sched *table = qdisc_priv(sch); in gred_change() local
677 if (ctl->DP >= table->DPs) { in gred_change()
683 err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST], in gred_change()
689 if (gred_rio_mode(table)) { in gred_change()
693 if (table->tab[table->def]) in gred_change()
694 def_prio = table->tab[table->def]->prio; in gred_change()
713 gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]); in gred_change()
715 if (gred_rio_mode(table)) { in gred_change()
716 gred_disable_wred_mode(table); in gred_change()
718 gred_enable_wred_mode(table); in gred_change()
736 struct gred_sched *table = qdisc_priv(sch); in gred_init() local
761 table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL); in gred_init()
762 if (!table->opt) in gred_init()
771 struct gred_sched *table = qdisc_priv(sch); in gred_dump() local
776 .DPs = table->DPs, in gred_dump()
777 .def_DP = table->def, in gred_dump()
778 .grio = gred_rio_mode(table), in gred_dump()
779 .flags = table->red_flags, in gred_dump()
792 struct gred_sched_data *q = table->tab[i]; in gred_dump()
808 struct gred_sched_data *q = table->tab[i]; in gred_dump()
825 opt.backlog = gred_backlog(table, q, sch); in gred_dump()
839 if (gred_wred_mode(table)) in gred_dump()
840 gred_load_wred_set(table, q); in gred_dump()
859 struct gred_sched_data *q = table->tab[i]; in gred_dump()
882 gred_backlog(table, q, sch))) in gred_dump()
914 struct gred_sched *table = qdisc_priv(sch); in gred_destroy() local
917 for (i = 0; i < table->DPs; i++) { in gred_destroy()
918 if (table->tab[i]) in gred_destroy()
919 gred_destroy_vq(table->tab[i]); in gred_destroy()
922 kfree(table->opt); in gred_destroy()