/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1

#include "cmsg.h"
#include "../nfp_net.h"

#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>

struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;

#define NFP_FL_STAT_ID_MU_NUM		GENMASK(31, 22)
#define NFP_FL_STAT_ID_STAT		GENMASK(21, 0)
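
/* Illustrative sketch only (not part of this header): a stats ID packs the
 * memory unit number into bits 31:22 and the per-unit stats index into bits
 * 21:0, so composing or splitting one is a plain bitfield operation, e.g.
 * with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which this header
 * does not pull in itself:
 *
 *	u32 stats_id = FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, mem_unit) |
 *		       FIELD_PREP(NFP_FL_STAT_ID_STAT, idx);
 *	u32 mu = FIELD_GET(NFP_FL_STAT_ID_MU_NUM, stats_id);
 */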

#define NFP_FL_STATS_ELEM_RS		sizeof_field(struct nfp_fl_stats_id, \
						     init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS	256
#define NFP_FLOWER_MASK_ELEMENT_RS	1
#define NFP_FLOWER_MASK_HASH_BITS	10

#define NFP_FLOWER_KEY_MAX_LW		32

#define NFP_FL_META_FLAG_MANAGE_MASK	BIT(7)

#define NFP_FL_MASK_REUSE_TIME_NS	40000
#define NFP_FL_MASK_ID_LOCATION		1

/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE		BIT(0)
#define NFP_FL_NBI_MTU_SETTING		BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT		BIT(2)
#define NFP_FL_FEATS_VLAN_PCP		BIT(3)
#define NFP_FL_FEATS_VF_RLIM		BIT(4)
#define NFP_FL_FEATS_FLOW_MOD		BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES	BIT(6)
#define NFP_FL_FEATS_IPV6_TUN		BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ		BIT(8)
#define NFP_FL_FEATS_QOS_PPS		BIT(9)
#define NFP_FL_FEATS_HOST_ACK		BIT(31)

#define NFP_FL_ENABLE_FLOW_MERGE	BIT(0)
#define NFP_FL_ENABLE_LAG		BIT(1)

#define NFP_FL_FEATS_HOST \
	(NFP_FL_FEATS_GENEVE | \
	NFP_FL_NBI_MTU_SETTING | \
	NFP_FL_FEATS_GENEVE_OPT | \
	NFP_FL_FEATS_VLAN_PCP | \
	NFP_FL_FEATS_VF_RLIM | \
	NFP_FL_FEATS_FLOW_MOD | \
	NFP_FL_FEATS_PRE_TUN_RULES | \
	NFP_FL_FEATS_IPV6_TUN | \
	NFP_FL_FEATS_VLAN_QINQ | \
	NFP_FL_FEATS_QOS_PPS)
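
/* Illustrative sketch only: the extra-feature word advertised by the
 * firmware is stored in nfp_flower_priv::flower_ext_feats (defined below),
 * so checking whether an extra feature may be used is typically a simple
 * bit test, e.g.:
 *
 *	struct nfp_flower_priv *priv = app->priv;
 *
 *	if (priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS)
 *		... packets-per-second policing may be offloaded ...
 */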

struct nfp_fl_mask_id {
	struct circ_buf mask_id_free_list;
	ktime_t *last_used;
	u8 init_unallocated;
};

struct nfp_fl_stats_id {
	struct circ_buf free_list;
	u32 init_unalloc;
	u8 repeated_em_count;
};

/**
 * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
 * @offloaded_macs: Hashtable of the offloaded MAC addresses
 * @ipv4_off_list: List of IPv4 addresses to offload
 * @ipv6_off_list: List of IPv6 addresses to offload
 * @neigh_off_list_v4: List of IPv4 neighbour offloads
 * @neigh_off_list_v6: List of IPv6 neighbour offloads
 * @ipv4_off_lock: Lock for the IPv4 address list
 * @ipv6_off_lock: Lock for the IPv6 address list
 * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list
 * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list
 * @mac_off_ids: IDA to manage id assignment for offloaded MACs
 * @neigh_nb: Notifier to monitor neighbour state
 */
struct nfp_fl_tunnel_offloads {
	struct rhashtable offloaded_macs;
	struct list_head ipv4_off_list;
	struct list_head ipv6_off_list;
	struct list_head neigh_off_list_v4;
	struct list_head neigh_off_list_v6;
	struct mutex ipv4_off_lock;
	struct mutex ipv6_off_lock;
	spinlock_t neigh_off_lock_v4;
	spinlock_t neigh_off_lock_v6;
	struct ida mac_off_ids;
	struct notifier_block neigh_nb;
};

/**
 * struct nfp_mtu_conf - manage MTU setting
 * @portnum: NFP port number of repr with requested MTU change
 * @requested_val: MTU value requested for repr
 * @ack: Received ack that MTU has been correctly set
 * @wait_q: Wait queue for MTU acknowledgements
 * @lock: Lock for setting/reading MTU variables
 */
struct nfp_mtu_conf {
	u32 portnum;
	unsigned int requested_val;
	bool ack;
	wait_queue_head_t wait_q;
	spinlock_t lock;
};

/**
 * struct nfp_fl_lag - Flower APP priv data for link aggregation
 * @work: Work queue for writing configs to the HW
 * @lock: Lock to protect lag_group_list
 * @group_list: List of all master/slave groups offloaded
 * @ida_handle: IDA to handle group ids
 * @pkt_num: Incremented for each config packet sent
 * @batch_ver: Incremented for each batch of config packets
 * @global_inst: Instance allocator for groups
 * @rst_cfg: Marker to reset HW LAG config
 * @retrans_skbs: Cmsgs that could not be processed by HW and require
 *		  retransmission
 */
struct nfp_fl_lag {
	struct delayed_work work;
	struct mutex lock;
	struct list_head group_list;
	struct ida ida_handle;
	unsigned int pkt_num;
	unsigned int batch_ver;
	u8 global_inst;
	bool rst_cfg;
	struct sk_buff_head retrans_skbs;
};

/**
 * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
 * @port_ids: Assignment of ids to any additional ports
 * @lock: Lock for extra ports list
 */
struct nfp_fl_internal_ports {
	struct idr port_ids;
	spinlock_t lock;
};

/**
 * struct nfp_flower_priv - Flower APP per-vNIC priv data
 * @app: Back pointer to app
 * @nn: Pointer to vNIC
 * @mask_id_seed: Seed used for mask hash table
 * @flower_version: HW version of flower
 * @flower_ext_feats: Bitmap of extra features the HW supports
 * @flower_en_feats: Bitmap of features enabled by HW
 * @stats_ids: List of free stats ids
 * @mask_ids: List of free mask ids
 * @mask_table: Hash table used to store masks
 * @stats_ring_size: Maximum number of allowed stats ids
 * @flow_table: Hash table used to store flower rules
 * @stats: Stored stats updates for flower rules
 * @stats_lock: Lock for flower rule stats updates
 * @stats_ctx_table: Hash table to map stats contexts to their flow rules
 * @cmsg_work: Workqueue for control messages processing
 * @cmsg_skbs_high: List of higher priority skbs for control message
 *		    processing
 * @cmsg_skbs_low: List of lower priority skbs for control message
 *		   processing
 * @tun: Tunnel offload data
 * @reify_replies: atomically stores the number of replies received
 *		   from firmware for repr reify
 * @reify_wait_queue: wait queue for repr reify response counting
 * @mtu_conf: Configuration of repr MTU value
 * @nfp_lag: Link aggregation data block
 * @indr_block_cb_priv: List of priv data passed to indirect block cbs
 * @non_repr_priv: List of offloaded non-repr ports and their priv data
 * @active_mem_unit: Current active memory unit for flower rules
 * @total_mem_units: Total number of available memory units for flower rules
 * @internal_ports: Internal port ids used in offloaded rules
 * @qos_stats_work: Workqueue for qos stats processing
 * @qos_rate_limiters: Current active qos rate limiters
 * @qos_stats_lock: Lock on qos stats updates
 * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
 * @merge_table: Hash table to store merged flows
 * @ct_zone_table: Hash table used to store the different zones
 * @ct_zone_wc: Special zone entry for wildcarded zone matches
 * @ct_map_table: Hash table used to reference ct flows
 */
struct nfp_flower_priv {
	struct nfp_app *app;
	struct nfp_net *nn;
	u32 mask_id_seed;
	u64 flower_version;
	u64 flower_ext_feats;
	u8 flower_en_feats;
	struct nfp_fl_stats_id stats_ids;
	struct nfp_fl_mask_id mask_ids;
	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
	u32 stats_ring_size;
	struct rhashtable flow_table;
	struct nfp_fl_stats *stats;
	spinlock_t stats_lock; /* lock stats */
	struct rhashtable stats_ctx_table;
	struct work_struct cmsg_work;
	struct sk_buff_head cmsg_skbs_high;
	struct sk_buff_head cmsg_skbs_low;
	struct nfp_fl_tunnel_offloads tun;
	atomic_t reify_replies;
	wait_queue_head_t reify_wait_queue;
	struct nfp_mtu_conf mtu_conf;
	struct nfp_fl_lag nfp_lag;
	struct list_head indr_block_cb_priv;
	struct list_head non_repr_priv;
	unsigned int active_mem_unit;
	unsigned int total_mem_units;
	struct nfp_fl_internal_ports internal_ports;
	struct delayed_work qos_stats_work;
	unsigned int qos_rate_limiters;
	spinlock_t qos_stats_lock; /* Protect the qos stats */
	int pre_tun_rule_cnt;
	struct rhashtable merge_table;
	struct rhashtable ct_zone_table;
	struct nfp_fl_ct_zone_entry *ct_zone_wc;
	struct rhashtable ct_map_table;
};

/**
 * struct nfp_fl_qos - Flower APP priv data for quality of service
 * @netdev_port_id: NFP port number of repr with qos info
 * @curr_stats: Currently stored stats updates for qos info
 * @prev_stats: Previously stored updates for qos info
 * @last_update: Stored time when last stats were updated
 */
struct nfp_fl_qos {
	u32 netdev_port_id;
	struct nfp_stat_pair curr_stats;
	struct nfp_stat_pair prev_stats;
	u64 last_update;
};

/**
 * struct nfp_flower_repr_priv - Flower APP per-repr priv data
 * @nfp_repr: Back pointer to nfp_repr
 * @lag_port_flags: Extended port flags to record lag state of repr
 * @mac_offloaded: Flag indicating a MAC address is offloaded for repr
 * @offloaded_mac_addr: MAC address that has been offloaded for repr
 * @block_shared: Flag indicating if offload applies to shared blocks
 * @mac_list: List entry of reprs that share the same offloaded MAC
 * @qos_table: Stored info on filters implementing qos
 * @on_bridge: Indicates if the repr is attached to a bridge
 */
struct nfp_flower_repr_priv {
	struct nfp_repr *nfp_repr;
	unsigned long lag_port_flags;
	bool mac_offloaded;
	u8 offloaded_mac_addr[ETH_ALEN];
	bool block_shared;
	struct list_head mac_list;
	struct nfp_fl_qos qos_table;
	bool on_bridge;
};

/**
 * struct nfp_flower_non_repr_priv - Priv data for non-repr offloaded ports
 * @list: List entry of offloaded reprs
 * @netdev: Pointer to non-repr net_device
 * @ref_count: Number of references held for this priv data
 * @mac_offloaded: Flag indicating a MAC address is offloaded for device
 * @offloaded_mac_addr: MAC address that has been offloaded for dev
 */
struct nfp_flower_non_repr_priv {
	struct list_head list;
	struct net_device *netdev;
	int ref_count;
	bool mac_offloaded;
	u8 offloaded_mac_addr[ETH_ALEN];
};

struct nfp_fl_key_ls {
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
};

struct nfp_fl_rule_metadata {
	u8 key_len;
	u8 mask_len;
	u8 act_len;
	u8 flags;
	__be32 host_ctx_id;
	__be64 host_cookie __packed;
	__be64 flow_version __packed;
	__be32 shortcut;
};

struct nfp_fl_stats {
	u64 pkts;
	u64 bytes;
	u64 used;
};

/**
 * struct nfp_ipv6_addr_entry - cached IPv6 addresses
 * @ipv6_addr: IP address
 * @ref_count: number of rules currently using this IP
 * @list: list pointer
 */
struct nfp_ipv6_addr_entry {
	struct in6_addr ipv6_addr;
	int ref_count;
	struct list_head list;
};

struct nfp_fl_payload {
	struct nfp_fl_rule_metadata meta;
	unsigned long tc_flower_cookie;
	struct rhash_head fl_node;
	struct rcu_head rcu;
	__be32 nfp_tun_ipv4_addr;
	struct nfp_ipv6_addr_entry *nfp_tun_ipv6;
	struct net_device *ingress_dev;
	char *unmasked_data;
	char *mask_data;
	char *action_data;
	struct list_head linked_flows;
	bool in_hw;
	struct {
		struct net_device *dev;
		__be16 vlan_tci;
		__be16 port_idx;
	} pre_tun_rule;
};

struct nfp_fl_payload_link {
	/* A link contains a pointer to a merge flow and an associated sub_flow.
	 * Each merge flow will feature in 2 links to its underlying sub_flows.
	 * A sub_flow will have at least 1 link to a merge flow or more if it
	 * has been used to create multiple merge flows.
	 *
	 * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
	 * all links to sub_flows (sub_flow.flow) via merge_flow.list.
	 * For a sub_flow, 'linked_flows' gives all links to merge flows it has
	 * formed (merge_flow.flow) via sub_flow.list.
	 */
	struct {
		struct list_head list;
		struct nfp_fl_payload *flow;
	} merge_flow, sub_flow;
};
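
/* Illustrative sketch only (not a helper provided by this header): given the
 * linkage described above, visiting every merge flow a sub_flow takes part
 * in is a walk of its 'linked_flows' list via the sub_flow.list member,
 * using list_for_each_entry() from <linux/list.h>:
 *
 *	struct nfp_fl_payload_link *link;
 *
 *	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
 *		handle_merge_flow(link->merge_flow.flow);
 *
 * where handle_merge_flow() stands in for whatever per-flow work is needed.
 * Conversely, a merge flow reaches its two sub_flows by walking its own
 * 'linked_flows' list via merge_flow.list and reading link->sub_flow.flow.
 */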

extern const struct rhashtable_params nfp_flower_table_params;
extern const struct rhashtable_params merge_table_params;

struct nfp_merge_info {
	u64 parent_ctx;
	struct rhash_head ht_node;
};

struct nfp_fl_stats_frame {
	__be32 stats_con_id;
	__be32 pkt_count;
	__be64 byte_count;
	__be64 stats_cookie;
};

static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_priv *app_priv = app->priv;

	if (!(app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE))
		return false;
	if (!netdev->rtnl_link_ops)
		return false;
	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
		return true;

	return false;
}

/* The address of the merged flow acts as its cookie.
 * Cookies supplied to us by TC flower are also addresses to allocated
 * memory and thus this scheme should not generate any collisions.
 */
static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
{
	return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
}

static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
{
	return netif_is_ovs_master(netdev);
}

int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data);
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2);
void
nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
			struct nfp_flower_meta_tci *msk, u8 key_type);
void
nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
		       struct nfp_flower_meta_tci *msk,
		       struct flow_rule *rule);
void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext);
int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type,
			struct netlink_ext_ack *extack);
void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct flow_rule *rule);
int
nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
			struct nfp_flower_mac_mpls *msk,
			struct flow_rule *rule,
			struct netlink_ext_ack *extack);
void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct flow_rule *rule);
void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
			struct nfp_flower_vlan *msk,
			struct flow_rule *rule);
void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk, struct flow_rule *rule);
void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk, struct flow_rule *rule);
void
nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule);
void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
				struct nfp_flower_ipv4_gre_tun *msk,
				struct flow_rule *rule);
void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct flow_rule *rule);
void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
				struct nfp_flower_ipv6_udp_tun *msk,
				struct flow_rule *rule);
void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
				struct nfp_flower_ipv6_gre_tun *msk,
				struct flow_rule *rule);
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_rule *rule,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
			      struct flow_rule *rule,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow,
			      struct netlink_ext_ack *extack);
int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack);
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow);

struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);

void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);

int nfp_tunnel_config_start(struct nfp_app *app);
void nfp_tunnel_config_stop(struct nfp_app *app);
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry);
struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6);
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
				struct net_device *netdev,
				unsigned long event, void *ptr);
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act,
				       struct netlink_ext_ack *extack);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
				 struct net_device *master);
void nfp_flower_qos_init(struct nfp_app *app);
void nfp_flower_qos_cleanup(struct nfp_app *app);
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow);
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
				enum tc_setup_type type, void *type_data,
				void *data,
				void (*cleanup)(struct flow_block_cb *block_cb));
void nfp_flower_setup_indr_tc_release(void *cb_priv);

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev);
void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev);
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow);
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow);

struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer);
int nfp_flower_calculate_key_layers(struct nfp_app *app,
				    struct net_device *netdev,
				    struct nfp_fl_key_ls *ret_key_ls,
				    struct flow_rule *flow,
				    enum nfp_flower_tun_type *tun_type,
				    struct netlink_ext_ack *extack);
void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow);
int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype);
void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow);
#endif