Lines matching refs: flow (drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c)

370 struct bnxt_tc_flow *flow) in bnxt_tc_parse_flow() argument
387 flow->l2_key.ether_type = match.key->n_proto; in bnxt_tc_parse_flow()
388 flow->l2_mask.ether_type = match.mask->n_proto; in bnxt_tc_parse_flow()
392 flow->l4_key.ip_proto = match.key->ip_proto; in bnxt_tc_parse_flow()
393 flow->l4_mask.ip_proto = match.mask->ip_proto; in bnxt_tc_parse_flow()
401 flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; in bnxt_tc_parse_flow()
402 ether_addr_copy(flow->l2_key.dmac, match.key->dst); in bnxt_tc_parse_flow()
403 ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); in bnxt_tc_parse_flow()
404 ether_addr_copy(flow->l2_key.smac, match.key->src); in bnxt_tc_parse_flow()
405 ether_addr_copy(flow->l2_mask.smac, match.mask->src); in bnxt_tc_parse_flow()
412 flow->l2_key.inner_vlan_tci = in bnxt_tc_parse_flow()
415 flow->l2_mask.inner_vlan_tci = in bnxt_tc_parse_flow()
418 flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); in bnxt_tc_parse_flow()
419 flow->l2_mask.inner_vlan_tpid = htons(0xffff); in bnxt_tc_parse_flow()
420 flow->l2_key.num_vlans = 1; in bnxt_tc_parse_flow()
427 flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; in bnxt_tc_parse_flow()
428 flow->l3_key.ipv4.daddr.s_addr = match.key->dst; in bnxt_tc_parse_flow()
429 flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst; in bnxt_tc_parse_flow()
430 flow->l3_key.ipv4.saddr.s_addr = match.key->src; in bnxt_tc_parse_flow()
431 flow->l3_mask.ipv4.saddr.s_addr = match.mask->src; in bnxt_tc_parse_flow()
436 flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; in bnxt_tc_parse_flow()
437 flow->l3_key.ipv6.daddr = match.key->dst; in bnxt_tc_parse_flow()
438 flow->l3_mask.ipv6.daddr = match.mask->dst; in bnxt_tc_parse_flow()
439 flow->l3_key.ipv6.saddr = match.key->src; in bnxt_tc_parse_flow()
440 flow->l3_mask.ipv6.saddr = match.mask->src; in bnxt_tc_parse_flow()
447 flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; in bnxt_tc_parse_flow()
448 flow->l4_key.ports.dport = match.key->dst; in bnxt_tc_parse_flow()
449 flow->l4_mask.ports.dport = match.mask->dst; in bnxt_tc_parse_flow()
450 flow->l4_key.ports.sport = match.key->src; in bnxt_tc_parse_flow()
451 flow->l4_mask.ports.sport = match.mask->src; in bnxt_tc_parse_flow()
458 flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; in bnxt_tc_parse_flow()
459 flow->l4_key.icmp.type = match.key->type; in bnxt_tc_parse_flow()
460 flow->l4_key.icmp.code = match.key->code; in bnxt_tc_parse_flow()
461 flow->l4_mask.icmp.type = match.mask->type; in bnxt_tc_parse_flow()
462 flow->l4_mask.icmp.code = match.mask->code; in bnxt_tc_parse_flow()
469 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; in bnxt_tc_parse_flow()
470 flow->tun_key.u.ipv4.dst = match.key->dst; in bnxt_tc_parse_flow()
471 flow->tun_mask.u.ipv4.dst = match.mask->dst; in bnxt_tc_parse_flow()
472 flow->tun_key.u.ipv4.src = match.key->src; in bnxt_tc_parse_flow()
473 flow->tun_mask.u.ipv4.src = match.mask->src; in bnxt_tc_parse_flow()
483 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; in bnxt_tc_parse_flow()
484 flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid); in bnxt_tc_parse_flow()
485 flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid); in bnxt_tc_parse_flow()
492 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; in bnxt_tc_parse_flow()
493 flow->tun_key.tp_dst = match.key->dst; in bnxt_tc_parse_flow()
494 flow->tun_mask.tp_dst = match.mask->dst; in bnxt_tc_parse_flow()
495 flow->tun_key.tp_src = match.key->src; in bnxt_tc_parse_flow()
496 flow->tun_mask.tp_src = match.mask->src; in bnxt_tc_parse_flow()
499 return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action, in bnxt_tc_parse_flow()
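Every match in bnxt_tc_parse_flow() follows one pattern: fetch a dissector match, copy key and mask side by side into the driver's flow struct, and set a flags bit so later stages know which fields were supplied. A condensed sketch of that pattern for the Ethernet-address key, using the standard <net/flow_offload.h> API (the helper name parse_eth_addrs is illustrative, not from the driver):

    static void parse_eth_addrs(struct flow_rule *rule,
                                struct bnxt_tc_flow *flow)
    {
            if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                    struct flow_match_eth_addrs match;

                    flow_rule_match_eth_addrs(rule, &match);
                    /* record that L2 addresses are part of this flow's key */
                    flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
                    ether_addr_copy(flow->l2_key.dmac, match.key->dst);
                    ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
                    ether_addr_copy(flow->l2_key.smac, match.key->src);
                    ether_addr_copy(flow->l2_mask.smac, match.mask->src);
            }
    }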
585 static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, in bnxt_hwrm_cfa_flow_alloc() argument
590 struct bnxt_tc_actions *actions = &flow->actions; in bnxt_hwrm_cfa_flow_alloc()
591 struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; in bnxt_hwrm_cfa_flow_alloc()
592 struct bnxt_tc_l3_key *l3_key = &flow->l3_key; in bnxt_hwrm_cfa_flow_alloc()
602 req->src_fid = cpu_to_le16(flow->src_fid); in bnxt_hwrm_cfa_flow_alloc()
682 req->ethertype = flow->l2_key.ether_type; in bnxt_hwrm_cfa_flow_alloc()
683 req->ip_proto = flow->l4_key.ip_proto; in bnxt_hwrm_cfa_flow_alloc()
685 if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { in bnxt_hwrm_cfa_flow_alloc()
686 memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
687 memcpy(req->smac, flow->l2_key.smac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
690 if (flow->l2_key.num_vlans > 0) { in bnxt_hwrm_cfa_flow_alloc()
696 req->outer_vlan_tci = flow->l2_key.inner_vlan_tci; in bnxt_hwrm_cfa_flow_alloc()
701 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { in bnxt_hwrm_cfa_flow_alloc()
704 flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ? in bnxt_hwrm_cfa_flow_alloc()
708 if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { in bnxt_hwrm_cfa_flow_alloc()
715 } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { in bnxt_hwrm_cfa_flow_alloc()
727 if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { in bnxt_hwrm_cfa_flow_alloc()
728 req->l4_src_port = flow->l4_key.ports.sport; in bnxt_hwrm_cfa_flow_alloc()
729 req->l4_src_port_mask = flow->l4_mask.ports.sport; in bnxt_hwrm_cfa_flow_alloc()
730 req->l4_dst_port = flow->l4_key.ports.dport; in bnxt_hwrm_cfa_flow_alloc()
731 req->l4_dst_port_mask = flow->l4_mask.ports.dport; in bnxt_hwrm_cfa_flow_alloc()
732 } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { in bnxt_hwrm_cfa_flow_alloc()
734 req->l4_src_port = htons(flow->l4_key.icmp.type); in bnxt_hwrm_cfa_flow_alloc()
735 req->l4_src_port_mask = htons(flow->l4_mask.icmp.type); in bnxt_hwrm_cfa_flow_alloc()
736 req->l4_dst_port = htons(flow->l4_key.icmp.code); in bnxt_hwrm_cfa_flow_alloc()
737 req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code); in bnxt_hwrm_cfa_flow_alloc()
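Two details in the bnxt_hwrm_cfa_flow_alloc() matches are worth noting. Lines 734-737 pack ICMP type and code into the 16-bit L4 port slots via htons(), since the HWRM request has no dedicated ICMP fields. And line 701 short-circuits on a wildcard L4 mask; the helper itself is not shown above, but its probable shape (reconstructed from the call site, not copied from the driver) is:

    /* An all-zero mask matches anything, so the L4 header can be ignored. */
    static bool is_wildcard(void *mask, int len)
    {
            const u8 *p = mask;
            int i;

            for (i = 0; i < len; i++)
                    if (p[i] != 0)
                            return false;
            return true;
    }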
790 struct bnxt_tc_flow *flow, in hwrm_cfa_decap_filter_alloc() argument
796 struct ip_tunnel_key *tun_key = &flow->tun_key; in hwrm_cfa_decap_filter_alloc()
811 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { in hwrm_cfa_decap_filter_alloc()
817 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { in hwrm_cfa_decap_filter_alloc()
829 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { in hwrm_cfa_decap_filter_alloc()
839 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { in hwrm_cfa_decap_filter_alloc()
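In hwrm_cfa_decap_filter_alloc() each BNXT_TC_FLOW_FLAGS_TUNL_* bit gates one group of request fields, mirroring how the flags were set during parsing. A sketch of that gating for the tunnel port, with the enables constant named after the usual HWRM convention and best treated as illustrative:

    if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
            /* tell firmware this field of the request is valid */
            req->dst_port = tun_key->tp_dst;
            enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
    }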
1001 bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, in bnxt_tc_get_ref_flow_handle() argument
1011 &flow->l2_key); in bnxt_tc_get_ref_flow_handle()
1041 static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) in bnxt_tc_can_offload() argument
1044 if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) && in bnxt_tc_can_offload()
1045 (flow->l4_key.ip_proto != IPPROTO_TCP && in bnxt_tc_can_offload()
1046 flow->l4_key.ip_proto != IPPROTO_UDP)) { in bnxt_tc_can_offload()
1048 flow->l4_key.ip_proto); in bnxt_tc_can_offload()
1053 if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) && in bnxt_tc_can_offload()
1054 !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) { in bnxt_tc_can_offload()
1058 if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) && in bnxt_tc_can_offload()
1059 !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) { in bnxt_tc_can_offload()
1065 if (bits_set(&flow->l2_key.inner_vlan_tci, in bnxt_tc_can_offload()
1066 sizeof(flow->l2_key.inner_vlan_tci)) && in bnxt_tc_can_offload()
1067 !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci, in bnxt_tc_can_offload()
1068 flow->l2_key.inner_vlan_tci)) { in bnxt_tc_can_offload()
1072 if (bits_set(&flow->l2_key.inner_vlan_tpid, in bnxt_tc_can_offload()
1073 sizeof(flow->l2_key.inner_vlan_tpid)) && in bnxt_tc_can_offload()
1074 !is_exactmatch(&flow->l2_mask.inner_vlan_tpid, in bnxt_tc_can_offload()
1075 sizeof(flow->l2_mask.inner_vlan_tpid))) { in bnxt_tc_can_offload()
1081 if (!is_exactmatch(&flow->l2_mask.ether_type, in bnxt_tc_can_offload()
1082 sizeof(flow->l2_mask.ether_type))) { in bnxt_tc_can_offload()
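All of the bnxt_tc_can_offload() checks reduce to the same rule: if the user matched on a field at all, the mask must be exact, because the hardware cannot do partial matches. The two predicates are not shown above; their likely shape (reconstructed, not verbatim) is:

    /* Did the user match on this field at all? */
    static bool bits_set(void *key, int len)
    {
            const u8 *p = key;
            int i;

            for (i = 0; i < len; i++)
                    if (p[i] != 0)
                            return true;
            return false;
    }

    /* Is the mask all-ones, i.e. an exact match? */
    static bool is_exactmatch(void *mask, int len)
    {
            const u8 *p = mask;
            int i;

            for (i = 0; i < len; i++)
                    if (p[i] != 0xff)
                            return false;
            return true;
    }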
1150 struct bnxt_tc_flow *flow, in bnxt_tc_get_ref_decap_handle() argument
1231 struct flowi4 flow = { {0} }; in bnxt_tc_resolve_tunnel_hdrs() local
1237 flow.flowi4_proto = IPPROTO_UDP; in bnxt_tc_resolve_tunnel_hdrs()
1238 flow.fl4_dport = tun_key->tp_dst; in bnxt_tc_resolve_tunnel_hdrs()
1239 flow.daddr = tun_key->u.ipv4.dst; in bnxt_tc_resolve_tunnel_hdrs()
1241 rt = ip_route_output_key(dev_net(real_dst_dev), &flow); in bnxt_tc_resolve_tunnel_hdrs()
1243 netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr); in bnxt_tc_resolve_tunnel_hdrs()
1270 netdev_name(dst_dev), &flow.daddr, in bnxt_tc_resolve_tunnel_hdrs()
1276 nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); in bnxt_tc_resolve_tunnel_hdrs()
1279 &flow.daddr); in bnxt_tc_resolve_tunnel_hdrs()
1284 tun_key->u.ipv4.src = flow.saddr; in bnxt_tc_resolve_tunnel_hdrs()
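Note that the flowi4 local at line 1231 is a route-lookup key, not a bnxt_tc_flow; the name collision is why it appears in this listing. The resolution pattern, condensed (error handling abbreviated, and the variable renamed to fl4 for clarity):

    struct flowi4 fl4 = { {0} };
    struct rtable *rt;
    struct neighbour *nbr;

    fl4.flowi4_proto = IPPROTO_UDP;         /* the tunnel runs over UDP */
    fl4.fl4_dport = tun_key->tp_dst;
    fl4.daddr = tun_key->u.ipv4.dst;

    rt = ip_route_output_key(dev_net(real_dst_dev), &fl4);
    if (IS_ERR(rt))
            return -EOPNOTSUPP;             /* no route to tunnel endpoint */

    nbr = dst_neigh_lookup(&rt->dst, &fl4.daddr);
    if (!nbr) {
            ip_rt_put(rt);
            return -EOPNOTSUPP;
    }

    /* the lookup picks the source address for the outer header */
    tun_key->u.ipv4.src = fl4.saddr;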
1300 static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, in bnxt_tc_get_decap_handle() argument
1304 struct ip_tunnel_key *decap_key = &flow->tun_key; in bnxt_tc_get_decap_handle()
1334 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; in bnxt_tc_get_decap_handle()
1335 tun_key.tp_dst = flow->tun_key.tp_dst; in bnxt_tc_get_decap_handle()
1348 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS; in bnxt_tc_get_decap_handle()
1355 rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node, in bnxt_tc_get_decap_handle()
1361 rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info, in bnxt_tc_get_decap_handle()
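For decap (lines 1334-1335) the neighbour that matters is the remote tunnel endpoint, so the lookup key is built with the outer addresses swapped: the flow's outer source becomes the lookup's destination. A minimal sketch:

    struct ip_tunnel_key tun_key = { 0 };

    /* route toward the peer that is sending us encapsulated traffic */
    tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
    tun_key.tp_dst = flow->tun_key.tp_dst;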
1398 static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, in bnxt_tc_get_encap_handle() argument
1402 struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; in bnxt_tc_get_encap_handle()
1443 struct bnxt_tc_flow *flow, in bnxt_tc_put_tunnel_handle() argument
1446 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) in bnxt_tc_put_tunnel_handle()
1448 else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) in bnxt_tc_put_tunnel_handle()
1453 struct bnxt_tc_flow *flow, in bnxt_tc_get_tunnel_handle() argument
1457 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) in bnxt_tc_get_tunnel_handle()
1458 return bnxt_tc_get_decap_handle(bp, flow, flow_node, in bnxt_tc_get_tunnel_handle()
1460 else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) in bnxt_tc_get_tunnel_handle()
1461 return bnxt_tc_get_encap_handle(bp, flow, flow_node, in bnxt_tc_get_tunnel_handle()
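The get/put pair dispatches on the action flags; encap and decap appear to be mutually exclusive for a given flow, and each get is balanced by the put at teardown (line 1478). Recomposed from the matched lines, with parameter names assumed:

    if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
            return bnxt_tc_get_decap_handle(bp, flow, flow_node,
                                            tunnel_handle);
    else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
            return bnxt_tc_get_encap_handle(bp, flow, flow_node,
                                            tunnel_handle);
    return 0;   /* no tunnel action on this flow */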
1478 bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node); in __bnxt_tc_del_flow()
1495 static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, in bnxt_tc_set_flow_dir() argument
1498 flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; in bnxt_tc_set_flow_dir()
1501 static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, in bnxt_tc_set_src_fid() argument
1504 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) in bnxt_tc_set_src_fid()
1505 flow->src_fid = bp->pf.fw_fid; in bnxt_tc_set_src_fid()
1507 flow->src_fid = src_fid; in bnxt_tc_set_src_fid()
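Recomposed from lines 1495-1507: decapped traffic enters through the PF, so the PF's fid replaces the ingress port's, and the flow direction falls out of whether the resulting source fid is the PF itself:

    if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
            flow->src_fid = bp->pf.fw_fid;  /* decap: source is the PF */
    else
            flow->src_fid = src_fid;

    flow->l2_key.dir = (bp->pf.fw_fid == flow->src_fid) ? BNXT_DIR_RX
                                                        : BNXT_DIR_TX;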
1528 struct bnxt_tc_flow *flow; in bnxt_tc_add_flow() local
1540 flow = &new_node->flow; in bnxt_tc_add_flow()
1542 rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); in bnxt_tc_add_flow()
1546 bnxt_tc_set_src_fid(bp, flow, src_fid); in bnxt_tc_add_flow()
1547 bnxt_tc_set_flow_dir(bp, flow, flow->src_fid); in bnxt_tc_add_flow()
1549 if (!bnxt_tc_can_offload(bp, flow)) { in bnxt_tc_add_flow()
1566 rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle); in bnxt_tc_add_flow()
1571 rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle); in bnxt_tc_add_flow()
1576 rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, in bnxt_tc_add_flow()
1581 flow->lastused = jiffies; in bnxt_tc_add_flow()
1582 spin_lock_init(&flow->stats_lock); in bnxt_tc_add_flow()
1595 bnxt_tc_put_tunnel_handle(bp, flow, new_node); in bnxt_tc_add_flow()
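Taken together, the bnxt_tc_add_flow() matches trace the whole offload sequence: parse, classify, acquire the reference and tunnel handles, then program the hardware. A condensed recomposition (goto labels and unwind order are illustrative):

    rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
    if (rc)
            goto free_node;

    bnxt_tc_set_src_fid(bp, flow, src_fid);
    bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);

    if (!bnxt_tc_can_offload(bp, flow)) {
            rc = -EOPNOTSUPP;
            goto free_node;
    }

    rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
    if (rc)
            goto free_node;

    rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
    if (rc)
            goto put_ref;

    rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
                                  tunnel_handle, new_node);
    if (rc)
            goto put_tunnel;    /* unwound via bnxt_tc_put_tunnel_handle(), line 1595 */

    flow->lastused = jiffies;
    spin_lock_init(&flow->stats_lock);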
1629 struct bnxt_tc_flow *flow; in bnxt_tc_get_flow_stats() local
1638 flow = &flow_node->flow; in bnxt_tc_get_flow_stats()
1639 curr_stats = &flow->stats; in bnxt_tc_get_flow_stats()
1640 prev_stats = &flow->prev_stats; in bnxt_tc_get_flow_stats()
1642 spin_lock(&flow->stats_lock); in bnxt_tc_get_flow_stats()
1646 lastused = flow->lastused; in bnxt_tc_get_flow_stats()
1647 spin_unlock(&flow->stats_lock); in bnxt_tc_get_flow_stats()
1668 if (flow_node->flow.l2_key.dir == BNXT_DIR_RX) in bnxt_fill_cfa_stats_req()
1769 struct bnxt_tc_flow *flow = &flow_node->flow; in bnxt_tc_flow_stats_batch_update() local
1771 spin_lock(&flow->stats_lock); in bnxt_tc_flow_stats_batch_update()
1772 bnxt_flow_stats_accum(tc_info, &flow->stats, in bnxt_tc_flow_stats_batch_update()
1774 if (flow->stats.packets != flow->prev_stats.packets) in bnxt_tc_flow_stats_batch_update()
1775 flow->lastused = jiffies; in bnxt_tc_flow_stats_batch_update()
1776 spin_unlock(&flow->stats_lock); in bnxt_tc_flow_stats_batch_update()
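The stats matches show both halves of the accounting: the batch updater accumulates hardware counters under stats_lock and bumps lastused only when the packet count actually moved, while the read side (lines 1642-1647) snapshots current and previous counters under the same lock. The update side, recomposed (the hw_stats field name is an assumption from context):

    spin_lock(&flow->stats_lock);
    bnxt_flow_stats_accum(tc_info, &flow->stats,
                          &stats_batch[i].hw_stats);
    if (flow->stats.packets != flow->prev_stats.packets)
            flow->lastused = jiffies;   /* only advance on real traffic */
    spin_unlock(&flow->stats_lock);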