Lines matching refs: flow
134 struct tid_rdma_flow *flow,
881 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_4k() argument
898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); in tid_rdma_find_phys_blocks_4k()
901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, in tid_rdma_find_phys_blocks_4k()
935 trace_hfi1_tid_pageset(flow->req->qp, setcount, in tid_rdma_find_phys_blocks_4k()
1020 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_8k() argument
1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); in tid_rdma_find_phys_blocks_8k()
1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); in tid_rdma_find_phys_blocks_8k()
1087 static u32 kern_find_pages(struct tid_rdma_flow *flow, in kern_find_pages() argument
1091 struct tid_rdma_request *req = flow->req; in kern_find_pages()
1093 u32 length = flow->req->seg_len; in kern_find_pages()
1117 flow->length = flow->req->seg_len - length; in kern_find_pages()
1122 static void dma_unmap_flow(struct tid_rdma_flow *flow) in dma_unmap_flow() argument
1128 dd = flow->req->rcd->dd; in dma_unmap_flow()
1129 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; in dma_unmap_flow()
1141 static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages) in dma_map_flow() argument
1144 struct hfi1_devdata *dd = flow->req->rcd->dd; in dma_map_flow()
1147 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; in dma_map_flow()
1157 dma_unmap_flow(flow); in dma_map_flow()
1166 static inline bool dma_mapped(struct tid_rdma_flow *flow) in dma_mapped() argument
1168 return !!flow->pagesets[0].mapped; in dma_mapped()
1175 static int kern_get_phys_blocks(struct tid_rdma_flow *flow, in kern_get_phys_blocks() argument
1182 if (flow->npagesets) { in kern_get_phys_blocks()
1183 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, in kern_get_phys_blocks()
1184 flow); in kern_get_phys_blocks()
1185 if (!dma_mapped(flow)) in kern_get_phys_blocks()
1186 return dma_map_flow(flow, pages); in kern_get_phys_blocks()
1190 npages = kern_find_pages(flow, pages, ss, last); in kern_get_phys_blocks()
1192 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096)) in kern_get_phys_blocks()
1193 flow->npagesets = in kern_get_phys_blocks()
1194 tid_rdma_find_phys_blocks_4k(flow, pages, npages, in kern_get_phys_blocks()
1195 flow->pagesets); in kern_get_phys_blocks()
1197 flow->npagesets = in kern_get_phys_blocks()
1198 tid_rdma_find_phys_blocks_8k(flow, pages, npages, in kern_get_phys_blocks()
1199 flow->pagesets); in kern_get_phys_blocks()
1201 return dma_map_flow(flow, pages); in kern_get_phys_blocks()
1204 static inline void kern_add_tid_node(struct tid_rdma_flow *flow, in kern_add_tid_node() argument
1208 struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++]; in kern_add_tid_node()
1210 WARN_ON_ONCE(flow->tnode_cnt >= in kern_add_tid_node()
1220 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1, in kern_add_tid_node()
1237 static int kern_alloc_tids(struct tid_rdma_flow *flow) in kern_alloc_tids() argument
1239 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_alloc_tids()
1245 flow->tnode_cnt = 0; in kern_alloc_tids()
1246 ngroups = flow->npagesets / dd->rcv_entries.group_size; in kern_alloc_tids()
1252 kern_add_tid_node(flow, rcd, "complete groups", group, in kern_alloc_tids()
1260 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1266 use = min_t(u32, flow->npagesets - pageidx, in kern_alloc_tids()
1268 kern_add_tid_node(flow, rcd, "used groups", used, use); in kern_alloc_tids()
1271 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1287 use = min_t(u32, flow->npagesets - pageidx, group->size); in kern_alloc_tids()
1288 kern_add_tid_node(flow, rcd, "complete continue", group, use); in kern_alloc_tids()
1290 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1293 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ", in kern_alloc_tids()
1294 (u64)flow->npagesets); in kern_alloc_tids()
1300 static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num, in kern_program_rcv_group() argument
1303 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_program_rcv_group()
1305 struct kern_tid_node *node = &flow->tnode[grp_num]; in kern_program_rcv_group()
1308 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT; in kern_program_rcv_group()
1319 pset = &flow->pagesets[(*pset_idx)++]; in kern_program_rcv_group()
1342 flow->tid_entry[flow->tidcnt++] = in kern_program_rcv_group()
1347 flow->req->qp, flow->tidcnt - 1, in kern_program_rcv_group()
1348 flow->tid_entry[flow->tidcnt - 1]); in kern_program_rcv_group()
1351 flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg); in kern_program_rcv_group()
1368 static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num) in kern_unprogram_rcv_group() argument
1370 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group()
1372 struct kern_tid_node *node = &flow->tnode[grp_num]; in kern_unprogram_rcv_group()
1399 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group()
1407 static void kern_program_rcvarray(struct tid_rdma_flow *flow) in kern_program_rcvarray() argument
1412 flow->npkts = 0; in kern_program_rcvarray()
1413 flow->tidcnt = 0; in kern_program_rcvarray()
1414 for (i = 0; i < flow->tnode_cnt; i++) in kern_program_rcvarray()
1415 kern_program_rcv_group(flow, i, &pset_idx); in kern_program_rcvarray()
1416 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow); in kern_program_rcvarray()
1465 struct tid_rdma_flow *flow = &req->flows[req->setup_head]; in hfi1_kern_exp_rcv_setup() local
1489 if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) { in hfi1_kern_exp_rcv_setup()
1490 hfi1_wait_kmem(flow->req->qp); in hfi1_kern_exp_rcv_setup()
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) in hfi1_kern_exp_rcv_setup()
1503 if (kern_alloc_tids(flow)) in hfi1_kern_exp_rcv_setup()
1509 kern_program_rcvarray(flow); in hfi1_kern_exp_rcv_setup()
1519 memset(&flow->flow_state, 0x0, sizeof(flow->flow_state)); in hfi1_kern_exp_rcv_setup()
1520 flow->idx = qpriv->flow_state.index; in hfi1_kern_exp_rcv_setup()
1521 flow->flow_state.generation = qpriv->flow_state.generation; in hfi1_kern_exp_rcv_setup()
1522 flow->flow_state.spsn = qpriv->flow_state.psn; in hfi1_kern_exp_rcv_setup()
1523 flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; in hfi1_kern_exp_rcv_setup()
1524 flow->flow_state.r_next_psn = in hfi1_kern_exp_rcv_setup()
1525 full_flow_psn(flow, flow->flow_state.spsn); in hfi1_kern_exp_rcv_setup()
1526 qpriv->flow_state.psn += flow->npkts; in hfi1_kern_exp_rcv_setup()
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1542 static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow) in hfi1_tid_rdma_reset_flow() argument
1544 flow->npagesets = 0; in hfi1_tid_rdma_reset_flow()
1556 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_kern_exp_rcv_clear() local
1569 for (i = 0; i < flow->tnode_cnt; i++) in hfi1_kern_exp_rcv_clear()
1570 kern_unprogram_rcv_group(flow, i); in hfi1_kern_exp_rcv_clear()
1572 flow->tnode_cnt = 0; in hfi1_kern_exp_rcv_clear()
1577 dma_unmap_flow(flow); in hfi1_kern_exp_rcv_clear()
1579 hfi1_tid_rdma_reset_flow(flow); in hfi1_kern_exp_rcv_clear()
1685 struct tid_rdma_flow *flow; in find_flow_ib() local
1691 flow = &req->flows[tail]; in find_flow_ib()
1692 if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 && in find_flow_ib()
1693 cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) { in find_flow_ib()
1696 return flow; in find_flow_ib()
1708 struct tid_rdma_flow *flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_read_packet() local
1718 *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt); in hfi1_build_tid_rdma_read_packet()
1719 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_read_packet()
1722 req_addr = &flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_read_packet()
1723 req_len = sizeof(*flow->tid_entry) * in hfi1_build_tid_rdma_read_packet()
1724 (flow->tidcnt - flow->tid_idx); in hfi1_build_tid_rdma_read_packet()
1749 req->cur_seg * req->seg_len + flow->sent); in hfi1_build_tid_rdma_read_packet()
1753 cpu_to_be32((flow->flow_state.generation << in hfi1_build_tid_rdma_read_packet()
1755 ((flow->flow_state.spsn + flow->pkt) & in hfi1_build_tid_rdma_read_packet()
1759 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_read_packet()
1769 flow->sent += *len; in hfi1_build_tid_rdma_read_packet()
1794 struct tid_rdma_flow *flow = NULL; in hfi1_build_tid_rdma_read_req() local
1864 flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_read_req()
1865 flow->pkt = 0; in hfi1_build_tid_rdma_read_req()
1866 flow->tid_idx = 0; in hfi1_build_tid_rdma_read_req()
1867 flow->sent = 0; in hfi1_build_tid_rdma_read_req()
1870 flow->flow_state.ib_spsn = req->s_next_psn; in hfi1_build_tid_rdma_read_req()
1871 flow->flow_state.ib_lpsn = in hfi1_build_tid_rdma_read_req()
1872 flow->flow_state.ib_spsn + flow->npkts - 1; in hfi1_build_tid_rdma_read_req()
1876 req->s_next_psn += flow->npkts; in hfi1_build_tid_rdma_read_req()
1897 struct tid_rdma_flow *flow; in tid_rdma_rcv_read_request() local
1903 flow = &req->flows[req->setup_head]; in tid_rdma_rcv_read_request()
1907 if (pktlen > sizeof(flow->tid_entry)) in tid_rdma_rcv_read_request()
1909 memcpy(flow->tid_entry, packet->ebuf, pktlen); in tid_rdma_rcv_read_request()
1910 flow->tidcnt = pktlen / sizeof(*flow->tid_entry); in tid_rdma_rcv_read_request()
1916 flow->npkts = rvt_div_round_up_mtu(qp, len); in tid_rdma_rcv_read_request()
1917 for (i = 0; i < flow->tidcnt; i++) { in tid_rdma_rcv_read_request()
1919 flow->tid_entry[i]); in tid_rdma_rcv_read_request()
1920 tlen = EXP_TID_GET(flow->tid_entry[i], LEN); in tid_rdma_rcv_read_request()
1937 flow->pkt = 0; in tid_rdma_rcv_read_request()
1938 flow->tid_idx = 0; in tid_rdma_rcv_read_request()
1939 flow->tid_offset = 0; in tid_rdma_rcv_read_request()
1940 flow->sent = 0; in tid_rdma_rcv_read_request()
1941 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp); in tid_rdma_rcv_read_request()
1942 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & in tid_rdma_rcv_read_request()
1945 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; in tid_rdma_rcv_read_request()
1946 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; in tid_rdma_rcv_read_request()
1947 flow->length = len; in tid_rdma_rcv_read_request()
1949 flow->flow_state.lpsn = flow->flow_state.spsn + in tid_rdma_rcv_read_request()
1950 flow->npkts - 1; in tid_rdma_rcv_read_request()
1951 flow->flow_state.ib_spsn = psn; in tid_rdma_rcv_read_request()
1952 flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1; in tid_rdma_rcv_read_request()
1954 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow); in tid_rdma_rcv_read_request()
1966 e->lpsn = psn + flow->npkts - 1; in tid_rdma_rcv_read_request()
2351 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_build_tid_rdma_read_resp() local
2352 u32 tidentry = flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_read_resp()
2360 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_read_resp()
2361 flow->sent += *len; in hfi1_build_tid_rdma_read_resp()
2362 next_offset = flow->tid_offset + *len; in hfi1_build_tid_rdma_read_resp()
2363 last_pkt = (flow->sent >= flow->length); in hfi1_build_tid_rdma_read_resp()
2365 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_read_resp()
2366 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_read_resp()
2380 KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om); in hfi1_build_tid_rdma_read_resp()
2386 resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn + in hfi1_build_tid_rdma_read_resp()
2387 flow->pkt)); in hfi1_build_tid_rdma_read_resp()
2390 *bth1 = flow->tid_qpn; in hfi1_build_tid_rdma_read_resp()
2391 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & in hfi1_build_tid_rdma_read_resp()
2393 (flow->flow_state.generation << in hfi1_build_tid_rdma_read_resp()
2402 flow->tid_offset = 0; in hfi1_build_tid_rdma_read_resp()
2403 flow->tid_idx++; in hfi1_build_tid_rdma_read_resp()
2405 flow->tid_offset = next_offset; in hfi1_build_tid_rdma_read_resp()
2456 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_read_resp() local
2474 flow = &req->flows[req->clear_tail]; in hfi1_rc_rcv_tid_rdma_read_resp()
2476 if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) { in hfi1_rc_rcv_tid_rdma_read_resp()
2477 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_read_resp()
2479 if (cmp_psn(kpsn, flow->flow_state.r_next_psn)) in hfi1_rc_rcv_tid_rdma_read_resp()
2481 flow->flow_state.r_next_psn = mask_psn(kpsn + 1); in hfi1_rc_rcv_tid_rdma_read_resp()
2512 flow->flow_state.r_next_psn = mask_psn(kpsn + 1); in hfi1_rc_rcv_tid_rdma_read_resp()
2531 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow); in hfi1_rc_rcv_tid_rdma_read_resp()
2633 struct tid_rdma_flow *flow; in restart_tid_rdma_read_req() local
2638 flow = &req->flows[req->clear_tail]; in restart_tid_rdma_read_req()
2639 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0); in restart_tid_rdma_read_req()
2665 struct tid_rdma_flow *flow; in handle_read_kdeth_eflags() local
2756 flow = &req->flows[req->clear_tail]; in handle_read_kdeth_eflags()
2759 flow); in handle_read_kdeth_eflags()
2762 flow->flow_state.r_next_psn); in handle_read_kdeth_eflags()
2785 fpsn = full_flow_psn(flow, in handle_read_kdeth_eflags()
2786 flow->flow_state.lpsn); in handle_read_kdeth_eflags()
2793 flow->flow_state.r_next_psn = in handle_read_kdeth_eflags()
2799 flow->idx); in handle_read_kdeth_eflags()
2800 flow->flow_state.r_next_psn = last_psn; in handle_read_kdeth_eflags()
2866 struct tid_rdma_flow *flow; in hfi1_handle_kdeth_eflags() local
2945 flow = &req->flows[req->clear_tail]; in hfi1_handle_kdeth_eflags()
2951 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow); in hfi1_handle_kdeth_eflags()
2959 flow->flow_state.r_next_psn = in hfi1_handle_kdeth_eflags()
2961 flow->idx); in hfi1_handle_kdeth_eflags()
2963 flow->flow_state.r_next_psn; in hfi1_handle_kdeth_eflags()
2975 flow->flow_state.r_next_psn); in hfi1_handle_kdeth_eflags()
2987 if (psn == full_flow_psn(flow, in hfi1_handle_kdeth_eflags()
2988 flow->flow_state.lpsn)) in hfi1_handle_kdeth_eflags()
2990 flow->flow_state.r_next_psn = in hfi1_handle_kdeth_eflags()
2993 flow->flow_state.r_next_psn; in hfi1_handle_kdeth_eflags()
3034 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn); in hfi1_handle_kdeth_eflags()
3050 struct tid_rdma_flow *flow; in hfi1_tid_rdma_restart_req() local
3058 flow = find_flow_ib(req, *bth2, &fidx); in hfi1_tid_rdma_restart_req()
3059 if (!flow) { in hfi1_tid_rdma_restart_req()
3070 flow = &req->flows[fidx]; in hfi1_tid_rdma_restart_req()
3075 delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn); in hfi1_tid_rdma_restart_req()
3078 full_flow_psn(flow, in hfi1_tid_rdma_restart_req()
3079 flow->flow_state.spsn)); in hfi1_tid_rdma_restart_req()
3081 trace_hfi1_tid_flow_restart_req(qp, fidx, flow); in hfi1_tid_rdma_restart_req()
3082 diff = delta_pkts + flow->resync_npkts; in hfi1_tid_rdma_restart_req()
3084 flow->sent = 0; in hfi1_tid_rdma_restart_req()
3085 flow->pkt = 0; in hfi1_tid_rdma_restart_req()
3086 flow->tid_idx = 0; in hfi1_tid_rdma_restart_req()
3087 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3089 for (tididx = 0; tididx < flow->tidcnt; tididx++) { in hfi1_tid_rdma_restart_req()
3090 u32 tidentry = flow->tid_entry[tididx], tidlen, in hfi1_tid_rdma_restart_req()
3093 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3097 flow->pkt += npkts; in hfi1_tid_rdma_restart_req()
3098 flow->sent += (npkts == tidnpkts ? tidlen : in hfi1_tid_rdma_restart_req()
3100 flow->tid_offset += npkts * qp->pmtu; in hfi1_tid_rdma_restart_req()
3108 flow->sent, 0); in hfi1_tid_rdma_restart_req()
3116 flow->pkt -= flow->resync_npkts; in hfi1_tid_rdma_restart_req()
3119 if (flow->tid_offset == in hfi1_tid_rdma_restart_req()
3120 EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) { in hfi1_tid_rdma_restart_req()
3122 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3124 flow->tid_idx = tididx; in hfi1_tid_rdma_restart_req()
3131 trace_hfi1_tid_flow_restart_req(qp, fidx, flow); in hfi1_tid_rdma_restart_req()
3843 struct tid_rdma_flow *flow = NULL; in hfi1_build_tid_rdma_write_resp() local
3852 flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_write_resp()
3873 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3880 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3888 flow->flow_state.resp_ib_psn = bth2; in hfi1_build_tid_rdma_write_resp()
3889 resp_addr = (void *)flow->tid_entry; in hfi1_build_tid_rdma_write_resp()
3890 resp_len = sizeof(*flow->tid_entry) * flow->tidcnt; in hfi1_build_tid_rdma_write_resp()
3920 cpu_to_be32((flow->flow_state.generation << in hfi1_build_tid_rdma_write_resp()
3922 (flow->flow_state.spsn & in hfi1_build_tid_rdma_write_resp()
3926 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_write_resp()
4046 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_write_resp() local
4106 flow = &req->flows[req->setup_head]; in hfi1_rc_rcv_tid_rdma_write_resp()
4107 flow->pkt = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4108 flow->tid_idx = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4109 flow->tid_offset = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4110 flow->sent = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4111 flow->resync_npkts = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4112 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp); in hfi1_rc_rcv_tid_rdma_write_resp()
4113 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & in hfi1_rc_rcv_tid_rdma_write_resp()
4116 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; in hfi1_rc_rcv_tid_rdma_write_resp()
4117 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_resp()
4118 flow->flow_state.resp_ib_psn = psn; in hfi1_rc_rcv_tid_rdma_write_resp()
4119 flow->length = min_t(u32, req->seg_len, in hfi1_rc_rcv_tid_rdma_write_resp()
4122 flow->npkts = rvt_div_round_up_mtu(qp, flow->length); in hfi1_rc_rcv_tid_rdma_write_resp()
4123 flow->flow_state.lpsn = flow->flow_state.spsn + in hfi1_rc_rcv_tid_rdma_write_resp()
4124 flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_write_resp()
4127 if (pktlen > sizeof(flow->tid_entry)) { in hfi1_rc_rcv_tid_rdma_write_resp()
4131 memcpy(flow->tid_entry, packet->ebuf, pktlen); in hfi1_rc_rcv_tid_rdma_write_resp()
4132 flow->tidcnt = pktlen / sizeof(*flow->tid_entry); in hfi1_rc_rcv_tid_rdma_write_resp()
4133 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow); in hfi1_rc_rcv_tid_rdma_write_resp()
4141 for (i = 0; i < flow->tidcnt; i++) { in hfi1_rc_rcv_tid_rdma_write_resp()
4143 qp, i, flow->tid_entry[i]); in hfi1_rc_rcv_tid_rdma_write_resp()
4144 if (!EXP_TID_GET(flow->tid_entry[i], LEN)) { in hfi1_rc_rcv_tid_rdma_write_resp()
4148 tidlen += EXP_TID_GET(flow->tid_entry[i], LEN); in hfi1_rc_rcv_tid_rdma_write_resp()
4150 if (tidlen * PAGE_SIZE < flow->length) { in hfi1_rc_rcv_tid_rdma_write_resp()
4210 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_build_tid_rdma_packet() local
4214 u32 tidentry = flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_packet()
4225 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_packet()
4226 flow->sent += *len; in hfi1_build_tid_rdma_packet()
4227 next_offset = flow->tid_offset + *len; in hfi1_build_tid_rdma_packet()
4228 last_pkt = (flow->tid_idx == (flow->tidcnt - 1) && in hfi1_build_tid_rdma_packet()
4229 next_offset >= tidlen) || (flow->sent >= flow->length); in hfi1_build_tid_rdma_packet()
4230 trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_packet()
4231 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_packet()
4241 KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om); in hfi1_build_tid_rdma_packet()
4246 *bth1 = flow->tid_qpn; in hfi1_build_tid_rdma_packet()
4247 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & in hfi1_build_tid_rdma_packet()
4249 (flow->flow_state.generation << in hfi1_build_tid_rdma_packet()
4253 if (flow->flow_state.lpsn + 1 + in hfi1_build_tid_rdma_packet()
4261 flow->tid_offset = 0; in hfi1_build_tid_rdma_packet()
4262 flow->tid_idx++; in hfi1_build_tid_rdma_packet()
4264 flow->tid_offset = next_offset; in hfi1_build_tid_rdma_packet()
4277 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_write_data() local
4295 flow = &req->flows[req->clear_tail]; in hfi1_rc_rcv_tid_rdma_write_data()
4296 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) { in hfi1_rc_rcv_tid_rdma_write_data()
4297 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_write_data()
4299 if (cmp_psn(psn, flow->flow_state.r_next_psn)) in hfi1_rc_rcv_tid_rdma_write_data()
4302 flow->flow_state.r_next_psn = mask_psn(psn + 1); in hfi1_rc_rcv_tid_rdma_write_data()
4324 full_flow_psn(flow, flow->flow_state.spsn)) * in hfi1_rc_rcv_tid_rdma_write_data()
4346 flow->flow_state.r_next_psn = mask_psn(psn + 1); in hfi1_rc_rcv_tid_rdma_write_data()
4349 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_data()
4405 priv->r_next_psn_kdeth = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_write_data()
4414 priv->s_nak_psn = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_write_data()
4433 struct tid_rdma_flow *flow = &req->flows[iflow]; in hfi1_build_tid_rdma_write_ack() local
4454 *bth2 = full_flow_psn(flow, flow->flow_state.lpsn); in hfi1_build_tid_rdma_write_ack()
4460 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_write_ack()
4466 cpu_to_be32(flow->flow_state.resp_ib_psn); in hfi1_build_tid_rdma_write_ack()
4504 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_ack() local
4545 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4546 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4549 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 || in hfi1_rc_rcv_tid_rdma_ack()
4550 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0) in hfi1_rc_rcv_tid_rdma_ack()
4554 full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 && in hfi1_rc_rcv_tid_rdma_ack()
4559 req->r_last_acked = flow->flow_state.resp_ib_psn; in hfi1_rc_rcv_tid_rdma_ack()
4575 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4576 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4637 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4647 fpsn = full_flow_psn(flow, flow->flow_state.spsn); in hfi1_rc_rcv_tid_rdma_ack()
4655 if (flow->flow_state.generation != in hfi1_rc_rcv_tid_rdma_ack()
4658 flow->resync_npkts += in hfi1_rc_rcv_tid_rdma_ack()
4675 flow = &rptr->flows[fidx]; in hfi1_rc_rcv_tid_rdma_ack()
4676 gen = flow->flow_state.generation; in hfi1_rc_rcv_tid_rdma_ack()
4678 flow->flow_state.spsn != in hfi1_rc_rcv_tid_rdma_ack()
4681 lpsn = flow->flow_state.lpsn; in hfi1_rc_rcv_tid_rdma_ack()
4682 lpsn = full_flow_psn(flow, lpsn); in hfi1_rc_rcv_tid_rdma_ack()
4683 flow->npkts = in hfi1_rc_rcv_tid_rdma_ack()
4687 flow->flow_state.generation = in hfi1_rc_rcv_tid_rdma_ack()
4689 flow->flow_state.spsn = spsn; in hfi1_rc_rcv_tid_rdma_ack()
4690 flow->flow_state.lpsn = in hfi1_rc_rcv_tid_rdma_ack()
4691 flow->flow_state.spsn + in hfi1_rc_rcv_tid_rdma_ack()
4692 flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_ack()
4693 flow->pkt = 0; in hfi1_rc_rcv_tid_rdma_ack()
4694 spsn += flow->npkts; in hfi1_rc_rcv_tid_rdma_ack()
4695 resync_psn += flow->npkts; in hfi1_rc_rcv_tid_rdma_ack()
4698 flow); in hfi1_rc_rcv_tid_rdma_ack()
4723 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4724 flpsn = full_flow_psn(flow, flow->flow_state.lpsn); in hfi1_rc_rcv_tid_rdma_ack()
4728 flow); in hfi1_rc_rcv_tid_rdma_ack()
4850 struct tid_rdma_flow *flow = &req->flows[fidx]; in hfi1_build_tid_rdma_resync() local
4860 generation = kern_flow_generation_next(flow->flow_state.generation); in hfi1_build_tid_rdma_resync()
4878 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_resync() local
4947 flow = &req->flows[flow_idx]; in hfi1_rc_rcv_tid_rdma_resync()
4948 lpsn = full_flow_psn(flow, in hfi1_rc_rcv_tid_rdma_resync()
4949 flow->flow_state.lpsn); in hfi1_rc_rcv_tid_rdma_resync()
4950 next = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_resync()
4951 flow->npkts = delta_psn(lpsn, next - 1); in hfi1_rc_rcv_tid_rdma_resync()
4952 flow->flow_state.generation = fs->generation; in hfi1_rc_rcv_tid_rdma_resync()
4953 flow->flow_state.spsn = fs->psn; in hfi1_rc_rcv_tid_rdma_resync()
4954 flow->flow_state.lpsn = in hfi1_rc_rcv_tid_rdma_resync()
4955 flow->flow_state.spsn + flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_resync()
4956 flow->flow_state.r_next_psn = in hfi1_rc_rcv_tid_rdma_resync()
4957 full_flow_psn(flow, in hfi1_rc_rcv_tid_rdma_resync()
4958 flow->flow_state.spsn); in hfi1_rc_rcv_tid_rdma_resync()
4959 fs->psn += flow->npkts; in hfi1_rc_rcv_tid_rdma_resync()
4961 flow); in hfi1_rc_rcv_tid_rdma_resync()
5197 u16 flow; in make_tid_rdma_ack() local
5261 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS); in make_tid_rdma_ack()
5289 full_flow_psn(&req->flows[flow], in make_tid_rdma_ack()
5290 req->flows[flow].flow_state.lpsn)) > 0))) { in make_tid_rdma_ack()
5300 flow = req->acked_tail; in make_tid_rdma_ack()
5308 hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1, in make_tid_rdma_ack()
5518 struct tid_rdma_flow *flow, in update_r_next_psn_fecn() argument
5529 flow->flow_state.r_next_psn = in update_r_next_psn_fecn()
5530 read_r_next_psn(dd, rcd->ctxt, flow->idx); in update_r_next_psn_fecn()
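Several of the matches above (for example source lines 1753-1756, 2391-2393, and 4247-4250) build a KDETH flow PSN by OR-ing the flow's generation, shifted up by HFI1_KDETH_BTH_SEQ_SHIFT, with a sequence number masked by HFI1_KDETH_BTH_SEQ_MASK; full_flow_psn() appears repeatedly as the helper for the same composition. Below is a minimal stand-alone sketch of that bit layout. The constant values, the PSN_MASK, the reduced flow_state struct, and the main() demo are illustrative assumptions for this sketch, not the driver's actual definitions.

```c
#include <inttypes.h>
#include <stdio.h>

/* Illustrative values only; the real constants live in the hfi1 headers. */
#define KDETH_BTH_SEQ_SHIFT 11u
#define KDETH_BTH_SEQ_MASK  ((1u << KDETH_BTH_SEQ_SHIFT) - 1)
#define PSN_MASK            0x7fffffffu   /* assumed 31-bit PSN space */

/* Reduced stand-in for the flow-state fields used by the listing above. */
struct flow_state {
	uint32_t generation; /* KDETH generation for this flow */
	uint32_t spsn;       /* starting KDETH sequence number */
};

/* Compose a full flow PSN: generation in the high bits, sequence below. */
static uint32_t full_flow_psn(const struct flow_state *fs, uint32_t psn)
{
	return ((fs->generation << KDETH_BTH_SEQ_SHIFT) |
		(psn & KDETH_BTH_SEQ_MASK)) & PSN_MASK;
}

int main(void)
{
	struct flow_state fs = { .generation = 3, .spsn = 5 };

	/* e.g. the PSN carried by the second packet of the flow (pkt == 1) */
	printf("0x%" PRIx32 "\n", full_flow_psn(&fs, fs.spsn + 1));
	return 0;
}
```

This mirrors the pattern visible in hfi1_build_tid_rdma_read_packet(), hfi1_build_tid_rdma_read_resp(), and hfi1_build_tid_rdma_packet(), where `(flow->flow_state.spsn + flow->pkt)` supplies the sequence part and the generation supplies the upper bits of the on-the-wire PSN.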