Lines Matching refs: t

159 #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))  argument
196 struct smb_direct_transport *t; member
212 static int smb_direct_post_send_data(struct smb_direct_transport *t,
218 smb_trans_direct_transfort(struct ksmbd_transport *t) in smb_trans_direct_transfort() argument
220 return container_of(t, struct smb_direct_transport, transport); in smb_trans_direct_transfort()
237 smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t) in get_free_recvmsg() argument
241 spin_lock(&t->recvmsg_queue_lock); in get_free_recvmsg()
242 if (!list_empty(&t->recvmsg_queue)) { in get_free_recvmsg()
243 recvmsg = list_first_entry(&t->recvmsg_queue, in get_free_recvmsg()
248 spin_unlock(&t->recvmsg_queue_lock); in get_free_recvmsg()
252 static void put_recvmsg(struct smb_direct_transport *t, in put_recvmsg() argument
255 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_recvmsg()
258 spin_lock(&t->recvmsg_queue_lock); in put_recvmsg()
259 list_add(&recvmsg->list, &t->recvmsg_queue); in put_recvmsg()
260 spin_unlock(&t->recvmsg_queue_lock); in put_recvmsg()
264 smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t) in get_empty_recvmsg() argument
268 spin_lock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
269 if (!list_empty(&t->empty_recvmsg_queue)) { in get_empty_recvmsg()
270 recvmsg = list_first_entry(&t->empty_recvmsg_queue, in get_empty_recvmsg()
274 spin_unlock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
278 static void put_empty_recvmsg(struct smb_direct_transport *t, in put_empty_recvmsg() argument
281 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_empty_recvmsg()
284 spin_lock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
285 list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue); in put_empty_recvmsg()
286 spin_unlock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
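
The get_*/put_* helpers above all follow the same spinlock-protected free-list pattern (the real put_recvmsg()/put_empty_recvmsg() additionally DMA-unmap the buffer first). A minimal sketch of that pattern, using hypothetical pool/entry names rather than the ksmbd structures:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct buf_pool {
            spinlock_t lock;
            struct list_head free_list;
    };

    struct buf_entry {
            struct list_head list;
            /* payload would follow here */
    };

    /* Take one entry off the free list, or return NULL if it is empty. */
    static struct buf_entry *pool_get(struct buf_pool *p)
    {
            struct buf_entry *e = NULL;

            spin_lock(&p->lock);
            if (!list_empty(&p->free_list)) {
                    e = list_first_entry(&p->free_list, struct buf_entry, list);
                    list_del(&e->list);
            }
            spin_unlock(&p->lock);
            return e;
    }

    /* Return an entry to the free list. */
    static void pool_put(struct buf_pool *p, struct buf_entry *e)
    {
            spin_lock(&p->lock);
            list_add(&e->list, &p->free_list);
            spin_unlock(&p->lock);
    }
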
289 static void enqueue_reassembly(struct smb_direct_transport *t, in enqueue_reassembly() argument
293 spin_lock(&t->reassembly_queue_lock); in enqueue_reassembly()
294 list_add_tail(&recvmsg->list, &t->reassembly_queue); in enqueue_reassembly()
295 t->reassembly_queue_length++; in enqueue_reassembly()
303 t->reassembly_data_length += data_length; in enqueue_reassembly()
304 spin_unlock(&t->reassembly_queue_lock); in enqueue_reassembly()
307 static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t) in get_first_reassembly() argument
309 if (!list_empty(&t->reassembly_queue)) in get_first_reassembly()
310 return list_first_entry(&t->reassembly_queue, in get_first_reassembly()
318 struct smb_direct_transport *t = in smb_direct_disconnect_rdma_work() local
322 if (t->status == SMB_DIRECT_CS_CONNECTED) { in smb_direct_disconnect_rdma_work()
323 t->status = SMB_DIRECT_CS_DISCONNECTING; in smb_direct_disconnect_rdma_work()
324 rdma_disconnect(t->cm_id); in smb_direct_disconnect_rdma_work()
329 smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t) in smb_direct_disconnect_rdma_connection() argument
331 if (t->status == SMB_DIRECT_CS_CONNECTED) in smb_direct_disconnect_rdma_connection()
332 queue_work(smb_direct_wq, &t->disconnect_work); in smb_direct_disconnect_rdma_connection()
337 struct smb_direct_transport *t = container_of(work, in smb_direct_send_immediate_work() local
340 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_send_immediate_work()
343 smb_direct_post_send_data(t, NULL, NULL, 0, 0); in smb_direct_send_immediate_work()
348 struct smb_direct_transport *t; in alloc_transport() local
351 t = kzalloc(sizeof(*t), GFP_KERNEL); in alloc_transport()
352 if (!t) in alloc_transport()
355 t->cm_id = cm_id; in alloc_transport()
356 cm_id->context = t; in alloc_transport()
358 t->status = SMB_DIRECT_CS_NEW; in alloc_transport()
359 init_waitqueue_head(&t->wait_status); in alloc_transport()
361 spin_lock_init(&t->reassembly_queue_lock); in alloc_transport()
362 INIT_LIST_HEAD(&t->reassembly_queue); in alloc_transport()
363 t->reassembly_data_length = 0; in alloc_transport()
364 t->reassembly_queue_length = 0; in alloc_transport()
365 init_waitqueue_head(&t->wait_reassembly_queue); in alloc_transport()
366 init_waitqueue_head(&t->wait_send_credits); in alloc_transport()
367 init_waitqueue_head(&t->wait_rw_avail_ops); in alloc_transport()
369 spin_lock_init(&t->receive_credit_lock); in alloc_transport()
370 spin_lock_init(&t->recvmsg_queue_lock); in alloc_transport()
371 INIT_LIST_HEAD(&t->recvmsg_queue); in alloc_transport()
373 spin_lock_init(&t->empty_recvmsg_queue_lock); in alloc_transport()
374 INIT_LIST_HEAD(&t->empty_recvmsg_queue); in alloc_transport()
376 init_waitqueue_head(&t->wait_send_payload_pending); in alloc_transport()
377 atomic_set(&t->send_payload_pending, 0); in alloc_transport()
378 init_waitqueue_head(&t->wait_send_pending); in alloc_transport()
379 atomic_set(&t->send_pending, 0); in alloc_transport()
381 spin_lock_init(&t->lock_new_recv_credits); in alloc_transport()
383 INIT_DELAYED_WORK(&t->post_recv_credits_work, in alloc_transport()
385 INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); in alloc_transport()
386 INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); in alloc_transport()
391 conn->transport = KSMBD_TRANS(t); in alloc_transport()
392 KSMBD_TRANS(t)->conn = conn; in alloc_transport()
393 KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops; in alloc_transport()
394 return t; in alloc_transport()
396 kfree(t); in alloc_transport()
400 static void free_transport(struct smb_direct_transport *t) in free_transport() argument
404 wake_up_interruptible(&t->wait_send_credits); in free_transport()
407 wait_event(t->wait_send_payload_pending, in free_transport()
408 atomic_read(&t->send_payload_pending) == 0); in free_transport()
409 wait_event(t->wait_send_pending, in free_transport()
410 atomic_read(&t->send_pending) == 0); in free_transport()
412 cancel_work_sync(&t->disconnect_work); in free_transport()
413 cancel_delayed_work_sync(&t->post_recv_credits_work); in free_transport()
414 cancel_work_sync(&t->send_immediate_work); in free_transport()
416 if (t->qp) { in free_transport()
417 ib_drain_qp(t->qp); in free_transport()
418 ib_destroy_qp(t->qp); in free_transport()
423 spin_lock(&t->reassembly_queue_lock); in free_transport()
424 recvmsg = get_first_reassembly(t); in free_transport()
427 spin_unlock(&t->reassembly_queue_lock); in free_transport()
428 put_recvmsg(t, recvmsg); in free_transport()
430 spin_unlock(&t->reassembly_queue_lock); in free_transport()
433 t->reassembly_data_length = 0; in free_transport()
435 if (t->send_cq) in free_transport()
436 ib_free_cq(t->send_cq); in free_transport()
437 if (t->recv_cq) in free_transport()
438 ib_free_cq(t->recv_cq); in free_transport()
439 if (t->pd) in free_transport()
440 ib_dealloc_pd(t->pd); in free_transport()
441 if (t->cm_id) in free_transport()
442 rdma_destroy_id(t->cm_id); in free_transport()
444 smb_direct_destroy_pools(t); in free_transport()
445 ksmbd_conn_free(KSMBD_TRANS(t)->conn); in free_transport()
446 kfree(t); in free_transport()
450 *smb_direct_alloc_sendmsg(struct smb_direct_transport *t) in smb_direct_alloc_sendmsg() argument
454 msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL); in smb_direct_alloc_sendmsg()
457 msg->transport = t; in smb_direct_alloc_sendmsg()
463 static void smb_direct_free_sendmsg(struct smb_direct_transport *t, in smb_direct_free_sendmsg() argument
469 ib_dma_unmap_single(t->cm_id->device, in smb_direct_free_sendmsg()
473 ib_dma_unmap_page(t->cm_id->device, in smb_direct_free_sendmsg()
477 mempool_free(msg, t->sendmsg_mempool); in smb_direct_free_sendmsg()
527 struct smb_direct_transport *t; in recv_done() local
530 t = recvmsg->transport; in recv_done()
537 smb_direct_disconnect_rdma_connection(t); in recv_done()
539 put_empty_recvmsg(t, recvmsg); in recv_done()
553 put_empty_recvmsg(t, recvmsg); in recv_done()
556 t->negotiation_requested = true; in recv_done()
557 t->full_packet_received = true; in recv_done()
558 wake_up_interruptible(&t->wait_status); in recv_done()
568 put_empty_recvmsg(t, recvmsg); in recv_done()
576 put_empty_recvmsg(t, recvmsg); in recv_done()
580 if (t->full_packet_received) in recv_done()
584 t->full_packet_received = false; in recv_done()
586 t->full_packet_received = true; in recv_done()
588 enqueue_reassembly(t, recvmsg, (int)data_length); in recv_done()
589 wake_up_interruptible(&t->wait_reassembly_queue); in recv_done()
591 spin_lock(&t->receive_credit_lock); in recv_done()
592 receive_credits = --(t->recv_credits); in recv_done()
593 avail_recvmsg_count = t->count_avail_recvmsg; in recv_done()
594 spin_unlock(&t->receive_credit_lock); in recv_done()
596 put_empty_recvmsg(t, recvmsg); in recv_done()
598 spin_lock(&t->receive_credit_lock); in recv_done()
599 receive_credits = --(t->recv_credits); in recv_done()
600 avail_recvmsg_count = ++(t->count_avail_recvmsg); in recv_done()
601 spin_unlock(&t->receive_credit_lock); in recv_done()
604 t->recv_credit_target = in recv_done()
607 &t->send_credits); in recv_done()
611 queue_work(smb_direct_wq, &t->send_immediate_work); in recv_done()
613 if (atomic_read(&t->send_credits) > 0) in recv_done()
614 wake_up_interruptible(&t->wait_send_credits); in recv_done()
618 &t->post_recv_credits_work, 0); in recv_done()
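
The credit handling visible in recv_done() can be summarized in a short sketch: credits granted by the peer top up the local send-credit counter, the peer's credits_requested value becomes the new target for credits granted back, and waiters blocked on send credits are woken. This is a simplified illustration built from the transport fields referenced above (it assumes the smb_direct_transport definition from this file); the real function also clamps the target and schedules post_recv_credits_work when receive credits run low.

    /* Simplified sketch, not the full recv_done() logic. */
    static void apply_peer_credits(struct smb_direct_transport *t,
                                   __le16 credits_requested,
                                   __le16 credits_granted)
    {
            t->recv_credit_target = le16_to_cpu(credits_requested);
            atomic_add(le16_to_cpu(credits_granted), &t->send_credits);

            if (atomic_read(&t->send_credits) > 0)
                    wake_up_interruptible(&t->wait_send_credits);
    }
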
626 static int smb_direct_post_recv(struct smb_direct_transport *t, in smb_direct_post_recv() argument
632 recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device, in smb_direct_post_recv()
633 recvmsg->packet, t->max_recv_size, in smb_direct_post_recv()
635 ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr); in smb_direct_post_recv()
638 recvmsg->sge.length = t->max_recv_size; in smb_direct_post_recv()
639 recvmsg->sge.lkey = t->pd->local_dma_lkey; in smb_direct_post_recv()
647 ret = ib_post_recv(t->qp, &wr, NULL); in smb_direct_post_recv()
650 ib_dma_unmap_single(t->cm_id->device, in smb_direct_post_recv()
653 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_recv()
659 static int smb_direct_read(struct ksmbd_transport *t, char *buf, in smb_direct_read() argument
667 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_read()
789 struct smb_direct_transport *t = container_of(work, in smb_direct_post_recv_credits() local
796 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
797 receive_credits = t->recv_credits; in smb_direct_post_recv_credits()
798 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
800 if (receive_credits < t->recv_credit_target) { in smb_direct_post_recv_credits()
803 recvmsg = get_free_recvmsg(t); in smb_direct_post_recv_credits()
805 recvmsg = get_empty_recvmsg(t); in smb_direct_post_recv_credits()
818 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_post_recv_credits()
821 put_recvmsg(t, recvmsg); in smb_direct_post_recv_credits()
828 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
829 t->recv_credits += credits; in smb_direct_post_recv_credits()
830 t->count_avail_recvmsg -= credits; in smb_direct_post_recv_credits()
831 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
833 spin_lock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
834 t->new_recv_credits += credits; in smb_direct_post_recv_credits()
835 spin_unlock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
838 queue_work(smb_direct_wq, &t->send_immediate_work); in smb_direct_post_recv_credits()
844 struct smb_direct_transport *t; in send_done() local
848 t = sendmsg->transport; in send_done()
858 smb_direct_disconnect_rdma_connection(t); in send_done()
862 if (atomic_dec_and_test(&t->send_payload_pending)) in send_done()
863 wake_up(&t->wait_send_payload_pending); in send_done()
865 if (atomic_dec_and_test(&t->send_pending)) in send_done()
866 wake_up(&t->wait_send_pending); in send_done()
875 smb_direct_free_sendmsg(t, sibling); in send_done()
879 smb_direct_free_sendmsg(t, sibling); in send_done()
882 static int manage_credits_prior_sending(struct smb_direct_transport *t) in manage_credits_prior_sending() argument
886 spin_lock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
887 new_credits = t->new_recv_credits; in manage_credits_prior_sending()
888 t->new_recv_credits = 0; in manage_credits_prior_sending()
889 spin_unlock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
894 static int smb_direct_post_send(struct smb_direct_transport *t, in smb_direct_post_send() argument
900 atomic_inc(&t->send_payload_pending); in smb_direct_post_send()
902 atomic_inc(&t->send_pending); in smb_direct_post_send()
904 ret = ib_post_send(t->qp, wr, NULL); in smb_direct_post_send()
908 if (atomic_dec_and_test(&t->send_payload_pending)) in smb_direct_post_send()
909 wake_up(&t->wait_send_payload_pending); in smb_direct_post_send()
911 if (atomic_dec_and_test(&t->send_pending)) in smb_direct_post_send()
912 wake_up(&t->wait_send_pending); in smb_direct_post_send()
914 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_send()
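
smb_direct_post_send() and send_done() share a simple pending-counter protocol: increment an atomic counter before ib_post_send(), then decrement it, waking any waiter when it reaches zero, either on a failed post or on completion. A minimal sketch of that protocol with illustrative names (not the ksmbd helpers):

    #include <linux/atomic.h>
    #include <linux/wait.h>
    #include <rdma/ib_verbs.h>

    /*
     * Post a work request while tracking it in 'pending'.  Teardown code in
     * the style of free_transport() can then wait_event() on the counter
     * reaching zero before freeing resources.
     */
    static int post_tracked(struct ib_qp *qp, struct ib_send_wr *wr,
                            atomic_t *pending, wait_queue_head_t *waitq)
    {
            int ret;

            atomic_inc(pending);
            ret = ib_post_send(qp, wr, NULL);
            if (ret && atomic_dec_and_test(pending))
                    wake_up(waitq);
            return ret;
    }
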
919 static void smb_direct_send_ctx_init(struct smb_direct_transport *t, in smb_direct_send_ctx_init() argument
930 static int smb_direct_flush_send_list(struct smb_direct_transport *t, in smb_direct_flush_send_list() argument
954 ret = smb_direct_post_send(t, &first->wr); in smb_direct_flush_send_list()
956 smb_direct_send_ctx_init(t, send_ctx, in smb_direct_flush_send_list()
960 atomic_add(send_ctx->wr_cnt, &t->send_credits); in smb_direct_flush_send_list()
961 wake_up(&t->wait_send_credits); in smb_direct_flush_send_list()
964 smb_direct_free_sendmsg(t, first); in smb_direct_flush_send_list()
970 static int wait_for_credits(struct smb_direct_transport *t, in wait_for_credits() argument
982 t->status != SMB_DIRECT_CS_CONNECTED); in wait_for_credits()
984 if (t->status != SMB_DIRECT_CS_CONNECTED) in wait_for_credits()
991 static int wait_for_send_credits(struct smb_direct_transport *t, in wait_for_send_credits() argument
997 (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) { in wait_for_send_credits()
998 ret = smb_direct_flush_send_list(t, send_ctx, false); in wait_for_send_credits()
1003 return wait_for_credits(t, &t->wait_send_credits, &t->send_credits); in wait_for_send_credits()
1006 static int smb_direct_create_header(struct smb_direct_transport *t, in smb_direct_create_header() argument
1015 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_create_header()
1021 packet->credits_requested = cpu_to_le16(t->send_credit_target); in smb_direct_create_header()
1022 packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_create_header()
1049 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_create_header()
1053 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_create_header()
1055 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_create_header()
1061 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_create_header()
1111 static int post_sendmsg(struct smb_direct_transport *t, in post_sendmsg() argument
1118 ib_dma_sync_single_for_device(t->cm_id->device, in post_sendmsg()
1146 return smb_direct_post_send(t, &msg->wr); in post_sendmsg()
1149 static int smb_direct_post_send_data(struct smb_direct_transport *t, in smb_direct_post_send_data() argument
1159 ret = wait_for_send_credits(t, send_ctx); in smb_direct_post_send_data()
1167 ret = smb_direct_create_header(t, data_length, remaining_data_length, in smb_direct_post_send_data()
1170 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
1179 sg_cnt = get_mapped_sg_list(t->cm_id->device, in smb_direct_post_send_data()
1190 ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt, in smb_direct_post_send_data()
1199 sge->lkey = t->pd->local_dma_lkey; in smb_direct_post_send_data()
1204 ret = post_sendmsg(t, send_ctx, msg); in smb_direct_post_send_data()
1209 smb_direct_free_sendmsg(t, msg); in smb_direct_post_send_data()
1210 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
1214 static int smb_direct_writev(struct ksmbd_transport *t, in smb_direct_writev() argument
1218 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_writev()
1311 struct smb_direct_transport *t = msg->t; in read_write_done() local
1316 smb_direct_disconnect_rdma_connection(t); in read_write_done()
1319 if (atomic_inc_return(&t->rw_avail_ops) > 0) in read_write_done()
1320 wake_up(&t->wait_rw_avail_ops); in read_write_done()
1322 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, in read_write_done()
1339 static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf, in smb_direct_rdma_xmit() argument
1348 ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops); in smb_direct_rdma_xmit()
1356 atomic_inc(&t->rw_avail_ops); in smb_direct_rdma_xmit()
1365 atomic_inc(&t->rw_avail_ops); in smb_direct_rdma_xmit()
1376 ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1385 msg->t = t; in smb_direct_rdma_xmit()
1388 first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1391 ret = ib_post_send(t->qp, first_wr, NULL); in smb_direct_rdma_xmit()
1401 atomic_inc(&t->rw_avail_ops); in smb_direct_rdma_xmit()
1403 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1411 static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf, in smb_direct_rdma_write() argument
1415 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_write()
1420 static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf, in smb_direct_rdma_read() argument
1424 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_read()
1429 static void smb_direct_disconnect(struct ksmbd_transport *t) in smb_direct_disconnect() argument
1431 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_disconnect()
1444 struct smb_direct_transport *t = cm_id->context; in smb_direct_cm_handler() local
1451 t->status = SMB_DIRECT_CS_CONNECTED; in smb_direct_cm_handler()
1452 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1457 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1458 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1459 wake_up_interruptible(&t->wait_reassembly_queue); in smb_direct_cm_handler()
1460 wake_up(&t->wait_send_credits); in smb_direct_cm_handler()
1464 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1465 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1479 struct smb_direct_transport *t = context; in smb_direct_qpair_handler() local
1482 t->cm_id, ib_event_msg(event->event), event->event); in smb_direct_qpair_handler()
1487 smb_direct_disconnect_rdma_connection(t); in smb_direct_qpair_handler()
1494 static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, in smb_direct_send_negotiate_response() argument
1501 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_send_negotiate_response()
1518 cpu_to_le16(t->send_credit_target); in smb_direct_send_negotiate_response()
1519 resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_send_negotiate_response()
1520 resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size); in smb_direct_send_negotiate_response()
1521 resp->preferred_send_size = cpu_to_le32(t->max_send_size); in smb_direct_send_negotiate_response()
1522 resp->max_receive_size = cpu_to_le32(t->max_recv_size); in smb_direct_send_negotiate_response()
1524 cpu_to_le32(t->max_fragmented_recv_size); in smb_direct_send_negotiate_response()
1527 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_send_negotiate_response()
1530 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_send_negotiate_response()
1532 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1538 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_send_negotiate_response()
1540 ret = post_sendmsg(t, NULL, sendmsg); in smb_direct_send_negotiate_response()
1542 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1546 wait_event(t->wait_send_pending, in smb_direct_send_negotiate_response()
1547 atomic_read(&t->send_pending) == 0); in smb_direct_send_negotiate_response()
1551 static int smb_direct_accept_client(struct smb_direct_transport *t) in smb_direct_accept_client() argument
1559 conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, in smb_direct_accept_client()
1563 t->cm_id->device->ops.get_port_immutable(t->cm_id->device, in smb_direct_accept_client()
1564 t->cm_id->port_num, in smb_direct_accept_client()
1579 ret = rdma_accept(t->cm_id, &conn_param); in smb_direct_accept_client()
1585 wait_event_interruptible(t->wait_status, in smb_direct_accept_client()
1586 t->status != SMB_DIRECT_CS_NEW); in smb_direct_accept_client()
1587 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_accept_client()
1592 static int smb_direct_negotiate(struct smb_direct_transport *t) in smb_direct_negotiate() argument
1598 recvmsg = get_free_recvmsg(t); in smb_direct_negotiate()
1603 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_negotiate()
1609 t->negotiation_requested = false; in smb_direct_negotiate()
1610 ret = smb_direct_accept_client(t); in smb_direct_negotiate()
1616 smb_direct_post_recv_credits(&t->post_recv_credits_work.work); in smb_direct_negotiate()
1619 ret = wait_event_interruptible_timeout(t->wait_status, in smb_direct_negotiate()
1620 t->negotiation_requested || in smb_direct_negotiate()
1621 t->status == SMB_DIRECT_CS_DISCONNECTED, in smb_direct_negotiate()
1623 if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) { in smb_direct_negotiate()
1633 t->max_recv_size = min_t(int, t->max_recv_size, in smb_direct_negotiate()
1635 t->max_send_size = min_t(int, t->max_send_size, in smb_direct_negotiate()
1637 t->max_fragmented_send_size = in smb_direct_negotiate()
1640 ret = smb_direct_send_negotiate_response(t, ret); in smb_direct_negotiate()
1643 put_recvmsg(t, recvmsg); in smb_direct_negotiate()
1647 static int smb_direct_init_params(struct smb_direct_transport *t, in smb_direct_init_params() argument
1650 struct ib_device *device = t->cm_id->device; in smb_direct_init_params()
1656 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1657 max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2; in smb_direct_init_params()
1659 pr_err("max_send_size %d is too large\n", t->max_send_size); in smb_direct_init_params()
1669 t->max_rdma_rw_size = smb_direct_max_read_write_size; in smb_direct_init_params()
1670 max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1; in smb_direct_init_params()
1672 max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num, in smb_direct_init_params()
1707 t->recv_credits = 0; in smb_direct_init_params()
1708 t->count_avail_recvmsg = 0; in smb_direct_init_params()
1710 t->recv_credit_max = smb_direct_receive_credit_max; in smb_direct_init_params()
1711 t->recv_credit_target = 10; in smb_direct_init_params()
1712 t->new_recv_credits = 0; in smb_direct_init_params()
1714 t->send_credit_target = smb_direct_send_credit_target; in smb_direct_init_params()
1715 atomic_set(&t->send_credits, 0); in smb_direct_init_params()
1716 atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops); in smb_direct_init_params()
1718 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1719 t->max_recv_size = smb_direct_max_receive_size; in smb_direct_init_params()
1720 t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size; in smb_direct_init_params()
1723 cap->max_recv_wr = t->recv_credit_max; in smb_direct_init_params()
1731 static void smb_direct_destroy_pools(struct smb_direct_transport *t) in smb_direct_destroy_pools() argument
1735 while ((recvmsg = get_free_recvmsg(t))) in smb_direct_destroy_pools()
1736 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1737 while ((recvmsg = get_empty_recvmsg(t))) in smb_direct_destroy_pools()
1738 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1740 mempool_destroy(t->recvmsg_mempool); in smb_direct_destroy_pools()
1741 t->recvmsg_mempool = NULL; in smb_direct_destroy_pools()
1743 kmem_cache_destroy(t->recvmsg_cache); in smb_direct_destroy_pools()
1744 t->recvmsg_cache = NULL; in smb_direct_destroy_pools()
1746 mempool_destroy(t->sendmsg_mempool); in smb_direct_destroy_pools()
1747 t->sendmsg_mempool = NULL; in smb_direct_destroy_pools()
1749 kmem_cache_destroy(t->sendmsg_cache); in smb_direct_destroy_pools()
1750 t->sendmsg_cache = NULL; in smb_direct_destroy_pools()
1753 static int smb_direct_create_pools(struct smb_direct_transport *t) in smb_direct_create_pools() argument
1759 snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t); in smb_direct_create_pools()
1760 t->sendmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1764 if (!t->sendmsg_cache) in smb_direct_create_pools()
1767 t->sendmsg_mempool = mempool_create(t->send_credit_target, in smb_direct_create_pools()
1769 t->sendmsg_cache); in smb_direct_create_pools()
1770 if (!t->sendmsg_mempool) in smb_direct_create_pools()
1773 snprintf(name, sizeof(name), "smb_direct_resp_%p", t); in smb_direct_create_pools()
1774 t->recvmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1776 t->max_recv_size, in smb_direct_create_pools()
1778 if (!t->recvmsg_cache) in smb_direct_create_pools()
1781 t->recvmsg_mempool = in smb_direct_create_pools()
1782 mempool_create(t->recv_credit_max, mempool_alloc_slab, in smb_direct_create_pools()
1783 mempool_free_slab, t->recvmsg_cache); in smb_direct_create_pools()
1784 if (!t->recvmsg_mempool) in smb_direct_create_pools()
1787 INIT_LIST_HEAD(&t->recvmsg_queue); in smb_direct_create_pools()
1789 for (i = 0; i < t->recv_credit_max; i++) { in smb_direct_create_pools()
1790 recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL); in smb_direct_create_pools()
1793 recvmsg->transport = t; in smb_direct_create_pools()
1794 list_add(&recvmsg->list, &t->recvmsg_queue); in smb_direct_create_pools()
1796 t->count_avail_recvmsg = t->recv_credit_max; in smb_direct_create_pools()
1800 smb_direct_destroy_pools(t); in smb_direct_create_pools()
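
smb_direct_create_pools() uses the standard kmem_cache-backed mempool idiom: create a slab cache for the message size, then a mempool on top of it so a minimum number of objects is always allocatable. A minimal sketch of that idiom with generic names (the ksmbd code sizes the cache from max_recv_size and the credit targets, as the lines above show):

    #include <linux/errno.h>
    #include <linux/mempool.h>
    #include <linux/slab.h>

    static struct kmem_cache *msg_cache;
    static mempool_t *msg_pool;

    static int create_msg_pool(size_t obj_size, int min_nr)
    {
            msg_cache = kmem_cache_create("msg_cache", obj_size, 0,
                                          SLAB_HWCACHE_ALIGN, NULL);
            if (!msg_cache)
                    return -ENOMEM;

            msg_pool = mempool_create(min_nr, mempool_alloc_slab,
                                      mempool_free_slab, msg_cache);
            if (!msg_pool) {
                    kmem_cache_destroy(msg_cache);
                    msg_cache = NULL;
                    return -ENOMEM;
            }
            return 0;
    }

    static void destroy_msg_pool(void)
    {
            mempool_destroy(msg_pool);
            kmem_cache_destroy(msg_cache);
    }
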
1804 static int smb_direct_create_qpair(struct smb_direct_transport *t, in smb_direct_create_qpair() argument
1810 t->pd = ib_alloc_pd(t->cm_id->device, 0); in smb_direct_create_qpair()
1811 if (IS_ERR(t->pd)) { in smb_direct_create_qpair()
1813 ret = PTR_ERR(t->pd); in smb_direct_create_qpair()
1814 t->pd = NULL; in smb_direct_create_qpair()
1818 t->send_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1819 t->send_credit_target, 0, IB_POLL_WORKQUEUE); in smb_direct_create_qpair()
1820 if (IS_ERR(t->send_cq)) { in smb_direct_create_qpair()
1822 ret = PTR_ERR(t->send_cq); in smb_direct_create_qpair()
1823 t->send_cq = NULL; in smb_direct_create_qpair()
1827 t->recv_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1830 if (IS_ERR(t->recv_cq)) { in smb_direct_create_qpair()
1832 ret = PTR_ERR(t->recv_cq); in smb_direct_create_qpair()
1833 t->recv_cq = NULL; in smb_direct_create_qpair()
1839 qp_attr.qp_context = t; in smb_direct_create_qpair()
1843 qp_attr.send_cq = t->send_cq; in smb_direct_create_qpair()
1844 qp_attr.recv_cq = t->recv_cq; in smb_direct_create_qpair()
1847 ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr); in smb_direct_create_qpair()
1853 t->qp = t->cm_id->qp; in smb_direct_create_qpair()
1854 t->cm_id->event_handler = smb_direct_cm_handler; in smb_direct_create_qpair()
1858 if (t->qp) { in smb_direct_create_qpair()
1859 ib_destroy_qp(t->qp); in smb_direct_create_qpair()
1860 t->qp = NULL; in smb_direct_create_qpair()
1862 if (t->recv_cq) { in smb_direct_create_qpair()
1863 ib_destroy_cq(t->recv_cq); in smb_direct_create_qpair()
1864 t->recv_cq = NULL; in smb_direct_create_qpair()
1866 if (t->send_cq) { in smb_direct_create_qpair()
1867 ib_destroy_cq(t->send_cq); in smb_direct_create_qpair()
1868 t->send_cq = NULL; in smb_direct_create_qpair()
1870 if (t->pd) { in smb_direct_create_qpair()
1871 ib_dealloc_pd(t->pd); in smb_direct_create_qpair()
1872 t->pd = NULL; in smb_direct_create_qpair()
1877 static int smb_direct_prepare(struct ksmbd_transport *t) in smb_direct_prepare() argument
1879 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_prepare()
1922 struct smb_direct_transport *t; in smb_direct_handle_connect_request() local
1931 t = alloc_transport(new_cm_id); in smb_direct_handle_connect_request()
1932 if (!t) in smb_direct_handle_connect_request()
1935 KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop, in smb_direct_handle_connect_request()
1936 KSMBD_TRANS(t)->conn, "ksmbd:r%u", in smb_direct_handle_connect_request()
1938 if (IS_ERR(KSMBD_TRANS(t)->handler)) { in smb_direct_handle_connect_request()
1939 int ret = PTR_ERR(KSMBD_TRANS(t)->handler); in smb_direct_handle_connect_request()
1942 free_transport(t); in smb_direct_handle_connect_request()