Lines Matching refs:crq

225 entry->fmt = evt->crq.format; in ibmvfc_trc_start()
262 entry->fmt = evt->crq.format; in ibmvfc_trc_end()
818 evt->crq.valid = 0x80; in ibmvfc_init_event_pool()
819 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); in ibmvfc_init_event_pool()
890 struct ibmvfc_queue *crq = &vhost->crq; in ibmvfc_release_crq_queue() local
904 ibmvfc_free_queue(vhost, crq); in ibmvfc_release_crq_queue()
933 spin_lock(vhost->crq.q_lock); in ibmvfc_reenable_crq_queue()
936 spin_unlock(vhost->crq.q_lock); in ibmvfc_reenable_crq_queue()
956 struct ibmvfc_queue *crq = &vhost->crq; in ibmvfc_reset_crq() local
968 spin_lock(vhost->crq.q_lock); in ibmvfc_reset_crq()
975 memset(crq->msgs.crq, 0, PAGE_SIZE); in ibmvfc_reset_crq()
976 crq->cur = 0; in ibmvfc_reset_crq()
980 crq->msg_token, PAGE_SIZE); in ibmvfc_reset_crq()
988 spin_unlock(vhost->crq.q_lock); in ibmvfc_reset_crq()
1118 spin_lock_irqsave(&vhost->crq.l_lock, flags); in ibmvfc_purge_requests()
1119 list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list) in ibmvfc_purge_requests()
1121 list_splice_init(&vhost->crq.sent, &vhost->purge); in ibmvfc_purge_requests()
1122 spin_unlock_irqrestore(&vhost->crq.l_lock, flags); in ibmvfc_purge_requests()
1555 evt->crq.format = format; in ibmvfc_init_event()
1677 __be64 *crq_as_u64 = (__be64 *) &evt->crq; in ibmvfc_send_event()
1683 if (evt->crq.format == IBMVFC_CMD_FORMAT) in ibmvfc_send_event()
1685 else if (evt->crq.format == IBMVFC_MAD_FORMAT) in ibmvfc_send_event()
1903 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset); in ibmvfc_init_vfc_cmd()
1948 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_queuecommand()
2035 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_bsg_timeout()
2093 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_bsg_plogi()
2211 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_bsg_request()
2220 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + in ibmvfc_bsg_request()
2298 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_reset_device()
2438 queues = &vhost->crq; in ibmvfc_wait_for_ops()
2618 spin_lock(&vhost->crq.l_lock); in ibmvfc_cancel_all_sq()
2619 list_for_each_entry(evt, &vhost->crq.sent, queue_list) { in ibmvfc_cancel_all_sq()
2625 spin_unlock(&vhost->crq.l_lock); in ibmvfc_cancel_all_sq()
2635 evt = ibmvfc_init_tmf(&vhost->crq, sdev, type); in ibmvfc_cancel_all_sq()
2708 if (evt->crq.format == IBMVFC_CMD_FORMAT && in ibmvfc_match_key()
2754 spin_lock(&vhost->crq.l_lock); in ibmvfc_abort_task_set()
2755 list_for_each_entry(evt, &vhost->crq.sent, queue_list) { in ibmvfc_abort_task_set()
2761 spin_unlock(&vhost->crq.l_lock); in ibmvfc_abort_task_set()
2771 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_abort_task_set()
3113 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, in ibmvfc_handle_async() argument
3116 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); in ibmvfc_handle_async()
3120 " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id), in ibmvfc_handle_async()
3121 be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name), in ibmvfc_handle_async()
3122 ibmvfc_get_link_state(crq->link_state)); in ibmvfc_handle_async()
3124 switch (be64_to_cpu(crq->event)) { in ibmvfc_handle_async()
3126 switch (crq->link_state) { in ibmvfc_handle_async()
3165 if (!crq->scsi_id && !crq->wwpn && !crq->node_name) in ibmvfc_handle_async()
3167 if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) in ibmvfc_handle_async()
3169 if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) in ibmvfc_handle_async()
3171 if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) in ibmvfc_handle_async()
3173 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) in ibmvfc_handle_async()
3175 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { in ibmvfc_handle_async()
3192 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); in ibmvfc_handle_async()
3204 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, in ibmvfc_handle_crq() argument
3208 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); in ibmvfc_handle_crq()
3210 switch (crq->valid) { in ibmvfc_handle_crq()
3212 switch (crq->format) { in ibmvfc_handle_crq()
3227 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); in ibmvfc_handle_crq()
3234 if (crq->format == IBMVFC_PARTITION_MIGRATED) { in ibmvfc_handle_crq()
3241 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { in ibmvfc_handle_crq()
3242 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); in ibmvfc_handle_crq()
3247 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); in ibmvfc_handle_crq()
3253 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); in ibmvfc_handle_crq()
3257 if (crq->format == IBMVFC_ASYNC_EVENT) in ibmvfc_handle_crq()
3264 if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) { in ibmvfc_handle_crq()
3266 crq->ioba); in ibmvfc_handle_crq()
3272 crq->ioba); in ibmvfc_handle_crq()
3641 struct ibmvfc_async_crq *crq; in ibmvfc_next_async_crq() local
3643 crq = &async_crq->msgs.async[async_crq->cur]; in ibmvfc_next_async_crq()
3644 if (crq->valid & 0x80) { in ibmvfc_next_async_crq()
3649 crq = NULL; in ibmvfc_next_async_crq()
3651 return crq; in ibmvfc_next_async_crq()
3663 struct ibmvfc_queue *queue = &vhost->crq; in ibmvfc_next_crq()
3664 struct ibmvfc_crq *crq; in ibmvfc_next_crq() local
3666 crq = &queue->msgs.crq[queue->cur]; in ibmvfc_next_crq()
3667 if (crq->valid & 0x80) { in ibmvfc_next_crq()
3672 crq = NULL; in ibmvfc_next_crq()
3674 return crq; in ibmvfc_next_crq()
3708 struct ibmvfc_crq *crq; in ibmvfc_tasklet() local
3716 spin_lock(vhost->crq.q_lock); in ibmvfc_tasklet()
3726 while ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3727 ibmvfc_handle_crq(crq, vhost, &evt_doneq); in ibmvfc_tasklet()
3728 crq->valid = 0; in ibmvfc_tasklet()
3738 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3740 ibmvfc_handle_crq(crq, vhost, &evt_doneq); in ibmvfc_tasklet()
3741 crq->valid = 0; in ibmvfc_tasklet()
3747 spin_unlock(vhost->crq.q_lock); in ibmvfc_tasklet()
3778 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, in ibmvfc_handle_scrq() argument
3781 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); in ibmvfc_handle_scrq()
3783 switch (crq->valid) { in ibmvfc_handle_scrq()
3789 dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid); in ibmvfc_handle_scrq()
3799 crq->ioba); in ibmvfc_handle_scrq()
3805 crq->ioba); in ibmvfc_handle_scrq()
3816 struct ibmvfc_crq *crq; in ibmvfc_next_scrq() local
3818 crq = &scrq->msgs.scrq[scrq->cur].crq; in ibmvfc_next_scrq()
3819 if (crq->valid & 0x80) { in ibmvfc_next_scrq()
3824 crq = NULL; in ibmvfc_next_scrq()
3826 return crq; in ibmvfc_next_scrq()
3831 struct ibmvfc_crq *crq; in ibmvfc_drain_sub_crq() local
3839 while ((crq = ibmvfc_next_scrq(scrq)) != NULL) { in ibmvfc_drain_sub_crq()
3840 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); in ibmvfc_drain_sub_crq()
3841 crq->valid = 0; in ibmvfc_drain_sub_crq()
3846 if ((crq = ibmvfc_next_scrq(scrq)) != NULL) { in ibmvfc_drain_sub_crq()
3848 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); in ibmvfc_drain_sub_crq()
3849 crq->valid = 0; in ibmvfc_drain_sub_crq()
4026 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_send_prli()
4133 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_send_plogi()
4209 evt = ibmvfc_get_event(&vhost->crq); in __ibmvfc_tgt_get_implicit_logout_evt()
4375 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_move_login()
4475 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
4480 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
4484 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
4541 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_adisc_timeout()
4591 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_adisc()
4694 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_query_target()
4866 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_discover_targets()
4940 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_channel_setup()
5006 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_channel_enquiry()
5127 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_npiv_login()
5164 if (list_empty(&vhost->crq.sent) && in ibmvfc_npiv_logout_done()
5192 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_npiv_logout()
5648 fmt_size = sizeof(*queue->msgs.crq); in ibmvfc_alloc_queue()
5703 struct ibmvfc_queue *crq = &vhost->crq; in ibmvfc_init_crq() local
5706 if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT)) in ibmvfc_init_crq()
5710 crq->msg_token, PAGE_SIZE); in ibmvfc_init_crq()
5746 ibmvfc_free_queue(vhost, crq); in ibmvfc_init_crq()