Lines Matching refs:fcport

24 struct qedf_rport *fcport; in qedf_cmd_timeout() local
26 fcport = io_req->fcport; in qedf_cmd_timeout()
27 if (io_req->fcport == NULL) { in qedf_cmd_timeout()
32 qedf = fcport->qedf; in qedf_cmd_timeout()
63 qedf_restart_rport(fcport); in qedf_cmd_timeout()
298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) in qedf_alloc_cmd() argument
300 struct qedf_ctx *qedf = fcport->qedf; in qedf_alloc_cmd()
309 free_sqes = atomic_read(&fcport->free_sqes); in qedf_alloc_cmd()
319 if ((atomic_read(&fcport->num_active_ios) >= in qedf_alloc_cmd()
323 atomic_read(&fcport->num_active_ios)); in qedf_alloc_cmd()
362 atomic_inc(&fcport->num_active_ios); in qedf_alloc_cmd()
363 atomic_dec(&fcport->free_sqes); in qedf_alloc_cmd()
368 io_req->fcport = fcport; in qedf_alloc_cmd()
406 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_free_mp_resc()
437 struct qedf_rport *fcport = io_req->fcport; in qedf_release_cmd() local
441 QEDF_WARN(&fcport->qedf->dbg_ctx, in qedf_release_cmd()
452 atomic_dec(&fcport->num_active_ios); in qedf_release_cmd()
454 if (atomic_read(&fcport->num_active_ios) < 0) { in qedf_release_cmd()
455 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); in qedf_release_cmd()
461 io_req->fcport = NULL; in qedf_release_cmd()
466 io_req->fcport = NULL; in qedf_release_cmd()
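
The qedf_alloc_cmd()/qedf_release_cmd() references above show the per-rport bookkeeping around fcport: allocation reads free_sqes and rejects when num_active_ios hits a limit (the limit itself is not visible in this listing), a successful allocation bumps num_active_ios and drops free_sqes, and release reverses the active-IO count, warning if it ever goes negative, before clearing io_req->fcport. A minimal user-space model of that accounting; rport_acct, max_active and acct_* are illustrative stand-ins, not driver symbols:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rport_acct {
	atomic_int free_sqes;       /* models fcport->free_sqes */
	atomic_int num_active_ios;  /* models fcport->num_active_ios */
	int max_active;             /* illustrative cap, not a real qedf field */
};

/* Alloc side: refuse when no SQ entry is free or too many IOs are outstanding. */
static bool acct_alloc(struct rport_acct *a)
{
	if (atomic_load(&a->free_sqes) == 0 ||
	    atomic_load(&a->num_active_ios) >= a->max_active)
		return false;
	atomic_fetch_add(&a->num_active_ios, 1);
	atomic_fetch_sub(&a->free_sqes, 1);
	return true;
}

/* Release side: drop the active count and warn on underflow, as at line 455. */
static void acct_release(struct rport_acct *a)
{
	if (atomic_fetch_sub(&a->num_active_ios, 1) - 1 < 0)
		fprintf(stderr, "active_ios < 0\n");
}
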
586 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, in qedf_init_task() argument
596 struct qedf_ctx *qedf = fcport->qedf; in qedf_init_task()
628 io_req->task_params->conn_cid = fcport->fw_cid; in qedf_init_task()
631 io_req->task_params->is_tape_device = fcport->dev_type; in qedf_init_task()
680 struct qedf_rport *fcport = io_req->fcport; in qedf_init_mp_task() local
681 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_init_mp_task()
708 io_req->task_params->conn_cid = fcport->fw_cid; in qedf_init_mp_task()
712 io_req->task_params->is_tape_device = fcport->dev_type; in qedf_init_mp_task()
758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport) in qedf_get_sqe_idx() argument
760 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); in qedf_get_sqe_idx()
763 rval = fcport->sq_prod_idx; in qedf_get_sqe_idx()
766 fcport->sq_prod_idx++; in qedf_get_sqe_idx()
767 fcport->fw_sq_prod_idx++; in qedf_get_sqe_idx()
768 if (fcport->sq_prod_idx == total_sqe) in qedf_get_sqe_idx()
769 fcport->sq_prod_idx = 0; in qedf_get_sqe_idx()
774 void qedf_ring_doorbell(struct qedf_rport *fcport) in qedf_ring_doorbell() argument
785 dbell.sq_prod = fcport->fw_sq_prod_idx; in qedf_ring_doorbell()
791 writel(*(u32 *)&dbell, fcport->p_doorbell); in qedf_ring_doorbell()
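
The qedf_get_sqe_idx() references (lines 758-769) give an essentially complete picture of the send-queue producer handling: the ring size is derived from sq_mem_size / sizeof(struct fcoe_wqe), the current sq_prod_idx is handed back to the caller, both sq_prod_idx and fw_sq_prod_idx are advanced, and only sq_prod_idx wraps back to zero. qedf_ring_doorbell() then publishes fw_sq_prod_idx to the adapter with writel() through fcport->p_doorbell. A compilable stand-alone model of the wrap logic (the doorbell structure itself is not reconstructed here, since its layout is not visible in the listing):

#include <stdint.h>

struct sq_state {
	uint32_t sq_mem_size;     /* models fcport->sq_mem_size, in bytes */
	uint32_t wqe_size;        /* stands in for sizeof(struct fcoe_wqe) */
	uint16_t sq_prod_idx;     /* local producer index, wraps at ring size */
	uint16_t fw_sq_prod_idx;  /* free-running value handed to the doorbell */
};

/* Return the slot to fill, then advance; only the local index wraps. */
static uint16_t get_sqe_idx(struct sq_state *s)
{
	uint16_t total_sqe = s->sq_mem_size / s->wqe_size;
	uint16_t rval = s->sq_prod_idx;

	s->sq_prod_idx++;
	s->fw_sq_prod_idx++;
	if (s->sq_prod_idx == total_sqe)
		s->sq_prod_idx = 0;

	return rval;
}
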
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, in qedf_trace_io() argument
803 struct qedf_ctx *qedf = fcport->qedf; in qedf_trace_io()
814 io_log->port_id = fcport->rdata->ids.port_id; in qedf_trace_io()
847 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) in qedf_post_io_req() argument
888 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || in qedf_post_io_req()
889 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_post_io_req()
901 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_post_io_req()
902 sqe = &fcport->sq[sqe_idx]; in qedf_post_io_req()
916 qedf_init_task(fcport, lport, io_req, task_ctx, sqe); in qedf_post_io_req()
919 qedf_ring_doorbell(fcport); in qedf_post_io_req()
925 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); in qedf_post_io_req()
937 struct qedf_rport *fcport; in qedf_queuecommand() local
998 fcport = (struct qedf_rport *)&rp[1]; in qedf_queuecommand()
1000 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || in qedf_queuecommand()
1001 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_queuecommand()
1010 atomic_inc(&fcport->ios_to_queue); in qedf_queuecommand()
1012 if (fcport->retry_delay_timestamp) { in qedf_queuecommand()
1014 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_queuecommand()
1015 if (time_after(jiffies, fcport->retry_delay_timestamp)) { in qedf_queuecommand()
1016 fcport->retry_delay_timestamp = 0; in qedf_queuecommand()
1018 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1021 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
1024 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1027 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); in qedf_queuecommand()
1030 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
1037 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_queuecommand()
1038 if (qedf_post_io_req(fcport, io_req)) { in qedf_queuecommand()
1041 atomic_inc(&fcport->free_sqes); in qedf_queuecommand()
1044 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1045 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
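
Taken together, the qedf_post_io_req() and qedf_queuecommand() references sketch the SCSI submission path: fcport is recovered from the libfc rport private area (&rp[1]), the command is bounced if the session is not ready or the connection is uploading, ios_to_queue is raised while the command is in flight toward the SQ, a pending retry_delay_timestamp (checked against jiffies under rport_lock) flows the target off until it expires, and a failed post returns the SQ entry via free_sqes before ios_to_queue is dropped. A simplified single-threaded model of that control flow; model_rport, post_cmd and the return codes are illustrative stand-ins, and the allocation-failure branch at lines 1027-1030 is not modelled:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct model_rport {
	bool session_ready;              /* models QEDF_RPORT_SESSION_READY */
	bool uploading;                  /* models QEDF_RPORT_UPLOADING_CONNECTION */
	atomic_int ios_to_queue;
	atomic_int free_sqes;
	uint64_t retry_delay_timestamp;  /* 0 means no delay in effect */
};

enum qc_result { QC_OK, QC_TARGET_BUSY, QC_REQUEUE };

/* post_cmd() stands in for qedf_post_io_req() on an already allocated io_req. */
static enum qc_result queue_one(struct model_rport *p, uint64_t now,
				bool (*post_cmd)(struct model_rport *))
{
	if (!p->session_ready || p->uploading)
		return QC_REQUEUE;

	atomic_fetch_add(&p->ios_to_queue, 1);

	if (p->retry_delay_timestamp) {
		if (now > p->retry_delay_timestamp) {
			p->retry_delay_timestamp = 0;   /* delay expired, clear it */
		} else {
			atomic_fetch_sub(&p->ios_to_queue, 1);
			return QC_TARGET_BUSY;          /* still inside the delay window */
		}
	}

	if (!post_cmd(p)) {
		atomic_fetch_add(&p->free_sqes, 1);     /* return the SQ entry, as at line 1041 */
		atomic_fetch_sub(&p->ios_to_queue, 1);
		return QC_TARGET_BUSY;
	}

	atomic_fetch_sub(&p->ios_to_queue, 1);
	return QC_OK;
}
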
1055 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_parse_fcp_rsp()
1124 struct qedf_rport *fcport; in qedf_scsi_completion() local
1171 fcport = io_req->fcport; in qedf_scsi_completion()
1177 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || in qedf_scsi_completion()
1178 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) && in qedf_scsi_completion()
1179 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) { in qedf_scsi_completion()
1285 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_scsi_completion()
1286 fcport->retry_delay_timestamp = in qedf_scsi_completion()
1288 spin_unlock_irqrestore(&fcport->rport_lock, in qedf_scsi_completion()
1306 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); in qedf_scsi_completion()
1409 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); in qedf_scsi_done()
1434 struct qedf_rport *fcport = io_req->fcport; in qedf_process_warning_compl() local
1445 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " in qedf_process_warning_compl()
1447 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), in qedf_process_warning_compl()
1451 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " in qedf_process_warning_compl()
1469 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { in qedf_process_warning_compl()
1510 if (io_req->fcport == NULL) { in qedf_process_error_detect()
1521 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " in qedf_process_error_detect()
1523 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), in qedf_process_error_detect()
1527 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " in qedf_process_error_detect()
1534 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) || in qedf_process_error_detect()
1535 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) && in qedf_process_error_detect()
1536 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) { in qedf_process_error_detect()
1585 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) in qedf_flush_active_ios() argument
1596 if (!fcport) { in qedf_flush_active_ios()
1602 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_flush_active_ios()
1607 qedf = fcport->qedf; in qedf_flush_active_ios()
1615 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && in qedf_flush_active_ios()
1617 while (atomic_read(&fcport->ios_to_queue)) { in qedf_flush_active_ios()
1620 atomic_read(&fcport->ios_to_queue)); in qedf_flush_active_ios()
1624 atomic_read(&fcport->ios_to_queue)); in qedf_flush_active_ios()
1635 atomic_read(&fcport->num_active_ios), fcport, in qedf_flush_active_ios()
1636 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id); in qedf_flush_active_ios()
1641 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); in qedf_flush_active_ios()
1643 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); in qedf_flush_active_ios()
1644 fcport->lun_reset_lun = lun; in qedf_flush_active_ios()
1652 if (!io_req->fcport) in qedf_flush_active_ios()
1670 if (io_req->fcport != fcport) in qedf_flush_active_ios()
1803 flush_cnt, atomic_read(&fcport->num_active_ios)); in qedf_flush_active_ios()
1805 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && in qedf_flush_active_ios()
1807 while (atomic_read(&fcport->num_active_ios)) { in qedf_flush_active_ios()
1811 atomic_read(&fcport->num_active_ios), in qedf_flush_active_ios()
1817 atomic_read(&fcport->num_active_ios)); in qedf_flush_active_ios()
1820 if (io_req->fcport && in qedf_flush_active_ios()
1821 io_req->fcport == fcport) { in qedf_flush_active_ios()
1843 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); in qedf_flush_active_ios()
1844 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); in qedf_flush_active_ios()
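
The qedf_flush_active_ios() references show the reset bookkeeping on the rport: a full flush (lun == -1) sets QEDF_RPORT_IN_TARGET_RESET, a per-LUN flush sets QEDF_RPORT_IN_LUN_RESET and records lun_reset_lun, io_reqs owned by a different fcport are skipped, and the function waits for ios_to_queue and num_active_ios to drain before clearing both bits. The completion paths at lines 1177-1179 and 1534-1536 test exactly those bits to decide whether an IO is being flushed by a reset. A minimal model of the flag discipline; model_fcport, flush_one and the bounded-wait constant are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

struct model_fcport {
	bool in_target_reset;       /* models QEDF_RPORT_IN_TARGET_RESET */
	bool in_lun_reset;          /* models QEDF_RPORT_IN_LUN_RESET */
	int lun_reset_lun;          /* models fcport->lun_reset_lun */
	atomic_int num_active_ios;
};

/* flush_one() is a hypothetical per-IO callback standing in for the body of
 * the flush loop; the listing only shows that IOs owned by other rports are
 * skipped. */
static void flush_active_ios(struct model_fcport *p, int lun,
			     void (*flush_one)(struct model_fcport *, int))
{
	if (lun == -1) {
		p->in_target_reset = true;   /* flush everything on this rport */
	} else {
		p->in_lun_reset = true;      /* flush a single LUN */
		p->lun_reset_lun = lun;
	}

	flush_one(p, lun);

	/* The driver does a bounded wait for outstanding IOs to drain;
	 * modelled here as a simple retry loop. */
	for (int tries = 0;
	     tries < 1000 && atomic_load(&p->num_active_ios) > 0; tries++)
		;

	p->in_lun_reset = false;
	p->in_target_reset = false;
}
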
1856 struct qedf_rport *fcport = io_req->fcport; in qedf_initiate_abts() local
1867 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_initiate_abts()
1873 qedf = fcport->qedf; in qedf_initiate_abts()
1874 rdata = fcport->rdata; in qedf_initiate_abts()
1897 if (!atomic_read(&fcport->free_sqes)) { in qedf_initiate_abts()
1903 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_abts()
1937 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_initiate_abts()
1939 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_initiate_abts()
1940 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_abts()
1945 qedf_ring_doorbell(fcport); in qedf_initiate_abts()
1947 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_initiate_abts()
1960 struct qedf_rport *fcport = io_req->fcport; in qedf_process_abts_compl() local
1971 if (!fcport) { in qedf_process_abts_compl()
1982 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || in qedf_process_abts_compl()
1983 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { in qedf_process_abts_compl()
2048 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_init_mp_req()
2150 struct qedf_rport *fcport; in qedf_initiate_cleanup() local
2159 fcport = io_req->fcport; in qedf_initiate_cleanup()
2160 if (!fcport) { in qedf_initiate_cleanup()
2166 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_initiate_cleanup()
2171 qedf = fcport->qedf; in qedf_initiate_cleanup()
2192 if (!atomic_read(&fcport->free_sqes)) { in qedf_initiate_cleanup()
2212 refcount, fcport, fcport->rdata->ids.port_id); in qedf_initiate_cleanup()
2220 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_initiate_cleanup()
2222 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_initiate_cleanup()
2223 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_cleanup()
2228 qedf_ring_doorbell(fcport); in qedf_initiate_cleanup()
2230 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_initiate_cleanup()
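
qedf_post_io_req(), qedf_initiate_abts(), qedf_initiate_cleanup() and qedf_execute_tmf() all submit through the same sequence on the fcport: take rport_lock, claim a slot with qedf_get_sqe_idx(), fill fcport->sq[sqe_idx], ring the doorbell, and drop the lock. A compact user-space model of that pattern, with a pthread mutex standing in for the spinlock and a plain store standing in for the writel() doorbell; all names and the ring depth are illustrative:

#include <pthread.h>
#include <stdint.h>

#define MODEL_SQ_DEPTH 64        /* illustrative ring size */

struct model_sq {
	pthread_mutex_t lock;        /* models fcport->rport_lock */
	uint16_t prod_idx;           /* models fcport->sq_prod_idx */
	uint16_t fw_prod_idx;        /* models fcport->fw_sq_prod_idx */
	uint32_t sq[MODEL_SQ_DEPTH]; /* models fcport->sq[] */
	volatile uint32_t doorbell;  /* models the mapped doorbell register */
};

/* Take the lock, claim a slot, fill it, ring the doorbell, drop the lock. */
static void submit_wqe(struct model_sq *s, uint32_t wqe)
{
	pthread_mutex_lock(&s->lock);

	uint16_t idx = s->prod_idx;
	s->prod_idx = (uint16_t)((s->prod_idx + 1) % MODEL_SQ_DEPTH);
	s->fw_prod_idx++;

	s->sq[idx] = wqe;             /* qedf_init_task() fills the real WQE */
	s->doorbell = s->fw_prod_idx; /* qedf_ring_doorbell() uses writel() */

	pthread_mutex_unlock(&s->lock);
}
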
2285 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, in qedf_execute_tmf() argument
2290 struct qedf_ctx *qedf = fcport->qedf; in qedf_execute_tmf()
2306 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_execute_tmf()
2312 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); in qedf_execute_tmf()
2326 io_req->fcport = fcport; in qedf_execute_tmf()
2351 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_execute_tmf()
2353 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_execute_tmf()
2354 sqe = &fcport->sq[sqe_idx]; in qedf_execute_tmf()
2357 qedf_init_task(fcport, lport, io_req, task, sqe); in qedf_execute_tmf()
2358 qedf_ring_doorbell(fcport); in qedf_execute_tmf()
2360 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_execute_tmf()
2383 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_execute_tmf()
2393 qedf_flush_active_ios(fcport, lun); in qedf_execute_tmf()
2395 qedf_flush_active_ios(fcport, -1); in qedf_execute_tmf()
2412 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; in qedf_initiate_tmf() local
2419 struct fc_rport_priv *rdata = fcport->rdata; in qedf_initiate_tmf()
2454 if (!fcport) { in qedf_initiate_tmf()
2460 qedf = fcport->qedf; in qedf_initiate_tmf()
2468 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_tmf()
2486 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_tmf()
2487 if (!fcport->rdata) in qedf_initiate_tmf()
2489 fcport); in qedf_initiate_tmf()
2493 fcport, fcport->rdata->ids.port_id); in qedf_initiate_tmf()
2498 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); in qedf_initiate_tmf()