Lines matching references to "ha" (the struct qla_hw_data * adapter handle) in the qla2xxx driver initialization code. Each entry gives the source line number, the matching line, and the enclosing function; "local" marks lines where ha is declared as a local variable and "argument" marks lines where it is a function parameter.

80 struct qla_hw_data *ha = vha->hw; in qla2x00_get_async_timeout() local
83 tmo = ha->r_a_tov / 10 * 2; in qla2x00_get_async_timeout()
84 if (IS_QLAFX00(ha)) { in qla2x00_get_async_timeout()
86 } else if (!IS_FWI2_CAPABLE(ha)) { in qla2x00_get_async_timeout()
91 tmo = ha->login_timeout; in qla2x00_get_async_timeout()
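
The get_async_timeout entries above derive the default timeout for async IOCBs from the fabric R_A_TOV, falling back to the NVRAM login timeout on pre-FWI2 chips (the QLAFX00 branch is elided here). A minimal sketch of that arithmetic, assuming r_a_tov is kept in tenths of a second:

    /* Sketch only: 2 * R_A_TOV in seconds (r_a_tov assumed to be in 0.1 s units),
     * e.g. r_a_tov = 100  ->  100 / 10 * 2 = 20 seconds. */
    unsigned int tmo = ha->r_a_tov / 10 * 2;

    if (!IS_FWI2_CAPABLE(ha))
        tmo = ha->login_timeout;    /* older ISPs: use the NVRAM login timeout */
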
609 struct qla_hw_data *ha = vha->hw; in qla2x00_is_reserved_id() local
611 if (IS_FWI2_CAPABLE(ha)) in qla2x00_is_reserved_id()
614 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) || in qla2x00_is_reserved_id()
632 struct qla_hw_data *ha = vha->hw; in qla2x00_find_new_loop_id() local
637 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
639 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE); in qla2x00_find_new_loop_id()
645 set_bit(dev->loop_id, ha->loop_id_map); in qla2x00_find_new_loop_id()
647 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
663 struct qla_hw_data *ha = fcport->vha->hw; in qla2x00_clear_loop_id() local
669 clear_bit(fcport->loop_id, ha->loop_id_map); in qla2x00_clear_loop_id()
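
The find_new_loop_id / clear_loop_id entries are the two halves of the driver's bitmap loop-ID allocator. A reduced sketch of the pattern, with the reserved-ID check and error reporting elided:

    /* Allocate: pick a free ID under vport_slock and mark it in use. */
    spin_lock_irqsave(&ha->vport_slock, flags);
    dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
    if (dev->loop_id >= LOOPID_MAP_SIZE)
        dev->loop_id = FC_NO_LOOP_ID;          /* map exhausted */
    else
        set_bit(dev->loop_id, ha->loop_id_map);
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    /* Release: clear the bit again when the fcport hands the ID back. */
    clear_bit(fcport->loop_id, ha->loop_id_map);
    fcport->loop_id = FC_NO_LOOP_ID;
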
1170 struct qla_hw_data *ha = vha->hw; in qla24xx_async_gpdb_sp_done() local
1191 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, in qla24xx_async_gpdb_sp_done()
1326 struct qla_hw_data *ha = vha->hw; in qla24xx_async_gpdb() local
1352 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); in qla24xx_async_gpdb()
1385 dma_pool_free(ha->s_dma_pool, pd, pd_dma); in qla24xx_async_gpdb()
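
The async_gpdb entries show the DMA-pool lifecycle for the Get Port Database buffer: zero-allocate from ha->s_dma_pool, hand the bus address to the mailbox IOCB, and free with the same (pool, cpu address, dma address) triple on both the error path and in the sp_done completion. A hedged sketch of just the alloc/free pairing:

    struct port_database_24xx *pd;
    dma_addr_t pd_dma;

    pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
    if (!pd)
        goto done;                              /* allocation failed */

    /* ... build and issue the GPDB mailbox IOCB using pd / pd_dma ... */

    dma_pool_free(ha->s_dma_pool, pd, pd_dma);  /* same pool, cpu addr, dma addr */
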
2295 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_fw_load() local
2304 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT; in qla83xx_nic_core_fw_load()
2305 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT; in qla83xx_nic_core_fw_load()
2325 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
2346 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); in qla83xx_nic_core_fw_load()
2349 if (ha->flags.nic_core_reset_owner) { in qla83xx_nic_core_fw_load()
2378 struct qla_hw_data *ha = vha->hw; in qla2x00_initialize_adapter() local
2379 struct req_que *req = ha->req_q_map[0]; in qla2x00_initialize_adapter()
2380 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla2x00_initialize_adapter()
2387 ha->flags.chip_reset_done = 0; in qla2x00_initialize_adapter()
2389 ha->flags.pci_channel_io_perm_failure = 0; in qla2x00_initialize_adapter()
2390 ha->flags.eeh_busy = 0; in qla2x00_initialize_adapter()
2398 ha->isp_abort_cnt = 0; in qla2x00_initialize_adapter()
2399 ha->beacon_blink_led = 0; in qla2x00_initialize_adapter()
2401 set_bit(0, ha->req_qid_map); in qla2x00_initialize_adapter()
2402 set_bit(0, ha->rsp_qid_map); in qla2x00_initialize_adapter()
2406 rval = ha->isp_ops->pci_config(vha); in qla2x00_initialize_adapter()
2413 ha->isp_ops->reset_chip(vha); in qla2x00_initialize_adapter()
2416 if (IS_QLA28XX(ha)) { in qla2x00_initialize_adapter()
2418 ha->flags.secure_adapter = 1; in qla2x00_initialize_adapter()
2420 (ha->flags.secure_adapter) ? "Yes" : "No"); in qla2x00_initialize_adapter()
2431 if (IS_QLA8044(ha)) { in qla2x00_initialize_adapter()
2442 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_initialize_adapter()
2447 ha->fc4_type_priority = FC4_PRIORITY_FCP; in qla2x00_initialize_adapter()
2449 ha->isp_ops->nvram_config(vha); in qla2x00_initialize_adapter()
2451 if (ha->fc4_type_priority != FC4_PRIORITY_FCP && in qla2x00_initialize_adapter()
2452 ha->fc4_type_priority != FC4_PRIORITY_NVME) in qla2x00_initialize_adapter()
2453 ha->fc4_type_priority = FC4_PRIORITY_FCP; in qla2x00_initialize_adapter()
2456 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe"); in qla2x00_initialize_adapter()
2458 if (ha->flags.disable_serdes) { in qla2x00_initialize_adapter()
2475 rval = ha->isp_ops->chip_diag(vha); in qla2x00_initialize_adapter()
2483 if (IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
2484 ha->cs84xx = qla84xx_get_chip(vha); in qla2x00_initialize_adapter()
2485 if (!ha->cs84xx) { in qla2x00_initialize_adapter()
2499 ha->flags.chip_reset_done = 1; in qla2x00_initialize_adapter()
2501 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { in qla2x00_initialize_adapter()
2512 if (IS_QLA8031(ha)) { in qla2x00_initialize_adapter()
2519 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) in qla2x00_initialize_adapter()
2522 if (IS_P3P_TYPE(ha)) in qla2x00_initialize_adapter()
2541 struct qla_hw_data *ha = vha->hw; in qla2100_pci_config() local
2542 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2100_pci_config()
2544 pci_set_master(ha->pdev); in qla2100_pci_config()
2545 pci_try_set_mwi(ha->pdev); in qla2100_pci_config()
2547 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2100_pci_config()
2549 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2100_pci_config()
2551 pci_disable_rom(ha->pdev); in qla2100_pci_config()
2554 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2100_pci_config()
2555 ha->pci_attr = rd_reg_word(&reg->ctrl_status); in qla2100_pci_config()
2556 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2100_pci_config()
2573 struct qla_hw_data *ha = vha->hw; in qla2300_pci_config() local
2574 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2300_pci_config()
2576 pci_set_master(ha->pdev); in qla2300_pci_config()
2577 pci_try_set_mwi(ha->pdev); in qla2300_pci_config()
2579 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla2300_pci_config()
2582 if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2300_pci_config()
2584 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla2300_pci_config()
2593 if (IS_QLA2300(ha)) { in qla2300_pci_config()
2594 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
2610 ha->fb_rev = RD_FB_CMD_REG(ha, reg); in qla2300_pci_config()
2612 if (ha->fb_rev == FPM_2300) in qla2300_pci_config()
2613 pci_clear_mwi(ha->pdev); in qla2300_pci_config()
2628 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
2631 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla2300_pci_config()
2633 pci_disable_rom(ha->pdev); in qla2300_pci_config()
2636 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2300_pci_config()
2637 ha->pci_attr = rd_reg_word(&reg->ctrl_status); in qla2300_pci_config()
2638 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2300_pci_config()
2654 struct qla_hw_data *ha = vha->hw; in qla24xx_pci_config() local
2655 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_pci_config()
2657 pci_set_master(ha->pdev); in qla24xx_pci_config()
2658 pci_try_set_mwi(ha->pdev); in qla24xx_pci_config()
2660 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla24xx_pci_config()
2663 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla24xx_pci_config()
2665 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); in qla24xx_pci_config()
2668 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) in qla24xx_pci_config()
2669 pcix_set_mmrbc(ha->pdev, 2048); in qla24xx_pci_config()
2672 if (pci_is_pcie(ha->pdev)) in qla24xx_pci_config()
2673 pcie_set_readrq(ha->pdev, 4096); in qla24xx_pci_config()
2675 pci_disable_rom(ha->pdev); in qla24xx_pci_config()
2677 ha->chip_revision = ha->pdev->revision; in qla24xx_pci_config()
2680 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_pci_config()
2681 ha->pci_attr = rd_reg_dword(&reg->ctrl_status); in qla24xx_pci_config()
2682 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_pci_config()
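
The qla24xx_pci_config entries follow the usual probe-time PCI bring-up: enable bus mastering and MWI, turn on parity/SERR reporting in PCI_COMMAND, raise the PCI-X/PCIe read sizes, disable the expansion ROM, and finally snapshot ctrl_status under the hardware lock. A condensed sketch of that sequence:

    pci_set_master(ha->pdev);
    pci_try_set_mwi(ha->pdev);

    pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
    w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);   /* report parity errors and SERR */
    pci_write_config_word(ha->pdev, PCI_COMMAND, w);
    pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

    if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
        pcix_set_mmrbc(ha->pdev, 2048);             /* PCI-X max memory read byte count */
    if (pci_is_pcie(ha->pdev))
        pcie_set_readrq(ha->pdev, 4096);            /* PCIe max read request size */

    pci_disable_rom(ha->pdev);
    ha->chip_revision = ha->pdev->revision;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->pci_attr = rd_reg_dword(&reg->ctrl_status); /* cache bus attributes for later */
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
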
2697 struct qla_hw_data *ha = vha->hw; in qla25xx_pci_config() local
2699 pci_set_master(ha->pdev); in qla25xx_pci_config()
2700 pci_try_set_mwi(ha->pdev); in qla25xx_pci_config()
2702 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); in qla25xx_pci_config()
2705 pci_write_config_word(ha->pdev, PCI_COMMAND, w); in qla25xx_pci_config()
2708 if (pci_is_pcie(ha->pdev)) in qla25xx_pci_config()
2709 pcie_set_readrq(ha->pdev, 4096); in qla25xx_pci_config()
2711 pci_disable_rom(ha->pdev); in qla25xx_pci_config()
2713 ha->chip_revision = ha->pdev->revision; in qla25xx_pci_config()
2730 struct qla_hw_data *ha = vha->hw; in qla2x00_isp_firmware() local
2735 if (ha->flags.disable_risc_code_load) { in qla2x00_isp_firmware()
2739 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); in qla2x00_isp_firmware()
2764 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_chip() local
2765 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_chip()
2770 if (unlikely(pci_channel_offline(ha->pdev))) in qla2x00_reset_chip()
2773 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_chip()
2775 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_chip()
2779 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); in qla2x00_reset_chip()
2781 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
2783 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
2786 if (IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
2807 if (!IS_QLA2200(ha)) { in qla2x00_reset_chip()
2817 if (IS_QLA2200(ha)) { in qla2x00_reset_chip()
2818 WRT_FB_CMD_REG(ha, reg, 0xa000); in qla2x00_reset_chip()
2819 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */ in qla2x00_reset_chip()
2821 WRT_FB_CMD_REG(ha, reg, 0x00fc); in qla2x00_reset_chip()
2825 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) in qla2x00_reset_chip()
2851 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
2876 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_reset_chip()
2878 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) in qla2x00_reset_chip()
2888 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); in qla2x00_reset_chip()
2891 if (!IS_QLA2100(ha)) { in qla2x00_reset_chip()
2896 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_chip()
2921 struct qla_hw_data *ha = vha->hw; in qla_chk_risc_recovery() local
2922 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla_chk_risc_recovery()
2928 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla_chk_risc_recovery()
2971 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_risc() local
2972 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_risc()
2979 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_risc()
2991 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
3001 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); in qla24xx_reset_risc()
3017 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
3035 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
3087 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); in qla24xx_reset_risc()
3094 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_risc()
3098 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling"); in qla24xx_reset_risc()
3100 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_risc()
3101 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_risc()
3194 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_chip() local
3197 if (pci_channel_offline(ha->pdev) && in qla24xx_reset_chip()
3198 ha->flags.pci_channel_io_perm_failure) { in qla24xx_reset_chip()
3202 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_chip()
3222 struct qla_hw_data *ha = vha->hw; in qla2x00_chip_diag() local
3223 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_chip_diag()
3228 struct req_que *req = ha->req_q_map[0]; in qla2x00_chip_diag()
3236 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3264 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { in qla2x00_chip_diag()
3265 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); in qla2x00_chip_diag()
3268 data = RD_MAILBOX_REG(ha, reg, 0); in qla2x00_chip_diag()
3280 mb[1] = RD_MAILBOX_REG(ha, reg, 1); in qla2x00_chip_diag()
3281 mb[2] = RD_MAILBOX_REG(ha, reg, 2); in qla2x00_chip_diag()
3282 mb[3] = RD_MAILBOX_REG(ha, reg, 3); in qla2x00_chip_diag()
3283 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); in qla2x00_chip_diag()
3292 ha->product_id[0] = mb[1]; in qla2x00_chip_diag()
3293 ha->product_id[1] = mb[2]; in qla2x00_chip_diag()
3294 ha->product_id[2] = mb[3]; in qla2x00_chip_diag()
3295 ha->product_id[3] = mb[4]; in qla2x00_chip_diag()
3299 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; in qla2x00_chip_diag()
3301 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * in qla2x00_chip_diag()
3304 if (IS_QLA2200(ha) && in qla2x00_chip_diag()
3305 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { in qla2x00_chip_diag()
3309 ha->device_type |= DT_ISP2200A; in qla2x00_chip_diag()
3310 ha->fw_transfer_size = 128; in qla2x00_chip_diag()
3314 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3324 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3331 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_chip_diag()
3346 struct qla_hw_data *ha = vha->hw; in qla24xx_chip_diag() local
3347 struct req_que *req = ha->req_q_map[0]; in qla24xx_chip_diag()
3349 if (IS_P3P_TYPE(ha)) in qla24xx_chip_diag()
3352 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; in qla24xx_chip_diag()
3372 struct qla_hw_data *ha = vha->hw; in qla2x00_init_fce_trace() local
3374 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_init_fce_trace()
3377 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && in qla2x00_init_fce_trace()
3378 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla2x00_init_fce_trace()
3381 if (ha->fce) { in qla2x00_init_fce_trace()
3389 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, in qla2x00_init_fce_trace()
3399 ha->fce_mb, &ha->fce_bufs); in qla2x00_init_fce_trace()
3403 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); in qla2x00_init_fce_trace()
3410 ha->flags.fce_enabled = 1; in qla2x00_init_fce_trace()
3411 ha->fce_dma = tc_dma; in qla2x00_init_fce_trace()
3412 ha->fce = tc; in qla2x00_init_fce_trace()
3421 struct qla_hw_data *ha = vha->hw; in qla2x00_init_eft_trace() local
3423 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_init_eft_trace()
3426 if (ha->eft) { in qla2x00_init_eft_trace()
3434 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, in qla2x00_init_eft_trace()
3447 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); in qla2x00_init_eft_trace()
3454 ha->eft_dma = tc_dma; in qla2x00_init_eft_trace()
3455 ha->eft = tc; in qla2x00_init_eft_trace()
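
The init_eft_trace entries show the coherent-buffer setup used for the extended firmware trace: allocate EFT_SIZE with dma_alloc_coherent, ask the firmware to enable the trace, free the buffer if that fails, otherwise record the addresses in ha. A stripped-down sketch (the enable call's exact signature is an assumption based on the EFT_NUM_BUFFERS usage elsewhere in this listing):

    dma_addr_t tc_dma;
    void *tc;

    tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, GFP_KERNEL);
    if (!tc)
        return;                                 /* trace simply stays disabled */

    if (qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS)) {
        /* firmware rejected the buffer, so give the memory back */
        dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
        return;
    }

    ha->eft_dma = tc_dma;
    ha->eft = tc;
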
3470 struct qla_hw_data *ha = vha->hw; in qla2x00_alloc_fw_dump() local
3471 struct req_que *req = ha->req_q_map[0]; in qla2x00_alloc_fw_dump()
3472 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_alloc_fw_dump()
3478 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_alloc_fw_dump()
3480 } else if (IS_QLA23XX(ha)) { in qla2x00_alloc_fw_dump()
3482 mem_size = (ha->fw_memory_size - 0x11000 + 1) * in qla2x00_alloc_fw_dump()
3484 } else if (IS_FWI2_CAPABLE(ha)) { in qla2x00_alloc_fw_dump()
3485 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) in qla2x00_alloc_fw_dump()
3487 else if (IS_QLA81XX(ha)) in qla2x00_alloc_fw_dump()
3489 else if (IS_QLA25XX(ha)) in qla2x00_alloc_fw_dump()
3494 mem_size = (ha->fw_memory_size - 0x100000 + 1) * in qla2x00_alloc_fw_dump()
3496 if (ha->mqenable) { in qla2x00_alloc_fw_dump()
3497 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && in qla2x00_alloc_fw_dump()
3498 !IS_QLA28XX(ha)) in qla2x00_alloc_fw_dump()
3504 mq_size += (ha->max_req_queues - 1) * in qla2x00_alloc_fw_dump()
3506 mq_size += (ha->max_rsp_queues - 1) * in qla2x00_alloc_fw_dump()
3509 if (ha->tgt.atio_ring) in qla2x00_alloc_fw_dump()
3510 mq_size += ha->tgt.atio_q_length * sizeof(request_t); in qla2x00_alloc_fw_dump()
3513 if (ha->fce) in qla2x00_alloc_fw_dump()
3516 if (ha->eft) in qla2x00_alloc_fw_dump()
3520 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { in qla2x00_alloc_fw_dump()
3521 struct fwdt *fwdt = ha->fwdt; in qla2x00_alloc_fw_dump()
3540 dump_size += ha->fwdt[1].dump_size; in qla2x00_alloc_fw_dump()
3547 ha->chain_offset = dump_size; in qla2x00_alloc_fw_dump()
3549 if (ha->exchoffld_buf) in qla2x00_alloc_fw_dump()
3551 ha->exchoffld_size; in qla2x00_alloc_fw_dump()
3552 if (ha->exlogin_buf) in qla2x00_alloc_fw_dump()
3554 ha->exlogin_size; in qla2x00_alloc_fw_dump()
3557 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) { in qla2x00_alloc_fw_dump()
3561 __func__, dump_size, ha->fw_dump_len, in qla2x00_alloc_fw_dump()
3562 ha->fw_dump_alloc_len); in qla2x00_alloc_fw_dump()
3570 mutex_lock(&ha->optrom_mutex); in qla2x00_alloc_fw_dump()
3571 if (ha->fw_dumped) { in qla2x00_alloc_fw_dump()
3572 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len); in qla2x00_alloc_fw_dump()
3573 vfree(ha->fw_dump); in qla2x00_alloc_fw_dump()
3574 ha->fw_dump = fw_dump; in qla2x00_alloc_fw_dump()
3575 ha->fw_dump_alloc_len = dump_size; in qla2x00_alloc_fw_dump()
3580 vfree(ha->fw_dump); in qla2x00_alloc_fw_dump()
3581 ha->fw_dump = fw_dump; in qla2x00_alloc_fw_dump()
3583 ha->fw_dump_len = ha->fw_dump_alloc_len = in qla2x00_alloc_fw_dump()
3589 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { in qla2x00_alloc_fw_dump()
3590 ha->mpi_fw_dump = (char *)fw_dump + in qla2x00_alloc_fw_dump()
3591 ha->fwdt[1].dump_size; in qla2x00_alloc_fw_dump()
3592 mutex_unlock(&ha->optrom_mutex); in qla2x00_alloc_fw_dump()
3596 ha->fw_dump->signature[0] = 'Q'; in qla2x00_alloc_fw_dump()
3597 ha->fw_dump->signature[1] = 'L'; in qla2x00_alloc_fw_dump()
3598 ha->fw_dump->signature[2] = 'G'; in qla2x00_alloc_fw_dump()
3599 ha->fw_dump->signature[3] = 'C'; in qla2x00_alloc_fw_dump()
3600 ha->fw_dump->version = htonl(1); in qla2x00_alloc_fw_dump()
3602 ha->fw_dump->fixed_size = htonl(fixed_size); in qla2x00_alloc_fw_dump()
3603 ha->fw_dump->mem_size = htonl(mem_size); in qla2x00_alloc_fw_dump()
3604 ha->fw_dump->req_q_size = htonl(req_q_size); in qla2x00_alloc_fw_dump()
3605 ha->fw_dump->rsp_q_size = htonl(rsp_q_size); in qla2x00_alloc_fw_dump()
3607 ha->fw_dump->eft_size = htonl(eft_size); in qla2x00_alloc_fw_dump()
3608 ha->fw_dump->eft_addr_l = in qla2x00_alloc_fw_dump()
3609 htonl(LSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
3610 ha->fw_dump->eft_addr_h = in qla2x00_alloc_fw_dump()
3611 htonl(MSD(ha->eft_dma)); in qla2x00_alloc_fw_dump()
3613 ha->fw_dump->header_size = in qla2x00_alloc_fw_dump()
3617 mutex_unlock(&ha->optrom_mutex); in qla2x00_alloc_fw_dump()
3670 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) in qla2x00_alloc_outstanding_cmds() argument
3676 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_alloc_outstanding_cmds()
3679 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) in qla2x00_alloc_outstanding_cmds()
3680 req->num_outstanding_cmds = ha->cur_fw_xcb_count; in qla2x00_alloc_outstanding_cmds()
3682 req->num_outstanding_cmds = ha->cur_fw_iocb_count; in qla2x00_alloc_outstanding_cmds()
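
alloc_outstanding_cmds sizes the per-queue outstanding-command table from whichever firmware resource is scarcer, exchange control blocks (XCBs) or IOCBs; pre-FWI2 chips just use a fixed default. A sketch of that choice (the default macro name is an assumption, and the actual array allocation and retry logic are omitted):

    if (!IS_FWI2_CAPABLE(ha))
        req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;  /* assumed pre-24xx default */
    else if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
        req->num_outstanding_cmds = ha->cur_fw_xcb_count;          /* XCBs are the bottleneck */
    else
        req->num_outstanding_cmds = ha->cur_fw_iocb_count;         /* IOCBs are the bottleneck */
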
3822 struct qla_hw_data *ha = vha->hw; in qla24xx_detect_sfp() local
3823 struct nvram_81xx *nv = ha->nvram; in qla24xx_detect_sfp()
3831 ha->flags.lr_detected = 0; in qla24xx_detect_sfp()
3832 if (IS_BPM_RANGE_CAPABLE(ha) && in qla24xx_detect_sfp()
3835 ha->flags.lr_detected = 1; in qla24xx_detect_sfp()
3836 ha->lr_distance = in qla24xx_detect_sfp()
3852 ha->flags.lr_detected = 0; in qla24xx_detect_sfp()
3856 ha->flags.lr_detected = 1; in qla24xx_detect_sfp()
3859 ha->lr_distance = LR_DISTANCE_10K; in qla24xx_detect_sfp()
3861 ha->lr_distance = LR_DISTANCE_5K; in qla24xx_detect_sfp()
3867 types[ha->flags.lr_detected], in qla24xx_detect_sfp()
3868 ha->flags.lr_detected ? lengths[ha->lr_distance] : in qla24xx_detect_sfp()
3870 used_nvram, ll, ha->flags.lr_detected, ha->lr_distance); in qla24xx_detect_sfp()
3871 return ha->flags.lr_detected; in qla24xx_detect_sfp()
3878 struct qla_hw_data *ha = vha->hw; in qla_init_iocb_limit() local
3880 num_qps = ha->num_qpairs + 1; in qla_init_iocb_limit()
3881 limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100; in qla_init_iocb_limit()
3883 ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count; in qla_init_iocb_limit()
3884 ha->base_qpair->fwres.iocbs_limit = limit; in qla_init_iocb_limit()
3885 ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps; in qla_init_iocb_limit()
3886 ha->base_qpair->fwres.iocbs_used = 0; in qla_init_iocb_limit()
3887 for (i = 0; i < ha->max_qpairs; i++) { in qla_init_iocb_limit()
3888 if (ha->queue_pair_map[i]) { in qla_init_iocb_limit()
3889 ha->queue_pair_map[i]->fwres.iocbs_total = in qla_init_iocb_limit()
3890 ha->orig_fw_iocb_count; in qla_init_iocb_limit()
3891 ha->queue_pair_map[i]->fwres.iocbs_limit = limit; in qla_init_iocb_limit()
3892 ha->queue_pair_map[i]->fwres.iocbs_qp_limit = in qla_init_iocb_limit()
3894 ha->queue_pair_map[i]->fwres.iocbs_used = 0; in qla_init_iocb_limit()
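
qla_init_iocb_limit caps usable IOCBs at a percentage of the firmware-reported total and splits that cap across the base queue pair plus every extra queue pair. A small worked sketch of the arithmetic (the 95% figure and the example counts are assumptions; only QLA_IOCB_PCT_LIMIT and the field names come from the lines above):

    /* Example: orig_fw_iocb_count = 2048, QLA_IOCB_PCT_LIMIT = 95 (assumed),
     * 4 extra queue pairs + the base pair  ->  num_qps = 5. */
    u16 num_qps = ha->num_qpairs + 1;
    u32 limit   = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;  /* 1945 */

    ha->base_qpair->fwres.iocbs_total    = ha->orig_fw_iocb_count;
    ha->base_qpair->fwres.iocbs_limit    = limit;
    ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;             /* 389 per qpair */
    ha->base_qpair->fwres.iocbs_used     = 0;
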
3910 struct qla_hw_data *ha = vha->hw; in qla2x00_setup_chip() local
3911 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_setup_chip()
3916 if (IS_P3P_TYPE(ha)) { in qla2x00_setup_chip()
3917 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
3925 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
3927 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
3930 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
3937 rval = ha->isp_ops->load_risc(vha, &srisc_address); in qla2x00_setup_chip()
3949 ha->flags.exlogins_enabled = 1; in qla2x00_setup_chip()
3952 ha->flags.exchoffld_enabled = 1; in qla2x00_setup_chip()
3962 ha->isp_ops->reset_chip(vha); in qla2x00_setup_chip()
3963 ha->isp_ops->chip_diag(vha); in qla2x00_setup_chip()
3967 if (IS_ZIO_THRESHOLD_CAPABLE(ha)) in qla2x00_setup_chip()
3969 ha->last_zio_threshold); in qla2x00_setup_chip()
3980 fw_major_version = ha->fw_major_version; in qla2x00_setup_chip()
3981 if (IS_P3P_TYPE(ha)) in qla2x00_setup_chip()
3987 ha->flags.npiv_supported = 0; in qla2x00_setup_chip()
3988 if (IS_QLA2XXX_MIDTYPE(ha) && in qla2x00_setup_chip()
3989 (ha->fw_attributes & BIT_2)) { in qla2x00_setup_chip()
3990 ha->flags.npiv_supported = 1; in qla2x00_setup_chip()
3991 if ((!ha->max_npiv_vports) || in qla2x00_setup_chip()
3992 ((ha->max_npiv_vports + 1) % in qla2x00_setup_chip()
3994 ha->max_npiv_vports = in qla2x00_setup_chip()
4004 rval = qla2x00_alloc_outstanding_cmds(ha, in qla2x00_setup_chip()
4009 if (!fw_major_version && !(IS_P3P_TYPE(ha))) in qla2x00_setup_chip()
4012 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha))) in qla2x00_setup_chip()
4025 if (ql2xrdpenable || ha->flags.scm_supported_f || in qla2x00_setup_chip()
4026 ha->flags.edif_enabled) in qla2x00_setup_chip()
4031 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { in qla2x00_setup_chip()
4033 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_setup_chip()
4034 if (IS_QLA2300(ha)) in qla2x00_setup_chip()
4041 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_setup_chip()
4044 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) in qla2x00_setup_chip()
4045 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
4046 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { in qla2x00_setup_chip()
4051 ha->flags.fac_supported = 1; in qla2x00_setup_chip()
4052 ha->fdt_block_size = size << 2; in qla2x00_setup_chip()
4056 ha->fw_major_version, ha->fw_minor_version, in qla2x00_setup_chip()
4057 ha->fw_subminor_version); in qla2x00_setup_chip()
4059 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || in qla2x00_setup_chip()
4060 IS_QLA28XX(ha)) { in qla2x00_setup_chip()
4061 ha->flags.fac_supported = 0; in qla2x00_setup_chip()
4110 struct qla_hw_data *ha = vha->hw; in qla2x00_update_fw_options() local
4112 memset(ha->fw_options, 0, sizeof(ha->fw_options)); in qla2x00_update_fw_options()
4113 qla2x00_get_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
4115 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_update_fw_options()
4122 ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options)); in qla2x00_update_fw_options()
4124 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
4125 if (ha->fw_seriallink_options[3] & BIT_2) { in qla2x00_update_fw_options()
4126 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; in qla2x00_update_fw_options()
4129 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); in qla2x00_update_fw_options()
4130 emphasis = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
4132 tx_sens = ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
4134 rx_sens = (ha->fw_seriallink_options[0] & in qla2x00_update_fw_options()
4136 ha->fw_options[10] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
4137 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
4140 ha->fw_options[10] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
4141 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
4142 ha->fw_options[10] |= BIT_5 | in qla2x00_update_fw_options()
4147 swing = (ha->fw_seriallink_options[2] & in qla2x00_update_fw_options()
4149 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); in qla2x00_update_fw_options()
4150 tx_sens = ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
4152 rx_sens = (ha->fw_seriallink_options[1] & in qla2x00_update_fw_options()
4154 ha->fw_options[11] = (emphasis << 14) | (swing << 8); in qla2x00_update_fw_options()
4155 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { in qla2x00_update_fw_options()
4158 ha->fw_options[11] |= (tx_sens << 4) | rx_sens; in qla2x00_update_fw_options()
4159 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) in qla2x00_update_fw_options()
4160 ha->fw_options[11] |= BIT_5 | in qla2x00_update_fw_options()
4167 ha->fw_options[3] |= BIT_13; in qla2x00_update_fw_options()
4170 if (ha->flags.enable_led_scheme) in qla2x00_update_fw_options()
4171 ha->fw_options[2] |= BIT_12; in qla2x00_update_fw_options()
4174 if (IS_QLA6312(ha)) in qla2x00_update_fw_options()
4175 ha->fw_options[2] |= BIT_13; in qla2x00_update_fw_options()
4178 if (ha->operating_mode == P2P) { in qla2x00_update_fw_options()
4179 ha->fw_options[2] |= BIT_3; in qla2x00_update_fw_options()
4182 __func__, ha->fw_options[2]); in qla2x00_update_fw_options()
4186 qla2x00_set_fw_options(vha, ha->fw_options); in qla2x00_update_fw_options()
4193 struct qla_hw_data *ha = vha->hw; in qla24xx_update_fw_options() local
4195 if (IS_P3P_TYPE(ha)) in qla24xx_update_fw_options()
4200 ha->fw_options[3] |= BIT_12; in qla24xx_update_fw_options()
4203 if (ha->operating_mode == P2P) { in qla24xx_update_fw_options()
4204 ha->fw_options[2] |= BIT_3; in qla24xx_update_fw_options()
4207 __func__, ha->fw_options[2]); in qla24xx_update_fw_options()
4211 if (ql2xmvasynctoatio && !ha->flags.edif_enabled && in qla24xx_update_fw_options()
4212 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { in qla24xx_update_fw_options()
4215 ha->fw_options[2] |= BIT_11; in qla24xx_update_fw_options()
4217 ha->fw_options[2] &= ~BIT_11; in qla24xx_update_fw_options()
4220 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || in qla24xx_update_fw_options()
4221 IS_QLA28XX(ha)) { in qla24xx_update_fw_options()
4228 ha->fw_options[2] |= BIT_4; in qla24xx_update_fw_options()
4230 ha->fw_options[2] &= ~(BIT_4); in qla24xx_update_fw_options()
4234 ha->fw_options[2] |= BIT_8; in qla24xx_update_fw_options()
4236 ha->fw_options[2] &= ~BIT_8; in qla24xx_update_fw_options()
4242 if (ha->flags.edif_enabled && in qla24xx_update_fw_options()
4244 ha->fw_options[3] |= BIT_15; in qla24xx_update_fw_options()
4245 ha->flags.n2n_fw_acc_sec = 1; in qla24xx_update_fw_options()
4247 ha->fw_options[3] &= ~BIT_15; in qla24xx_update_fw_options()
4248 ha->flags.n2n_fw_acc_sec = 0; in qla24xx_update_fw_options()
4252 if (ql2xrdpenable || ha->flags.scm_supported_f || in qla24xx_update_fw_options()
4253 ha->flags.edif_enabled) in qla24xx_update_fw_options()
4254 ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; in qla24xx_update_fw_options()
4257 if (IS_BPM_RANGE_CAPABLE(ha)) in qla24xx_update_fw_options()
4258 ha->fw_options[3] |= BIT_10; in qla24xx_update_fw_options()
4262 __func__, ha->fw_options[1], ha->fw_options[2], in qla24xx_update_fw_options()
4263 ha->fw_options[3], vha->host->active_mode); in qla24xx_update_fw_options()
4265 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) in qla24xx_update_fw_options()
4266 qla2x00_set_fw_options(vha, ha->fw_options); in qla24xx_update_fw_options()
4269 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) in qla24xx_update_fw_options()
4273 le16_to_cpu(ha->fw_seriallink_options24[1]), in qla24xx_update_fw_options()
4274 le16_to_cpu(ha->fw_seriallink_options24[2]), in qla24xx_update_fw_options()
4275 le16_to_cpu(ha->fw_seriallink_options24[3])); in qla24xx_update_fw_options()
4285 struct qla_hw_data *ha = vha->hw; in qla2x00_config_rings() local
4286 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_config_rings()
4287 struct req_que *req = ha->req_q_map[0]; in qla2x00_config_rings()
4288 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla2x00_config_rings()
4291 ha->init_cb->request_q_outpointer = cpu_to_le16(0); in qla2x00_config_rings()
4292 ha->init_cb->response_q_inpointer = cpu_to_le16(0); in qla2x00_config_rings()
4293 ha->init_cb->request_q_length = cpu_to_le16(req->length); in qla2x00_config_rings()
4294 ha->init_cb->response_q_length = cpu_to_le16(rsp->length); in qla2x00_config_rings()
4295 put_unaligned_le64(req->dma, &ha->init_cb->request_q_address); in qla2x00_config_rings()
4296 put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address); in qla2x00_config_rings()
4298 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0); in qla2x00_config_rings()
4299 wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
4300 wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0); in qla2x00_config_rings()
4301 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0); in qla2x00_config_rings()
4302 rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ in qla2x00_config_rings()
4308 struct qla_hw_data *ha = vha->hw; in qla24xx_config_rings() local
4309 device_reg_t *reg = ISP_QUE_REG(ha, 0); in qla24xx_config_rings()
4310 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; in qla24xx_config_rings()
4314 struct req_que *req = ha->req_q_map[0]; in qla24xx_config_rings()
4315 struct rsp_que *rsp = ha->rsp_q_map[0]; in qla24xx_config_rings()
4318 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_config_rings()
4328 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); in qla24xx_config_rings()
4329 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address); in qla24xx_config_rings()
4331 if (IS_SHADOW_REG_CAPABLE(ha)) in qla24xx_config_rings()
4334 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || in qla24xx_config_rings()
4335 IS_QLA28XX(ha)) { in qla24xx_config_rings()
4338 if (ha->flags.msix_enabled) { in qla24xx_config_rings()
4339 msix = &ha->msix_entries[1]; in qla24xx_config_rings()
4353 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && in qla24xx_config_rings()
4354 (ha->flags.msix_enabled)) { in qla24xx_config_rings()
4356 ha->flags.disable_msix_handshake = 1; in qla24xx_config_rings()
4378 if (ha->set_data_rate) { in qla24xx_config_rings()
4381 qla2x00_get_link_speed_str(ha, ha->set_data_rate)); in qla24xx_config_rings()
4382 icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13); in qla24xx_config_rings()
4404 struct qla_hw_data *ha = vha->hw; in qla2x00_init_rings() local
4408 (struct mid_init_cb_24xx *) ha->init_cb; in qla2x00_init_rings()
4410 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_init_rings()
4413 for (que = 0; que < ha->max_req_queues; que++) { in qla2x00_init_rings()
4414 req = ha->req_q_map[que]; in qla2x00_init_rings()
4415 if (!req || !test_bit(que, ha->req_qid_map)) in qla2x00_init_rings()
4430 for (que = 0; que < ha->max_rsp_queues; que++) { in qla2x00_init_rings()
4431 rsp = ha->rsp_q_map[que]; in qla2x00_init_rings()
4432 if (!rsp || !test_bit(que, ha->rsp_qid_map)) in qla2x00_init_rings()
4437 if (IS_QLAFX00(ha)) in qla2x00_init_rings()
4443 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; in qla2x00_init_rings()
4444 ha->tgt.atio_ring_index = 0; in qla2x00_init_rings()
4448 ha->isp_ops->config_rings(vha); in qla2x00_init_rings()
4450 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_init_rings()
4452 if (IS_QLAFX00(ha)) { in qla2x00_init_rings()
4453 rval = qlafx00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
4458 ha->isp_ops->update_fw_options(vha); in qla2x00_init_rings()
4466 if (ha->flags.npiv_supported) { in qla2x00_init_rings()
4467 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) in qla2x00_init_rings()
4468 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; in qla2x00_init_rings()
4469 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); in qla2x00_init_rings()
4472 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_init_rings()
4475 cpu_to_le16(ha->cur_fw_xcb_count); in qla2x00_init_rings()
4476 ha->flags.dport_enabled = in qla2x00_init_rings()
4480 (ha->flags.dport_enabled) ? "enabled" : "disabled"); in qla2x00_init_rings()
4482 ha->flags.fawwpn_enabled = in qla2x00_init_rings()
4486 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); in qla2x00_init_rings()
4490 if (ha->flags.edif_enabled) in qla2x00_init_rings()
4493 rval = qla2x00_init_firmware(vha, ha->init_cb_size); in qla2x00_init_rings()
4501 QLA_FW_STARTED(ha); in qla2x00_init_rings()
4522 struct qla_hw_data *ha = vha->hw; in qla2x00_fw_ready() local
4528 if (IS_P3P_TYPE(ha)) in qla2x00_fw_ready()
4537 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { in qla2x00_fw_ready()
4559 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { in qla2x00_fw_ready()
4590 qla2x00_get_retry_cnt(vha, &ha->retry_count, in qla2x00_fw_ready()
4591 &ha->login_timeout, &ha->r_a_tov); in qla2x00_fw_ready()
4615 ha->flags.isp82xx_fw_hung) in qla2x00_fw_ready()
4662 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_hba() local
4663 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_configure_hba()
4671 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || in qla2x00_configure_hba()
4672 IS_CNA_CAPABLE(ha) || in qla2x00_configure_hba()
4679 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && in qla2x00_configure_hba()
4700 ha->min_external_loopid = SNS_FIRST_LOOP_ID; in qla2x00_configure_hba()
4701 ha->operating_mode = LOOP; in qla2x00_configure_hba()
4706 ha->switch_cap = 0; in qla2x00_configure_hba()
4707 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
4713 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
4714 ha->current_topology = ISP_CFG_FL; in qla2x00_configure_hba()
4720 ha->switch_cap = 0; in qla2x00_configure_hba()
4721 ha->operating_mode = P2P; in qla2x00_configure_hba()
4722 ha->current_topology = ISP_CFG_N; in qla2x00_configure_hba()
4728 ha->switch_cap = sw_cap; in qla2x00_configure_hba()
4729 ha->operating_mode = P2P; in qla2x00_configure_hba()
4730 ha->current_topology = ISP_CFG_F; in qla2x00_configure_hba()
4737 ha->switch_cap = 0; in qla2x00_configure_hba()
4738 ha->current_topology = ISP_CFG_NL; in qla2x00_configure_hba()
4749 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_configure_hba()
4753 } else if (!(topo == 2 && ha->flags.n2n_bigger)) in qla2x00_configure_hba()
4755 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_configure_hba()
4772 struct qla_hw_data *ha = vha->hw; in qla2x00_set_model_info() local
4773 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && in qla2x00_set_model_info()
4774 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); in qla2x00_set_model_info()
4779 memcpy(ha->model_number, model, len); in qla2x00_set_model_info()
4780 st = en = ha->model_number; in qla2x00_set_model_info()
4788 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
4790 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
4792 strlcpy(ha->model_desc, in qla2x00_set_model_info()
4794 sizeof(ha->model_desc)); in qla2x00_set_model_info()
4796 index = (ha->pdev->subsystem_device & 0xff); in qla2x00_set_model_info()
4798 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && in qla2x00_set_model_info()
4800 strlcpy(ha->model_number, in qla2x00_set_model_info()
4802 sizeof(ha->model_number)); in qla2x00_set_model_info()
4803 strlcpy(ha->model_desc, in qla2x00_set_model_info()
4805 sizeof(ha->model_desc)); in qla2x00_set_model_info()
4807 strlcpy(ha->model_number, def, in qla2x00_set_model_info()
4808 sizeof(ha->model_number)); in qla2x00_set_model_info()
4811 if (IS_FWI2_CAPABLE(ha)) in qla2x00_set_model_info()
4812 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, in qla2x00_set_model_info()
4813 sizeof(ha->model_desc)); in qla2x00_set_model_info()
4822 struct qla_hw_data *ha = vha->hw; in qla2xxx_nvram_wwn_from_ofw() local
4823 struct pci_dev *pdev = ha->pdev; in qla2xxx_nvram_wwn_from_ofw()
4858 struct qla_hw_data *ha = vha->hw; in qla2x00_nvram_config() local
4859 init_cb_t *icb = ha->init_cb; in qla2x00_nvram_config()
4860 nvram_t *nv = ha->nvram; in qla2x00_nvram_config()
4861 uint8_t *ptr = ha->nvram; in qla2x00_nvram_config()
4862 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nvram_config()
4867 ha->nvram_size = sizeof(*nv); in qla2x00_nvram_config()
4868 ha->nvram_base = 0; in qla2x00_nvram_config()
4869 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) in qla2x00_nvram_config()
4871 ha->nvram_base = 0x80; in qla2x00_nvram_config()
4874 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); in qla2x00_nvram_config()
4875 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) in qla2x00_nvram_config()
4881 nv, ha->nvram_size); in qla2x00_nvram_config()
4897 memset(nv, 0, ha->nvram_size); in qla2x00_nvram_config()
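
The nvram_config entries above read the legacy NVRAM image and run a byte-wise checksum over it before trusting its contents; on failure the driver zeroes nv and falls back to defaults. A small sketch of the validation step (the accumulation line itself is not in this listing, so its exact form is an assumption):

    uint8_t chksum = 0;
    uint8_t *ptr = ha->nvram;
    uint32_t cnt;

    ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
    for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
        chksum += *ptr++;                       /* assumed: sum of all image bytes */

    if (chksum) {                               /* non-zero sum (or bad id/version) => invalid */
        memset(nv, 0, ha->nvram_size);          /* wipe and apply safe per-ISP defaults */
        /* ... default frame size, execution throttle, port/node names, etc. ... */
    }
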
4900 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
4907 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
4913 } else if (IS_QLA2100(ha)) { in qla2x00_nvram_config()
4945 memset(icb, 0, ha->init_cb_size); in qla2x00_nvram_config()
4955 if (IS_QLA23XX(ha)) { in qla2x00_nvram_config()
4961 if (IS_QLA2300(ha)) { in qla2x00_nvram_config()
4962 if (ha->fb_rev == FPM_2310) { in qla2x00_nvram_config()
4963 strcpy(ha->model_number, "QLA2310"); in qla2x00_nvram_config()
4965 strcpy(ha->model_number, "QLA2300"); in qla2x00_nvram_config()
4971 } else if (IS_QLA2200(ha)) { in qla2x00_nvram_config()
4983 strcpy(ha->model_number, "QLA22xx"); in qla2x00_nvram_config()
4985 strcpy(ha->model_number, "QLA2100"); in qla2x00_nvram_config()
5002 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); in qla2x00_nvram_config()
5029 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); in qla2x00_nvram_config()
5031 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) in qla2x00_nvram_config()
5032 ha->flags.disable_risc_code_load = 0; in qla2x00_nvram_config()
5033 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); in qla2x00_nvram_config()
5034 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); in qla2x00_nvram_config()
5035 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); in qla2x00_nvram_config()
5036 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; in qla2x00_nvram_config()
5037 ha->flags.disable_serdes = 0; in qla2x00_nvram_config()
5039 ha->operating_mode = in qla2x00_nvram_config()
5042 memcpy(ha->fw_seriallink_options, nv->seriallink_options, in qla2x00_nvram_config()
5043 sizeof(ha->fw_seriallink_options)); in qla2x00_nvram_config()
5046 ha->serial0 = icb->port_name[5]; in qla2x00_nvram_config()
5047 ha->serial1 = icb->port_name[6]; in qla2x00_nvram_config()
5048 ha->serial2 = icb->port_name[7]; in qla2x00_nvram_config()
5054 ha->retry_count = nv->retry_count; in qla2x00_nvram_config()
5061 ha->login_timeout = nv->login_timeout; in qla2x00_nvram_config()
5064 ha->r_a_tov = 100; in qla2x00_nvram_config()
5066 ha->loop_reset_delay = nv->reset_delay; in qla2x00_nvram_config()
5079 ha->loop_down_abort_time = in qla2x00_nvram_config()
5082 ha->link_down_timeout = nv->link_down_timeout; in qla2x00_nvram_config()
5083 ha->loop_down_abort_time = in qla2x00_nvram_config()
5084 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla2x00_nvram_config()
5090 ha->port_down_retry_count = nv->port_down_retry_count; in qla2x00_nvram_config()
5092 ha->port_down_retry_count = qlport_down_retry; in qla2x00_nvram_config()
5094 ha->login_retry_count = nv->retry_count; in qla2x00_nvram_config()
5095 if (ha->port_down_retry_count == nv->port_down_retry_count && in qla2x00_nvram_config()
5096 ha->port_down_retry_count > 3) in qla2x00_nvram_config()
5097 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
5098 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla2x00_nvram_config()
5099 ha->login_retry_count = ha->port_down_retry_count; in qla2x00_nvram_config()
5101 ha->login_retry_count = ql2xloginretrycount; in qla2x00_nvram_config()
5108 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { in qla2x00_nvram_config()
5121 ha->zio_mode = icb->add_firmware_options[0] & in qla2x00_nvram_config()
5123 ha->zio_timer = icb->interrupt_delay_timer ? in qla2x00_nvram_config()
5129 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla2x00_nvram_config()
5130 ha->zio_mode = QLA_ZIO_MODE_6; in qla2x00_nvram_config()
5134 ha->zio_mode, ha->zio_timer * 100); in qla2x00_nvram_config()
5136 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; in qla2x00_nvram_config()
5137 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; in qla2x00_nvram_config()
5282 struct qla_hw_data *ha = vha->hw; in qla_get_login_template() local
5287 memset(ha->init_cb, 0, ha->init_cb_size); in qla_get_login_template()
5288 sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); in qla_get_login_template()
5289 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, in qla_get_login_template()
5290 ha->init_cb, sz); in qla_get_login_template()
5296 q = (__be32 *)&ha->plogi_els_payld.fl_csp; in qla_get_login_template()
5298 bp = (uint32_t *)ha->init_cb; in qla_get_login_template()
5300 ha->flags.plogi_template_valid = 1; in qla_get_login_template()
5320 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_loop() local
5349 if ((ha->current_topology == ISP_CFG_FL || in qla2x00_configure_loop()
5350 ha->current_topology == ISP_CFG_F) && in qla2x00_configure_loop()
5356 } else if (ha->current_topology == ISP_CFG_NL || in qla2x00_configure_loop()
5357 ha->current_topology == ISP_CFG_N) { in qla2x00_configure_loop()
5393 ha->flags.fw_init_done = 1; in qla2x00_configure_loop()
5399 if (ha->flags.edif_enabled && DBELL_INACTIVE(vha)) in qla2x00_configure_loop()
5401 ha->link_data_rate); in qla2x00_configure_loop()
5409 spin_lock_irqsave(&ha->tgt.atio_lock, flags); in qla2x00_configure_loop()
5411 spin_unlock_irqrestore(&ha->tgt.atio_lock, in qla2x00_configure_loop()
5488 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_local_loop() local
5492 if (N2N_TOPO(ha)) in qla2x00_configure_local_loop()
5500 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); in qla2x00_configure_local_loop()
5501 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, in qla2x00_configure_local_loop()
5509 ha->gid_list, entries * sizeof(*ha->gid_list)); in qla2x00_configure_local_loop()
5539 gid = ha->gid_list; in qla2x00_configure_local_loop()
5544 if (IS_QLA2100(ha) || IS_QLA2200(ha)) in qla2x00_configure_local_loop()
5548 gid = (void *)gid + ha->gid_list_info_size; in qla2x00_configure_local_loop()
5557 (ha->current_topology == ISP_CFG_NL)) in qla2x00_configure_local_loop()
5581 if (ha->current_topology != ISP_CFG_N) { in qla2x00_configure_local_loop()
5633 fcport->fp_speed = ha->link_data_rate; in qla2x00_configure_local_loop()
5682 struct qla_hw_data *ha = vha->hw; in qla2x00_iidma_fcport() local
5684 if (!IS_IIDMA_CAPABLE(ha)) in qla2x00_iidma_fcport()
5691 fcport->fp_speed > ha->link_data_rate || in qla2x00_iidma_fcport()
5692 !ha->flags.gpsc_supported) in qla2x00_iidma_fcport()
5704 qla2x00_get_link_speed_str(ha, fcport->fp_speed), in qla2x00_iidma_fcport()
5932 struct qla_hw_data *ha = vha->hw; in qla2x00_configure_fabric() local
5936 if (IS_FWI2_CAPABLE(ha)) in qla2x00_configure_fabric()
5967 loop_id = NPH_SNS_LID(ha); in qla2x00_configure_fabric()
5968 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, in qla2x00_configure_fabric()
6031 if (USE_ASYNC_SCAN(ha)) { in qla2x00_configure_fabric()
6081 struct qla_hw_data *ha = vha->hw; in qla2x00_find_all_fabric_devs() local
6082 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla2x00_find_all_fabric_devs()
6088 if (!ha->swl) in qla2x00_find_all_fabric_devs()
6089 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), in qla2x00_find_all_fabric_devs()
6091 swl = ha->swl; in qla2x00_find_all_fabric_devs()
6097 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); in qla2x00_find_all_fabric_devs()
6138 loop_id = ha->min_external_loopid; in qla2x00_find_all_fabric_devs()
6139 for (; loop_id <= ha->max_loop_id; loop_id++) { in qla2x00_find_all_fabric_devs()
6143 if (ha->current_topology == ISP_CFG_FL && in qla2x00_find_all_fabric_devs()
6214 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == in qla2x00_find_all_fabric_devs()
6373 struct qla_hw_data *ha = vha->hw; in qla2x00_reserve_mgmt_server_loop_id() local
6376 set_bit(NPH_MGMT_SERVER, ha->loop_id_map); in qla2x00_reserve_mgmt_server_loop_id()
6381 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_reserve_mgmt_server_loop_id()
6389 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_reserve_mgmt_server_loop_id()
6416 struct qla_hw_data *ha = vha->hw; in qla2x00_fabric_login() local
6429 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id, in qla2x00_fabric_login()
6482 if (IS_FWI2_CAPABLE(ha)) { in qla2x00_fabric_login()
6507 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
6525 ha->isp_ops->fabric_logout(vha, fcport->loop_id, in qla2x00_fabric_login()
6638 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) in qla2x00_perform_loop_resync() argument
6642 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) { in qla2x00_perform_loop_resync()
6644 atomic_set(&ha->loop_down_timer, 0); in qla2x00_perform_loop_resync()
6645 if (!(ha->device_flags & DFLG_NO_CABLE)) { in qla2x00_perform_loop_resync()
6646 atomic_set(&ha->loop_state, LOOP_UP); in qla2x00_perform_loop_resync()
6647 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6648 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6649 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6651 rval = qla2x00_loop_resync(ha); in qla2x00_perform_loop_resync()
6653 atomic_set(&ha->loop_state, LOOP_DEAD); in qla2x00_perform_loop_resync()
6655 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); in qla2x00_perform_loop_resync()
6666 struct qla_hw_data *ha = base_vha->hw; in qla2x00_update_fcports() local
6669 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
6676 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
6679 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_update_fcports()
6685 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_update_fcports()
6692 struct qla_hw_data *ha = vha->hw; in qla83xx_reset_ownership() local
6698 if (IS_QLA8044(ha)) { in qla83xx_reset_ownership()
6713 (i != ha->portnum)) { in qla83xx_reset_ownership()
6723 ((i + 8) != ha->portnum)) { in qla83xx_reset_ownership()
6733 drv_presence_mask = ~((1 << (ha->portnum)) | in qla83xx_reset_ownership()
6741 (ha->portnum < fcoe_other_function)) { in qla83xx_reset_ownership()
6744 ha->flags.nic_core_reset_owner = 1; in qla83xx_reset_ownership()
6752 struct qla_hw_data *ha = vha->hw; in __qla83xx_set_drv_ack() local
6757 drv_ack |= (1 << ha->portnum); in __qla83xx_set_drv_ack()
6768 struct qla_hw_data *ha = vha->hw; in __qla83xx_clear_drv_ack() local
6773 drv_ack &= ~(1 << ha->portnum); in __qla83xx_clear_drv_ack()
6807 struct qla_hw_data *ha = vha->hw; in qla83xx_idc_audit() local
6812 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); in qla83xx_idc_audit()
6813 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
6814 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); in qla83xx_idc_audit()
6820 jiffies_to_msecs(ha->idc_audit_ts)) / 1000); in qla83xx_idc_audit()
6821 idc_audit_reg = (ha->portnum) | in qla83xx_idc_audit()
6837 struct qla_hw_data *ha = vha->hw; in qla83xx_initiating_reset() local
6850 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { in qla83xx_initiating_reset()
6892 struct qla_hw_data *ha = vha->hw; in qla83xx_check_driver_presence() local
6895 if (drv_presence & (1 << ha->portnum)) in qla83xx_check_driver_presence()
6905 struct qla_hw_data *ha = vha->hw; in qla83xx_nic_core_reset() local
6921 ha->portnum); in qla83xx_nic_core_reset()
6938 ha->flags.nic_core_hung = 0; in qla83xx_nic_core_reset()
6953 struct qla_hw_data *ha = vha->hw; in qla2xxx_mctp_dump() local
6956 if (!IS_MCTP_CAPABLE(ha)) { in qla2xxx_mctp_dump()
6963 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
6964 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, in qla2xxx_mctp_dump()
6965 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); in qla2xxx_mctp_dump()
6967 if (!ha->mctp_dump) { in qla2xxx_mctp_dump()
6975 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, in qla2xxx_mctp_dump()
6983 vha->host_no, ha->mctp_dump); in qla2xxx_mctp_dump()
6984 ha->mctp_dumped = 1; in qla2xxx_mctp_dump()
6987 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { in qla2xxx_mctp_dump()
6988 ha->flags.nic_core_reset_hdlr_active = 1; in qla2xxx_mctp_dump()
6997 ha->flags.nic_core_reset_hdlr_active = 0; in qla2xxx_mctp_dump()
7015 struct qla_hw_data *ha = vha->hw; in qla2x00_quiesce_io() local
7020 "Quiescing I/O - ha=%p.\n", ha); in qla2x00_quiesce_io()
7022 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); in qla2x00_quiesce_io()
7027 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_quiesce_io()
7028 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { in qla2x00_quiesce_io()
7030 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_quiesce_io()
7034 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_quiesce_io()
7037 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_quiesce_io()
7051 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp_cleanup() local
7060 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
7062 ha->flags.chip_reset_done = 0; in qla2x00_abort_isp_cleanup()
7067 "Performing ISP error recovery - ha=%p.\n", ha); in qla2x00_abort_isp_cleanup()
7069 ha->flags.purge_mbox = 1; in qla2x00_abort_isp_cleanup()
7074 if (!(IS_P3P_TYPE(ha))) in qla2x00_abort_isp_cleanup()
7075 ha->isp_ops->reset_chip(vha); in qla2x00_abort_isp_cleanup()
7077 ha->link_data_rate = PORT_SPEED_UNKNOWN; in qla2x00_abort_isp_cleanup()
7078 SAVE_TOPO(ha); in qla2x00_abort_isp_cleanup()
7079 ha->flags.rida_fmt2 = 0; in qla2x00_abort_isp_cleanup()
7080 ha->flags.n2n_ae = 0; in qla2x00_abort_isp_cleanup()
7081 ha->flags.lip_ae = 0; in qla2x00_abort_isp_cleanup()
7082 ha->current_topology = 0; in qla2x00_abort_isp_cleanup()
7083 QLA_FW_STOPPED(ha); in qla2x00_abort_isp_cleanup()
7084 ha->flags.fw_init_done = 0; in qla2x00_abort_isp_cleanup()
7085 ha->chip_reset++; in qla2x00_abort_isp_cleanup()
7086 ha->base_qpair->chip_reset = ha->chip_reset; in qla2x00_abort_isp_cleanup()
7087 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0; in qla2x00_abort_isp_cleanup()
7088 ha->base_qpair->prev_completion_cnt = 0; in qla2x00_abort_isp_cleanup()
7089 for (i = 0; i < ha->max_qpairs; i++) { in qla2x00_abort_isp_cleanup()
7090 if (ha->queue_pair_map[i]) { in qla2x00_abort_isp_cleanup()
7091 ha->queue_pair_map[i]->chip_reset = in qla2x00_abort_isp_cleanup()
7092 ha->base_qpair->chip_reset; in qla2x00_abort_isp_cleanup()
7093 ha->queue_pair_map[i]->cmd_cnt = in qla2x00_abort_isp_cleanup()
7094 ha->queue_pair_map[i]->cmd_completion_cnt = 0; in qla2x00_abort_isp_cleanup()
7095 ha->base_qpair->prev_completion_cnt = 0; in qla2x00_abort_isp_cleanup()
7100 if (atomic_read(&ha->num_pend_mbx_stage3)) { in qla2x00_abort_isp_cleanup()
7101 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); in qla2x00_abort_isp_cleanup()
7102 complete(&ha->mbx_intr_comp); in qla2x00_abort_isp_cleanup()
7106 while (atomic_read(&ha->num_pend_mbx_stage3) || in qla2x00_abort_isp_cleanup()
7107 atomic_read(&ha->num_pend_mbx_stage2) || in qla2x00_abort_isp_cleanup()
7108 atomic_read(&ha->num_pend_mbx_stage1)) { in qla2x00_abort_isp_cleanup()
7114 ha->flags.purge_mbox = 0; in qla2x00_abort_isp_cleanup()
7121 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7122 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
7124 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7128 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7131 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7143 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7144 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { in qla2x00_abort_isp_cleanup()
7146 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7151 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7154 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp_cleanup()
7157 if (IS_P3P_TYPE(ha)) { in qla2x00_abort_isp_cleanup()
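
The vport_slock blocks repeated through update_fcports and abort_isp_cleanup all follow the same walk of ha->vp_list: pin the vport, drop the lock to do the per-vport work, then re-take the lock and unpin. A hedged sketch of that pattern (the vref_count helpers and the per-vport call shown are assumptions based on the surrounding driver, not taken from this listing):

    struct scsi_qla_host *vp, *tvp;
    unsigned long flags;

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
        atomic_inc(&vp->vref_count);            /* pin vp across the unlock (assumed helper) */
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        qla2x00_mark_all_devices_lost(vp);      /* example per-vport work, done unlocked */

        spin_lock_irqsave(&ha->vport_slock, flags);
        atomic_dec(&vp->vref_count);
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);
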
7187 struct qla_hw_data *ha = vha->hw; in qla2x00_abort_isp() local
7189 struct req_que *req = ha->req_q_map[0]; in qla2x00_abort_isp()
7198 if (qla2x00_isp_reg_stat(ha)) { in qla2x00_abort_isp()
7205 ha->flags.chip_reset_done = 1; in qla2x00_abort_isp()
7212 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
7220 if (unlikely(pci_channel_offline(ha->pdev) && in qla2x00_abort_isp()
7221 ha->flags.pci_channel_io_perm_failure)) { in qla2x00_abort_isp()
7242 ha->isp_ops->get_flash_version(vha, req->ring); in qla2x00_abort_isp()
7244 if (qla2x00_isp_reg_stat(ha)) { in qla2x00_abort_isp()
7249 ha->isp_ops->nvram_config(vha); in qla2x00_abort_isp()
7251 if (qla2x00_isp_reg_stat(ha)) { in qla2x00_abort_isp()
7269 ha->isp_ops->enable_intrs(ha); in qla2x00_abort_isp()
7271 ha->isp_abort_cnt = 0; in qla2x00_abort_isp()
7274 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) in qla2x00_abort_isp()
7276 if (ha->fce) { in qla2x00_abort_isp()
7277 ha->flags.fce_enabled = 1; in qla2x00_abort_isp()
7278 memset(ha->fce, 0, in qla2x00_abort_isp()
7279 fce_calc_size(ha->fce_bufs)); in qla2x00_abort_isp()
7281 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla2x00_abort_isp()
7282 &ha->fce_bufs); in qla2x00_abort_isp()
7287 ha->flags.fce_enabled = 0; in qla2x00_abort_isp()
7291 if (ha->eft) { in qla2x00_abort_isp()
7292 memset(ha->eft, 0, EFT_SIZE); in qla2x00_abort_isp()
7294 ha->eft_dma, EFT_NUM_BUFFERS); in qla2x00_abort_isp()
7304 if (ha->isp_abort_cnt == 0) { in qla2x00_abort_isp()
7318 ha->isp_abort_cnt--; in qla2x00_abort_isp()
7321 ha->isp_abort_cnt); in qla2x00_abort_isp()
7325 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; in qla2x00_abort_isp()
7328 "more times.\n", ha->isp_abort_cnt); in qla2x00_abort_isp()
7344 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
7345 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { in qla2x00_abort_isp()
7348 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
7352 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_abort_isp()
7356 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla2x00_abort_isp()
7358 if (IS_QLA8031(ha)) { in qla2x00_abort_isp()
7387 struct qla_hw_data *ha = vha->hw; in qla2x00_restart_isp() local
7392 status = ha->isp_ops->chip_diag(vha); in qla2x00_restart_isp()
7405 ha->flags.chip_reset_done = 1; in qla2x00_restart_isp()
7408 qla25xx_init_queues(ha); in qla2x00_restart_isp()
7417 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); in qla2x00_restart_isp()
7424 qla25xx_init_queues(struct qla_hw_data *ha) in qla25xx_init_queues() argument
7428 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla25xx_init_queues()
7432 for (i = 1; i < ha->max_rsp_queues; i++) { in qla25xx_init_queues()
7433 rsp = ha->rsp_q_map[i]; in qla25xx_init_queues()
7434 if (rsp && test_bit(i, ha->rsp_qid_map)) { in qla25xx_init_queues()
7447 for (i = 1; i < ha->max_req_queues; i++) { in qla25xx_init_queues()
7448 req = ha->req_q_map[i]; in qla25xx_init_queues()
7449 if (req && test_bit(i, ha->req_qid_map)) { in qla25xx_init_queues()
7477 struct qla_hw_data *ha = vha->hw; in qla2x00_reset_adapter() local
7478 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_reset_adapter()
7481 ha->isp_ops->disable_intrs(ha); in qla2x00_reset_adapter()
7483 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
7488 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla2x00_reset_adapter()
7497 struct qla_hw_data *ha = vha->hw; in qla24xx_reset_adapter() local
7498 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; in qla24xx_reset_adapter()
7500 if (IS_P3P_TYPE(ha)) in qla24xx_reset_adapter()
7504 ha->isp_ops->disable_intrs(ha); in qla24xx_reset_adapter()
7506 spin_lock_irqsave(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
7511 spin_unlock_irqrestore(&ha->hardware_lock, flags); in qla24xx_reset_adapter()
7513 if (IS_NOPOLLING_TYPE(ha)) in qla24xx_reset_adapter()
7514 ha->isp_ops->enable_intrs(ha); in qla24xx_reset_adapter()
7526 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_wwn_from_ofw() local
7527 struct pci_dev *pdev = ha->pdev; in qla24xx_nvram_wwn_from_ofw()
7552 struct qla_hw_data *ha = vha->hw; in qla24xx_nvram_config() local
7555 icb = (struct init_cb_24xx *)ha->init_cb; in qla24xx_nvram_config()
7556 nv = ha->nvram; in qla24xx_nvram_config()
7559 if (ha->port_no == 0) { in qla24xx_nvram_config()
7560 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; in qla24xx_nvram_config()
7561 ha->vpd_base = FA_NVRAM_VPD0_ADDR; in qla24xx_nvram_config()
7563 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; in qla24xx_nvram_config()
7564 ha->vpd_base = FA_NVRAM_VPD1_ADDR; in qla24xx_nvram_config()
7567 ha->nvram_size = sizeof(*nv); in qla24xx_nvram_config()
7568 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla24xx_nvram_config()
7571 ha->vpd = ha->nvram + VPD_OFFSET; in qla24xx_nvram_config()
7572 ha->isp_ops->read_nvram(vha, ha->vpd, in qla24xx_nvram_config()
7573 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); in qla24xx_nvram_config()
7577 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); in qla24xx_nvram_config()
7578 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) in qla24xx_nvram_config()
7584 nv, ha->nvram_size); in qla24xx_nvram_config()
7601 memset(nv, 0, ha->nvram_size); in qla24xx_nvram_config()
7609 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla24xx_nvram_config()
7653 memset(icb, 0, ha->init_cb_size); in qla24xx_nvram_config()
7672 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); in qla24xx_nvram_config()
7698 ha->flags.disable_risc_code_load = 0; in qla24xx_nvram_config()
7699 ha->flags.enable_lip_reset = 0; in qla24xx_nvram_config()
7700 ha->flags.enable_lip_full_login = in qla24xx_nvram_config()
7702 ha->flags.enable_target_reset = in qla24xx_nvram_config()
7704 ha->flags.enable_led_scheme = 0; in qla24xx_nvram_config()
7705 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; in qla24xx_nvram_config()
7707 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
7710 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, in qla24xx_nvram_config()
7711 sizeof(ha->fw_seriallink_options24)); in qla24xx_nvram_config()
7714 ha->serial0 = icb->port_name[5]; in qla24xx_nvram_config()
7715 ha->serial1 = icb->port_name[6]; in qla24xx_nvram_config()
7716 ha->serial2 = icb->port_name[7]; in qla24xx_nvram_config()
7722 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
7729 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla24xx_nvram_config()
7732 ha->r_a_tov = 100; in qla24xx_nvram_config()
7734 ha->loop_reset_delay = nv->reset_delay; in qla24xx_nvram_config()
7747 ha->loop_down_abort_time = in qla24xx_nvram_config()
7750 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla24xx_nvram_config()
7751 ha->loop_down_abort_time = in qla24xx_nvram_config()
7752 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla24xx_nvram_config()
7756 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla24xx_nvram_config()
7758 ha->port_down_retry_count = qlport_down_retry; in qla24xx_nvram_config()
7761 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla24xx_nvram_config()
7762 if (ha->port_down_retry_count == in qla24xx_nvram_config()
7764 ha->port_down_retry_count > 3) in qla24xx_nvram_config()
7765 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
7766 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla24xx_nvram_config()
7767 ha->login_retry_count = ha->port_down_retry_count; in qla24xx_nvram_config()
7769 ha->login_retry_count = ql2xloginretrycount; in qla24xx_nvram_config()
7776 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla24xx_nvram_config()
7778 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla24xx_nvram_config()
7783 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla24xx_nvram_config()
7784 ha->zio_mode = QLA_ZIO_MODE_6; in qla24xx_nvram_config()
7788 ha->zio_mode, ha->zio_timer * 100); in qla24xx_nvram_config()
7791 (uint32_t)ha->zio_mode); in qla24xx_nvram_config()
7792 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla24xx_nvram_config()
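Editor's note: the qla24xx_nvram_config() matches above show the NVRAM image being summed as 32-bit words and wiped to defaults when invalid. The sketch below illustrates only that checksum-then-fallback shape in plain C; the byte layout, the "ISP " signature check, and set_safe_defaults() are assumptions for this example rather than the driver's exact logic.

/* Sketch: validate an NVRAM image and fall back to defaults (illustration only). */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool nvram_is_valid(const uint8_t *nv, size_t size)
{
    uint32_t chksum = 0;

    /* Accumulate the image as little-endian 32-bit words (size >> 2 of them). */
    for (size_t i = 0; i + 4 <= size; i += 4) {
        uint32_t word = (uint32_t)nv[i]
                      | (uint32_t)nv[i + 1] << 8
                      | (uint32_t)nv[i + 2] << 16
                      | (uint32_t)nv[i + 3] << 24;
        chksum += word;
    }

    /* A usable image sums to zero and carries the expected signature
     * (assumed here to be "ISP " at the start of the buffer). */
    return chksum == 0 && memcmp(nv, "ISP ", 4) == 0;
}

static void set_safe_defaults(uint8_t *nv, size_t size)
{
    (void)nv;
    (void)size;                          /* placeholder for default values */
}

static void nvram_config(uint8_t *nv, size_t size)
{
    if (!nvram_is_valid(nv, size)) {
        memset(nv, 0, size);             /* wipe the bad image */
        set_safe_defaults(nv, size);     /* then repopulate defaults */
    }
}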
7893 struct qla_hw_data *ha = vha->hw; in qla28xx_get_aux_images() local
7898 if (!ha->flt_region_aux_img_status_pri) { in qla28xx_get_aux_images()
7904 ha->flt_region_aux_img_status_pri, in qla28xx_get_aux_images()
7930 if (!ha->flt_region_aux_img_status_sec) { in qla28xx_get_aux_images()
7937 ha->flt_region_aux_img_status_sec, in qla28xx_get_aux_images()
7991 struct qla_hw_data *ha = vha->hw; in qla27xx_get_active_image() local
7996 if (!ha->flt_region_img_status_pri) { in qla27xx_get_active_image()
8002 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != in qla27xx_get_active_image()
8031 if (!ha->flt_region_img_status_sec) { in qla27xx_get_active_image()
8037 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); in qla27xx_get_active_image()
8101 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_flash() local
8102 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_flash()
8103 struct fwdt *fwdt = ha->fwdt; in qla24xx_load_risc_flash()
8135 dlen = ha->fw_transfer_size >> 2; in qla24xx_load_risc_flash()
8161 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla24xx_load_risc_flash()
8245 struct qla_hw_data *ha = vha->hw; in qla2x00_load_risc() local
8246 struct req_que *req = ha->req_q_map[0]; in qla2x00_load_risc()
8302 wlen = (uint16_t)(ha->fw_transfer_size >> 1); in qla2x00_load_risc()
8348 struct qla_hw_data *ha = vha->hw; in qla24xx_load_risc_blob() local
8349 struct req_que *req = ha->req_q_map[0]; in qla24xx_load_risc_blob()
8350 struct fwdt *fwdt = ha->fwdt; in qla24xx_load_risc_blob()
8389 dlen = ha->fw_transfer_size >> 2; in qla24xx_load_risc_blob()
8417 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla24xx_load_risc_blob()
8515 struct qla_hw_data *ha = vha->hw; in qla81xx_load_risc() local
8527 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) in qla81xx_load_risc()
8537 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec); in qla81xx_load_risc()
8544 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); in qla81xx_load_risc()
8550 if (!rval || !ha->flt_region_gold_fw) in qla81xx_load_risc()
8555 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); in qla81xx_load_risc()
8560 ha->flags.running_gold_fw = 1; in qla81xx_load_risc()
8568 struct qla_hw_data *ha = vha->hw; in qla2x00_try_to_stop_firmware() local
8570 if (ha->flags.pci_channel_io_perm_failure) in qla2x00_try_to_stop_firmware()
8572 if (!IS_FWI2_CAPABLE(ha)) in qla2x00_try_to_stop_firmware()
8574 if (!ha->fw_major_version) in qla2x00_try_to_stop_firmware()
8576 if (!ha->flags.fw_started) in qla2x00_try_to_stop_firmware()
8582 ha->isp_ops->reset_chip(vha); in qla2x00_try_to_stop_firmware()
8583 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) in qla2x00_try_to_stop_firmware()
8592 QLA_FW_STOPPED(ha); in qla2x00_try_to_stop_firmware()
8593 ha->flags.fw_init_done = 0; in qla2x00_try_to_stop_firmware()
8602 struct qla_hw_data *ha = vha->hw; in qla24xx_configure_vhba() local
8603 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); in qla24xx_configure_vhba()
8612 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); in qla24xx_configure_vhba()
8618 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, in qla24xx_configure_vhba()
8651 struct qla_hw_data *ha = vha->hw; in qla84xx_get_chip() local
8657 if (cs84xx->bus == ha->pdev->bus) { in qla84xx_get_chip()
8670 cs84xx->bus = ha->pdev->bus; in qla84xx_get_chip()
8693 struct qla_hw_data *ha = vha->hw; in qla84xx_put_chip() local
8695 if (ha->cs84xx) in qla84xx_put_chip()
8696 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); in qla84xx_put_chip()
8704 struct qla_hw_data *ha = vha->hw; in qla84xx_init_chip() local
8706 mutex_lock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
8710 mutex_unlock(&ha->cs84xx->fw_update_mutex); in qla84xx_init_chip()
8728 struct qla_hw_data *ha = vha->hw; in qla81xx_nvram_config() local
8733 icb = (struct init_cb_81xx *)ha->init_cb; in qla81xx_nvram_config()
8734 nv = ha->nvram; in qla81xx_nvram_config()
8737 ha->nvram_size = sizeof(*nv); in qla81xx_nvram_config()
8738 ha->vpd_size = FA_NVRAM_VPD_SIZE; in qla81xx_nvram_config()
8739 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) in qla81xx_nvram_config()
8740 ha->vpd_size = FA_VPD_SIZE_82XX; in qla81xx_nvram_config()
8742 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) in qla81xx_nvram_config()
8746 ha->vpd = ha->nvram + VPD_OFFSET; in qla81xx_nvram_config()
8748 faddr = ha->flt_region_vpd; in qla81xx_nvram_config()
8749 if (IS_QLA28XX(ha)) { in qla81xx_nvram_config()
8751 faddr = ha->flt_region_vpd_sec; in qla81xx_nvram_config()
8757 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); in qla81xx_nvram_config()
8760 faddr = ha->flt_region_nvram; in qla81xx_nvram_config()
8761 if (IS_QLA28XX(ha)) { in qla81xx_nvram_config()
8763 faddr = ha->flt_region_nvram_sec; in qla81xx_nvram_config()
8769 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); in qla81xx_nvram_config()
8772 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) in qla81xx_nvram_config()
8778 nv, ha->nvram_size); in qla81xx_nvram_config()
8795 memset(nv, 0, ha->nvram_size); in qla81xx_nvram_config()
8802 nv->port_name[1] = 0x00 + ha->port_no + 1; in qla81xx_nvram_config()
8836 nv->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
8841 if (IS_T10_PI_CAPABLE(ha)) in qla81xx_nvram_config()
8847 memset(icb, 0, ha->init_cb_size); in qla81xx_nvram_config()
8874 icb->enode_mac[5] = 0x06 + ha->port_no + 1; in qla81xx_nvram_config()
8878 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); in qla81xx_nvram_config()
8879 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); in qla81xx_nvram_config()
8904 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { in qla81xx_nvram_config()
8906 ha->flags.scm_supported_a = 1; in qla81xx_nvram_config()
8910 ha->flags.disable_risc_code_load = 0; in qla81xx_nvram_config()
8911 ha->flags.enable_lip_reset = 0; in qla81xx_nvram_config()
8912 ha->flags.enable_lip_full_login = in qla81xx_nvram_config()
8914 ha->flags.enable_target_reset = in qla81xx_nvram_config()
8916 ha->flags.enable_led_scheme = 0; in qla81xx_nvram_config()
8917 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; in qla81xx_nvram_config()
8919 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
8923 ha->serial0 = icb->port_name[5]; in qla81xx_nvram_config()
8924 ha->serial1 = icb->port_name[6]; in qla81xx_nvram_config()
8925 ha->serial2 = icb->port_name[7]; in qla81xx_nvram_config()
8931 ha->retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
8938 ha->login_timeout = le16_to_cpu(nv->login_timeout); in qla81xx_nvram_config()
8941 ha->r_a_tov = 100; in qla81xx_nvram_config()
8943 ha->loop_reset_delay = nv->reset_delay; in qla81xx_nvram_config()
8956 ha->loop_down_abort_time = in qla81xx_nvram_config()
8959 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); in qla81xx_nvram_config()
8960 ha->loop_down_abort_time = in qla81xx_nvram_config()
8961 (LOOP_DOWN_TIME - ha->link_down_timeout); in qla81xx_nvram_config()
8965 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); in qla81xx_nvram_config()
8967 ha->port_down_retry_count = qlport_down_retry; in qla81xx_nvram_config()
8970 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); in qla81xx_nvram_config()
8971 if (ha->port_down_retry_count == in qla81xx_nvram_config()
8973 ha->port_down_retry_count > 3) in qla81xx_nvram_config()
8974 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
8975 else if (ha->port_down_retry_count > (int)ha->login_retry_count) in qla81xx_nvram_config()
8976 ha->login_retry_count = ha->port_down_retry_count; in qla81xx_nvram_config()
8978 ha->login_retry_count = ql2xloginretrycount; in qla81xx_nvram_config()
8982 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) in qla81xx_nvram_config()
8987 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & in qla81xx_nvram_config()
8989 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? in qla81xx_nvram_config()
8995 if (ha->zio_mode != QLA_ZIO_DISABLED) { in qla81xx_nvram_config()
8996 ha->zio_mode = QLA_ZIO_MODE_6; in qla81xx_nvram_config()
9000 ha->zio_mode, in qla81xx_nvram_config()
9001 ha->zio_timer * 100); in qla81xx_nvram_config()
9004 (uint32_t)ha->zio_mode); in qla81xx_nvram_config()
9005 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); in qla81xx_nvram_config()
9016 ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); in qla81xx_nvram_config()
9029 struct qla_hw_data *ha = vha->hw; in qla82xx_restart_isp() local
9036 ha->flags.chip_reset_done = 1; in qla82xx_restart_isp()
9041 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); in qla82xx_restart_isp()
9062 ha->isp_ops->enable_intrs(ha); in qla82xx_restart_isp()
9064 ha->isp_abort_cnt = 0; in qla82xx_restart_isp()
9070 if (ha->fce) { in qla82xx_restart_isp()
9071 ha->flags.fce_enabled = 1; in qla82xx_restart_isp()
9072 memset(ha->fce, 0, in qla82xx_restart_isp()
9073 fce_calc_size(ha->fce_bufs)); in qla82xx_restart_isp()
9075 ha->fce_dma, ha->fce_bufs, ha->fce_mb, in qla82xx_restart_isp()
9076 &ha->fce_bufs); in qla82xx_restart_isp()
9081 ha->flags.fce_enabled = 0; in qla82xx_restart_isp()
9085 if (ha->eft) { in qla82xx_restart_isp()
9086 memset(ha->eft, 0, EFT_SIZE); in qla82xx_restart_isp()
9088 ha->eft_dma, EFT_NUM_BUFFERS); in qla82xx_restart_isp()
9101 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
9102 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { in qla82xx_restart_isp()
9105 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
9109 spin_lock_irqsave(&ha->vport_slock, flags); in qla82xx_restart_isp()
9113 spin_unlock_irqrestore(&ha->vport_slock, flags); in qla82xx_restart_isp()
9150 struct qla_hw_data *ha = vha->hw; in qla24xx_get_fcp_prio() local
9152 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) in qla24xx_get_fcp_prio()
9156 entries = ha->fcp_prio_cfg->num_entries; in qla24xx_get_fcp_prio()
9157 pri_entry = &ha->fcp_prio_cfg->entry[0]; in qla24xx_get_fcp_prio()
9305 struct qla_hw_data *ha = vha->hw; in qla2xxx_create_qpair() local
9310 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { in qla2xxx_create_qpair()
9328 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; in qla2xxx_create_qpair()
9331 mutex_lock(&ha->mq_lock); in qla2xxx_create_qpair()
9332 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); in qla2xxx_create_qpair()
9333 if (ha->num_qpairs >= ha->max_qpairs) { in qla2xxx_create_qpair()
9334 mutex_unlock(&ha->mq_lock); in qla2xxx_create_qpair()
9339 ha->num_qpairs++; in qla2xxx_create_qpair()
9340 set_bit(qpair_id, ha->qpair_qid_map); in qla2xxx_create_qpair()
9341 ha->queue_pair_map[qpair_id] = qpair; in qla2xxx_create_qpair()
9344 qpair->fw_started = ha->flags.fw_started; in qla2xxx_create_qpair()
9346 qpair->chip_reset = ha->base_qpair->chip_reset; in qla2xxx_create_qpair()
9347 qpair->enable_class_2 = ha->base_qpair->enable_class_2; in qla2xxx_create_qpair()
9349 ha->base_qpair->enable_explicit_conf; in qla2xxx_create_qpair()
9351 for (i = 0; i < ha->msix_count; i++) { in qla2xxx_create_qpair()
9352 msix = &ha->msix_entries[i]; in qla2xxx_create_qpair()
9368 qpair->pdev = ha->pdev; in qla2xxx_create_qpair()
9369 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) in qla2xxx_create_qpair()
9372 mutex_unlock(&ha->mq_lock); in qla2xxx_create_qpair()
9375 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); in qla2xxx_create_qpair()
9382 qpair->rsp = ha->rsp_q_map[rsp_id]; in qla2xxx_create_qpair()
9385 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, in qla2xxx_create_qpair()
9393 qpair->req = ha->req_q_map[req_id]; in qla2xxx_create_qpair()
9399 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { in qla2xxx_create_qpair()
9400 if (ha->fw_attributes & BIT_4) in qla2xxx_create_qpair()
9431 mutex_lock(&ha->mq_lock); in qla2xxx_create_qpair()
9437 ha->queue_pair_map[qpair_id] = NULL; in qla2xxx_create_qpair()
9438 clear_bit(qpair_id, ha->qpair_qid_map); in qla2xxx_create_qpair()
9439 ha->num_qpairs--; in qla2xxx_create_qpair()
9440 mutex_unlock(&ha->mq_lock); in qla2xxx_create_qpair()
9449 struct qla_hw_data *ha = qpair->hw; in qla2xxx_delete_qpair() local
9461 mutex_lock(&ha->mq_lock); in qla2xxx_delete_qpair()
9462 ha->queue_pair_map[qpair->id] = NULL; in qla2xxx_delete_qpair()
9463 clear_bit(qpair->id, ha->qpair_qid_map); in qla2xxx_delete_qpair()
9464 ha->num_qpairs--; in qla2xxx_delete_qpair()
9473 mutex_unlock(&ha->mq_lock); in qla2xxx_delete_qpair()
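Editor's note: the qla2xxx_create_qpair()/qla2xxx_delete_qpair() matches above show queue-pair ids being allocated from a bitmap under mq_lock and the same bookkeeping being undone on delete. The following is a simplified userspace analogue of that id-bitmap pattern (not the driver source); MAX_QPAIRS, struct qpair_table, and the field names are illustrative stand-ins.

/* Sketch: allocate/free queue-pair ids from a bitmap under a lock (illustration only). */
#include <pthread.h>
#include <stddef.h>

#define MAX_QPAIRS 16

struct qpair_table {
    pthread_mutex_t lock;                /* stands in for ha->mq_lock */
    unsigned long qid_map;               /* stands in for ha->qpair_qid_map */
    void *pairs[MAX_QPAIRS];             /* stands in for ha->queue_pair_map */
    int num_qpairs;
};

/* Find the first clear bit, mark it used, record the pair; -1 when full. */
static int qpair_table_add(struct qpair_table *t, void *qpair)
{
    int id = -1;

    pthread_mutex_lock(&t->lock);
    if (t->num_qpairs < MAX_QPAIRS) {
        for (id = 0; id < MAX_QPAIRS; id++)
            if (!(t->qid_map & (1UL << id)))
                break;
        t->qid_map |= 1UL << id;
        t->pairs[id] = qpair;
        t->num_qpairs++;
    }
    pthread_mutex_unlock(&t->lock);
    return id;
}

/* Undo the bookkeeping in the same order the delete path above suggests. */
static void qpair_table_remove(struct qpair_table *t, int id)
{
    pthread_mutex_lock(&t->lock);
    t->pairs[id] = NULL;
    t->qid_map &= ~(1UL << id);
    t->num_qpairs--;
    pthread_mutex_unlock(&t->lock);
}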