Lines Matching refs:qi
1181 if (iommu->qi) { in free_iommu()
1182 free_page((unsigned long)iommu->qi->desc); in free_iommu()
1183 kfree(iommu->qi->desc_status); in free_iommu()
1184 kfree(iommu->qi); in free_iommu()
1197 static inline void reclaim_free_desc(struct q_inval *qi) in reclaim_free_desc() argument
1199 while (qi->desc_status[qi->free_tail] == QI_DONE || in reclaim_free_desc()
1200 qi->desc_status[qi->free_tail] == QI_ABORT) { in reclaim_free_desc()
1201 qi->desc_status[qi->free_tail] = QI_FREE; in reclaim_free_desc()
1202 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; in reclaim_free_desc()
1203 qi->free_cnt++; in reclaim_free_desc()
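
The reclaim_free_desc() hits above show how completed slots are returned to the ring: free_tail chases the submissions, turning every QI_DONE or QI_ABORT slot back into QI_FREE and growing free_cnt. Below is a minimal user-space sketch of that walk; qi_model is a hypothetical stand-in for struct q_inval, and the 256-slot size stands in for QI_LENGTH.

#include <stdio.h>

#define QI_LENGTH 256

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

struct qi_model {
    int status[QI_LENGTH];
    int free_head;      /* next slot to hand out */
    int free_tail;      /* oldest slot not yet reclaimed */
    int free_cnt;       /* number of QI_FREE slots */
};

/* Walk forward from free_tail, returning finished or aborted slots to the free pool. */
static void reclaim_free_desc_model(struct qi_model *qi)
{
    while (qi->status[qi->free_tail] == QI_DONE ||
           qi->status[qi->free_tail] == QI_ABORT) {
        qi->status[qi->free_tail] = QI_FREE;
        qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
        qi->free_cnt++;
    }
}

int main(void)
{
    struct qi_model qi = { .free_cnt = QI_LENGTH };

    /* Pretend three descriptors were submitted and completed. */
    for (int i = 0; i < 3; i++) {
        qi.status[qi.free_head] = QI_DONE;
        qi.free_head = (qi.free_head + 1) % QI_LENGTH;
        qi.free_cnt--;
    }

    reclaim_free_desc_model(&qi);
    printf("free_tail=%d free_cnt=%d\n", qi.free_tail, qi.free_cnt);  /* 3, 256 */
    return 0;
}
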
1237 struct qi_desc *desc = iommu->qi->desc + head; in qi_dump_fault()
1256 desc = iommu->qi->desc + head; in qi_dump_fault()
1268 struct q_inval *qi = iommu->qi; in qi_check_fault() local
1271 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
1286 struct qi_desc *desc = qi->desc + head; in qi_check_fault()
1293 memcpy(desc, qi->desc + (wait_index << shift), in qi_check_fault()
1316 if (qi->desc_status[head] == QI_IN_USE) in qi_check_fault()
1317 qi->desc_status[head] = QI_ABORT; in qi_check_fault()
1321 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
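
The qi_check_fault() hits show the recovery contract with the waiter: on an invalidation time-out every slot still marked QI_IN_USE is flipped to QI_ABORT, and a submitter that finds QI_ABORT on its own wait slot backs out with -EAGAIN and resubmits (the queue-error path instead copies the wait descriptor over the faulted descriptor at the head). The sketch below models only that status handshake; abort_inflight(), check_wait_slot() and the slot list are hypothetical names, and the register reads and head/tail walk of the real function are omitted.

#include <stdio.h>

#define QI_LENGTH 256

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };
enum wait_state { WAIT_PENDING, WAIT_OK, WAIT_RETRY };

/* Fault side: abort every listed slot that is still in flight. */
static void abort_inflight(int *status, const int *slots, int n)
{
    for (int i = 0; i < n; i++)
        if (status[slots[i]] == QI_IN_USE)
            status[slots[i]] = QI_ABORT;
}

/* Waiter side: poll the wait slot; ABORT means back off and resubmit. */
static enum wait_state check_wait_slot(const int *status, int wait_index)
{
    if (status[wait_index] == QI_ABORT)
        return WAIT_RETRY;               /* -EAGAIN in the real code */
    if (status[wait_index] == QI_DONE)
        return WAIT_OK;
    return WAIT_PENDING;                 /* keep spinning */
}

int main(void)
{
    int status[QI_LENGTH] = { 0 };
    const int slots[] = { 4, 5 };        /* a descriptor and its wait descriptor */

    status[4] = status[5] = QI_IN_USE;
    abort_inflight(status, slots, 2);
    printf("wait slot 5 -> %s\n",
           check_wait_slot(status, 5) == WAIT_RETRY ? "retry" : "done/pending");
    return 0;
}
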
1343 struct q_inval *qi = iommu->qi; in qi_submit_sync() local
1354 if (!qi) in qi_submit_sync()
1374 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1380 while (qi->free_cnt < count + 2) { in qi_submit_sync()
1381 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
1383 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1386 index = qi->free_head; in qi_submit_sync()
1392 memcpy(qi->desc + offset, &desc[i], 1 << shift); in qi_submit_sync()
1393 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE; in qi_submit_sync()
1397 qi->desc_status[wait_index] = QI_IN_USE; in qi_submit_sync()
1403 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]); in qi_submit_sync()
1408 memcpy(qi->desc + offset, &wait_desc, 1 << shift); in qi_submit_sync()
1410 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH; in qi_submit_sync()
1411 qi->free_cnt -= count + 1; in qi_submit_sync()
1417 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
1419 while (qi->desc_status[wait_index] != QI_DONE) { in qi_submit_sync()
1431 raw_spin_unlock(&qi->q_lock); in qi_submit_sync()
1433 raw_spin_lock(&qi->q_lock); in qi_submit_sync()
1437 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE; in qi_submit_sync()
1439 reclaim_free_desc(qi); in qi_submit_sync()
1440 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
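
Taken together, the qi_submit_sync() hits describe the full submission path: reserve count+1 slots under q_lock (dropping the lock and spinning while fewer than count+2 slots are free), copy the descriptors in, append a wait descriptor whose second quadword points at the wait slot's status word, advance free_head, publish the new free_head to the hardware tail register, spin until the status write-back flips the wait slot to QI_DONE, then mark the remaining slots done and reclaim. Below is a single-threaded user-space model of that flow; submit_sync(), process_queue() and struct ring are hypothetical stand-ins (process_queue() plays the IOMMU hardware), and the locking, memory barriers and fault polling of the real function are left out.

#include <stdio.h>

#define QI_LENGTH 256

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

struct desc { unsigned long long qw0, qw1; };

struct ring {
    struct desc desc[QI_LENGTH];
    int status[QI_LENGTH];
    int free_head, free_tail, free_cnt;
    int hw_head;                          /* models the hardware head register */
};

/* Fake hardware: walk from the old head to the new tail; in this model a qw0
 * type nibble of 0x5 stands in for a wait descriptor and triggers the status
 * write-back, other descriptors are consumed silently. */
static void process_queue(struct ring *qi, int tail)
{
    while (qi->hw_head != tail) {
        if ((qi->desc[qi->hw_head].qw0 & 0xf) == 0x5)
            qi->status[qi->hw_head] = QI_DONE;
        qi->hw_head = (qi->hw_head + 1) % QI_LENGTH;
    }
}

/* Same walk as reclaim_free_desc(): return finished slots to the free pool. */
static void reclaim(struct ring *qi)
{
    while (qi->status[qi->free_tail] == QI_DONE ||
           qi->status[qi->free_tail] == QI_ABORT) {
        qi->status[qi->free_tail] = QI_FREE;
        qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
        qi->free_cnt++;
    }
}

/* Reserve count+1 slots, queue the descriptors plus one wait descriptor,
 * kick the "hardware", spin on the wait slot, then mark and reclaim. */
static int submit_sync(struct ring *qi, const struct desc *d, int count)
{
    if (qi->free_cnt < count + 2)         /* the driver drops the lock and retries */
        return -1;

    int index = qi->free_head;
    int wait_index = (index + count) % QI_LENGTH;

    for (int i = 0; i < count; i++) {
        qi->desc[(index + i) % QI_LENGTH] = d[i];
        qi->status[(index + i) % QI_LENGTH] = QI_IN_USE;
    }
    qi->status[wait_index] = QI_IN_USE;
    /* In the driver, qw1 holds virt_to_phys(&desc_status[wait_index]). */
    qi->desc[wait_index] = (struct desc){ .qw0 = 0x5, .qw1 = (unsigned long long)wait_index };

    qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
    qi->free_cnt -= count + 1;

    process_queue(qi, qi->free_head);     /* stands in for the tail-register write */

    while (qi->status[wait_index] != QI_DONE)
        ;                                 /* the driver also checks for faults here */

    for (int i = 0; i < count; i++)
        qi->status[(index + i) % QI_LENGTH] = QI_DONE;

    reclaim(qi);
    return 0;
}

int main(void)
{
    struct ring qi = { .free_cnt = QI_LENGTH };
    struct desc d = { .qw0 = 0x1 };       /* pretend invalidation request */

    int ret = submit_sync(&qi, &d, 1);
    printf("ret=%d free_cnt=%d free_tail=%d\n", ret, qi.free_cnt, qi.free_tail);
    return 0;
}
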
1673 struct q_inval *qi = iommu->qi; in __dmar_enable_qi() local
1674 u64 val = virt_to_phys(qi->desc); in __dmar_enable_qi()
1676 qi->free_head = qi->free_tail = 0; in __dmar_enable_qi()
1677 qi->free_cnt = QI_LENGTH; in __dmar_enable_qi()
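
The __dmar_enable_qi() lines reset the software bookkeeping so it agrees with a hardware queue whose tail register is about to be zeroed and whose base register is programmed with virt_to_phys(qi->desc). A tiny sketch of just that reset, with the register writes reduced to comments; reset_queue_state() and qi_model are hypothetical names.

#include <stdio.h>

#define QI_LENGTH 256

struct qi_model {
    int free_head, free_tail, free_cnt;
};

/* Put the software ring back to "everything free, head == tail == 0" so it
 * matches the zero the driver writes to the hardware tail register before
 * re-programming the queue base address. */
static void reset_queue_state(struct qi_model *qi)
{
    qi->free_head = qi->free_tail = 0;
    qi->free_cnt  = QI_LENGTH;
}

int main(void)
{
    struct qi_model qi = { .free_head = 17, .free_tail = 9, .free_cnt = 248 };

    reset_queue_state(&qi);              /* e.g. when the queue is re-enabled */
    printf("head=%d tail=%d free=%d\n", qi.free_head, qi.free_tail, qi.free_cnt);
    return 0;
}
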
1709 struct q_inval *qi; in dmar_enable_qi() local
1718 if (iommu->qi) in dmar_enable_qi()
1721 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1722 if (!iommu->qi) in dmar_enable_qi()
1725 qi = iommu->qi; in dmar_enable_qi()
1734 kfree(qi); in dmar_enable_qi()
1735 iommu->qi = NULL; in dmar_enable_qi()
1739 qi->desc = page_address(desc_page); in dmar_enable_qi()
1741 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC); in dmar_enable_qi()
1742 if (!qi->desc_status) { in dmar_enable_qi()
1743 free_page((unsigned long) qi->desc); in dmar_enable_qi()
1744 kfree(qi); in dmar_enable_qi()
1745 iommu->qi = NULL; in dmar_enable_qi()
1749 raw_spin_lock_init(&qi->q_lock); in dmar_enable_qi()
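
dmar_enable_qi() builds the queue in three steps and unwinds in reverse on failure: allocate the queue struct, allocate the page(s) holding the descriptor ring, allocate the per-slot status array; a failed status allocation frees the ring and the struct again and clears iommu->qi, mirroring the free_iommu() lines at the top of the listing. Below is a user-space sketch of that ordering, with calloc()/aligned_alloc() standing in for kmalloc()/alloc_pages_node()/kcalloc(); DESC_SIZE assumes the legacy 16-byte descriptor (the driver widens the ring via the shift seen in the listing when larger descriptors are in use), and enable_qi_model()/qi_model are hypothetical names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define QI_LENGTH 256
#define DESC_SIZE 16                      /* legacy 128-bit descriptors */

struct qi_model {
    void *desc;                           /* page of invalidation descriptors */
    int  *desc_status;                    /* one status word per slot */
    int   free_head, free_tail, free_cnt;
};

/* Allocate the queue in the same order as the driver and unwind each failure
 * path the same way: struct first, then ring, then status array. */
static struct qi_model *enable_qi_model(void)
{
    struct qi_model *qi = calloc(1, sizeof(*qi));
    if (!qi)
        return NULL;

    qi->desc = aligned_alloc(4096, QI_LENGTH * DESC_SIZE);
    if (!qi->desc) {
        free(qi);                         /* kfree(qi); iommu->qi = NULL */
        return NULL;
    }
    memset(qi->desc, 0, QI_LENGTH * DESC_SIZE);

    qi->desc_status = calloc(QI_LENGTH, sizeof(int));
    if (!qi->desc_status) {
        free(qi->desc);                   /* free_page() on this path */
        free(qi);
        return NULL;
    }

    qi->free_cnt = QI_LENGTH;             /* head/tail reset as in __dmar_enable_qi() */
    return qi;
}

int main(void)
{
    struct qi_model *qi = enable_qi_model();

    printf("%s\n", qi ? "queued invalidation model ready" : "allocation failed");
    if (qi) {
        /* Teardown mirrors the free_iommu() ordering at the top of the listing. */
        free(qi->desc);
        free(qi->desc_status);
        free(qi);
    }
    return 0;
}
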
2112 if (!iommu->qi) in dmar_reenable_qi()