Lines Matching refs:msc

74 struct msc *msc; member
94 struct msc *msc; member
129 struct msc { struct
306 return win->entry.next == &win->msc->win_list; in msc_is_last_win()
318 return list_first_entry(&win->msc->win_list, struct msc_window, in msc_next_window()
354 msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty) in msc_find_window() argument
359 if (list_empty(&msc->win_list)) in msc_find_window()
367 list_for_each_entry(win, &msc->win_list, entry) { in msc_find_window()
391 static struct msc_window *msc_oldest_window(struct msc *msc) in msc_oldest_window() argument
395 if (list_empty(&msc->win_list)) in msc_oldest_window()
398 win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true); in msc_oldest_window()
402 return list_first_entry(&msc->win_list, struct msc_window, entry); in msc_oldest_window()
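The msc_find_window()/msc_oldest_window() lines above pick the first window after the current one that already holds data, falling back to the head of the window list. A minimal user-space sketch of that lookup, with a plain array and a "nonempty" flag standing in for the kernel's win_list and its tracking state (wins[], nr_wins and oldest_window() are illustrative names, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct win { int id; bool nonempty; };

static struct win wins[] = {
	{ 0, false }, { 1, true }, { 2, true },
};
static const int nr_wins = 3;

static int oldest_window(int cur)
{
	int i;

	/* scan all windows, starting just after the current one */
	for (i = 1; i <= nr_wins; i++) {
		int idx = (cur + i) % nr_wins;

		if (wins[idx].nonempty)
			return idx;
	}
	return 0;	/* nothing written yet: fall back to the first window */
}

int main(void)
{
	printf("oldest after cur=2: %d\n", oldest_window(2));	/* prints 1 */
	return 0;
}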
440 static struct msc_iter *msc_iter_install(struct msc *msc) in msc_iter_install() argument
448 mutex_lock(&msc->buf_mutex); in msc_iter_install()
456 if (msc->enabled) { in msc_iter_install()
462 iter->msc = msc; in msc_iter_install()
464 list_add_tail(&iter->entry, &msc->iter_list); in msc_iter_install()
466 mutex_unlock(&msc->buf_mutex); in msc_iter_install()
471 static void msc_iter_remove(struct msc_iter *iter, struct msc *msc) in msc_iter_remove() argument
473 mutex_lock(&msc->buf_mutex); in msc_iter_remove()
475 mutex_unlock(&msc->buf_mutex); in msc_iter_remove()
498 static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc) in msc_iter_win_start() argument
504 iter->start_win = msc_oldest_window(msc); in msc_iter_win_start()
581 struct msc *msc = iter->msc; in msc_buffer_iterate() local
589 if (msc_iter_win_start(iter, msc)) in msc_buffer_iterate()
654 static void msc_buffer_clear_hw_header(struct msc *msc) in msc_buffer_clear_hw_header() argument
659 list_for_each_entry(win, &msc->win_list, entry) { in msc_buffer_clear_hw_header()
672 static int intel_th_msu_init(struct msc *msc) in intel_th_msu_init() argument
676 if (!msc->do_irq) in intel_th_msu_init()
679 if (!msc->mbuf) in intel_th_msu_init()
682 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); in intel_th_msu_init()
683 mintctl |= msc->index ? M1BLIE : M0BLIE; in intel_th_msu_init()
684 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); in intel_th_msu_init()
685 if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) { in intel_th_msu_init()
686 dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n"); in intel_th_msu_init()
687 msc->do_irq = 0; in intel_th_msu_init()
691 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); in intel_th_msu_init()
692 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); in intel_th_msu_init()
697 static void intel_th_msu_deinit(struct msc *msc) in intel_th_msu_deinit() argument
701 if (!msc->do_irq) in intel_th_msu_deinit()
704 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); in intel_th_msu_deinit()
705 mintctl &= msc->index ? ~M1BLIE : ~M0BLIE; in intel_th_msu_deinit()
706 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); in intel_th_msu_deinit()
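The intel_th_msu_init() lines above show a write-and-verify pattern: set the per-instance BLIE bit in MINTCTL, read it back, and fall back to polled operation if the write does not stick. A user-space sketch of that pattern with a faked register; the bit positions and helper names below are illustrative, not taken from msu.h:

#include <stdint.h>
#include <stdio.h>

#define M0BLIE	(1u << 16)	/* illustrative bit positions */
#define M1BLIE	(1u << 24)

static uint32_t fake_mintctl;	/* stands in for the REG_MSU_MINTCTL MMIO register */

static uint32_t reg_read(void)		{ return fake_mintctl; }
static void reg_write(uint32_t v)	{ fake_mintctl = v & ~M1BLIE; }	/* pretend M1BLIE ignores writes */

static void msu_init_sketch(int index, int *do_irq)
{
	uint32_t mintctl = reg_read();

	mintctl |= index ? M1BLIE : M0BLIE;
	reg_write(mintctl);
	if (reg_read() != mintctl) {
		fprintf(stderr, "MINTCTL ignores writes: no usable interrupts\n");
		*do_irq = 0;
	}
}

int main(void)
{
	int do_irq = 1;

	msu_init_sketch(1, &do_irq);	/* index 1 hits the faked read-only bit */
	printf("do_irq after init: %d\n", do_irq);
	return 0;
}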
717 if (!win->msc->mbuf) in msc_win_set_lockout()
731 atomic_inc(&win->msc->user_count); in msc_win_set_lockout()
733 atomic_dec(&win->msc->user_count); in msc_win_set_lockout()
746 dev_warn_ratelimited(msc_dev(win->msc), in msc_win_set_lockout()
761 static int msc_configure(struct msc *msc) in msc_configure() argument
765 lockdep_assert_held(&msc->buf_mutex); in msc_configure()
767 if (msc->mode > MSC_MODE_MULTI) in msc_configure()
770 if (msc->mode == MSC_MODE_MULTI) { in msc_configure()
771 if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE)) in msc_configure()
774 msc_buffer_clear_hw_header(msc); in msc_configure()
777 msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR); in msc_configure()
778 msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE); in msc_configure()
780 reg = msc->base_addr >> PAGE_SHIFT; in msc_configure()
781 iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR); in msc_configure()
783 if (msc->mode == MSC_MODE_SINGLE) { in msc_configure()
784 reg = msc->nr_pages; in msc_configure()
785 iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE); in msc_configure()
788 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); in msc_configure()
792 reg |= msc->mode << __ffs(MSC_MODE); in msc_configure()
793 reg |= msc->burst_len << __ffs(MSC_LEN); in msc_configure()
795 if (msc->wrap) in msc_configure()
798 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); in msc_configure()
800 intel_th_msu_init(msc); in msc_configure()
802 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; in msc_configure()
803 intel_th_trace_enable(msc->thdev); in msc_configure()
804 msc->enabled = 1; in msc_configure()
806 if (msc->mbuf && msc->mbuf->activate) in msc_configure()
807 msc->mbuf->activate(msc->mbuf_priv); in msc_configure()
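The msc_configure() lines above fold the mode and burst length into MSC0CTL with shifts of the form "msc->mode << __ffs(MSC_MODE)", i.e. the value is placed at the lowest set bit of the field mask. A small sketch of that idiom, with a builtin-based stand-in for __ffs(); the mask values below are illustrative, not the hardware layout:

#include <stdint.h>
#include <stdio.h>

#define MSC_MODE	0x00000030u	/* illustrative field masks */
#define MSC_LEN		0x00000700u

/* lowest-set-bit index of a non-zero word, like the kernel's __ffs() */
static unsigned int lsb_index(uint32_t mask)
{
	return (unsigned int)__builtin_ctz(mask);
}

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	reg &= ~mask;
	reg |= (val << lsb_index(mask)) & mask;
	return reg;
}

int main(void)
{
	uint32_t ctl = 0;

	ctl = set_field(ctl, MSC_MODE, 2);	/* e.g. a multi-window mode value */
	ctl = set_field(ctl, MSC_LEN, 4);	/* burst length */
	printf("CTL = 0x%08x\n", ctl);		/* 0x00000420 */
	return 0;
}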
819 static void msc_disable(struct msc *msc) in msc_disable() argument
821 struct msc_window *win = msc->cur_win; in msc_disable()
824 lockdep_assert_held(&msc->buf_mutex); in msc_disable()
826 if (msc->mode == MSC_MODE_MULTI) in msc_disable()
829 if (msc->mbuf && msc->mbuf->deactivate) in msc_disable()
830 msc->mbuf->deactivate(msc->mbuf_priv); in msc_disable()
831 intel_th_msu_deinit(msc); in msc_disable()
832 intel_th_trace_disable(msc->thdev); in msc_disable()
834 if (msc->mode == MSC_MODE_SINGLE) { in msc_disable()
835 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); in msc_disable()
836 msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); in msc_disable()
838 reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); in msc_disable()
839 msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1); in msc_disable()
840 dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n", in msc_disable()
841 reg, msc->single_sz, msc->single_wrap); in msc_disable()
844 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); in msc_disable()
846 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); in msc_disable()
848 if (msc->mbuf && msc->mbuf->ready) in msc_disable()
849 msc->mbuf->ready(msc->mbuf_priv, win->sgt, in msc_disable()
852 msc->enabled = 0; in msc_disable()
854 iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR); in msc_disable()
855 iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE); in msc_disable()
857 dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n", in msc_disable()
858 ioread32(msc->reg_base + REG_MSU_MSC0NWSA)); in msc_disable()
860 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); in msc_disable()
861 dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); in msc_disable()
863 reg = ioread32(msc->reg_base + REG_MSU_MSUSTS); in msc_disable()
864 reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; in msc_disable()
865 iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS); in msc_disable()
870 struct msc *msc = dev_get_drvdata(&thdev->dev); in intel_th_msc_activate() local
873 if (!atomic_inc_unless_negative(&msc->user_count)) in intel_th_msc_activate()
876 mutex_lock(&msc->buf_mutex); in intel_th_msc_activate()
879 if (list_empty(&msc->iter_list)) in intel_th_msc_activate()
880 ret = msc_configure(msc); in intel_th_msc_activate()
882 mutex_unlock(&msc->buf_mutex); in intel_th_msc_activate()
885 atomic_dec(&msc->user_count); in intel_th_msc_activate()
892 struct msc *msc = dev_get_drvdata(&thdev->dev); in intel_th_msc_deactivate() local
894 mutex_lock(&msc->buf_mutex); in intel_th_msc_deactivate()
895 if (msc->enabled) { in intel_th_msc_deactivate()
896 msc_disable(msc); in intel_th_msc_deactivate()
897 atomic_dec(&msc->user_count); in intel_th_msc_deactivate()
899 mutex_unlock(&msc->buf_mutex); in intel_th_msc_deactivate()
912 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size) in msc_buffer_contig_alloc() argument
922 ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL); in msc_buffer_contig_alloc()
932 sg_set_buf(msc->single_sgt.sgl, page_address(page), size); in msc_buffer_contig_alloc()
934 ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1, in msc_buffer_contig_alloc()
939 msc->nr_pages = nr_pages; in msc_buffer_contig_alloc()
940 msc->base = page_address(page); in msc_buffer_contig_alloc()
941 msc->base_addr = sg_dma_address(msc->single_sgt.sgl); in msc_buffer_contig_alloc()
949 sg_free_table(&msc->single_sgt); in msc_buffer_contig_alloc()
959 static void msc_buffer_contig_free(struct msc *msc) in msc_buffer_contig_free() argument
963 dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, in msc_buffer_contig_free()
965 sg_free_table(&msc->single_sgt); in msc_buffer_contig_free()
967 for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { in msc_buffer_contig_free()
968 struct page *page = virt_to_page(msc->base + off); in msc_buffer_contig_free()
974 msc->nr_pages = 0; in msc_buffer_contig_free()
984 static struct page *msc_buffer_contig_get_page(struct msc *msc, in msc_buffer_contig_get_page() argument
987 if (pgoff >= msc->nr_pages) in msc_buffer_contig_get_page()
990 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT)); in msc_buffer_contig_get_page()
1005 block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent, in __msc_buffer_win_alloc()
1018 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, in __msc_buffer_win_alloc()
1027 static void msc_buffer_set_uc(struct msc *msc) in msc_buffer_set_uc() argument
1033 if (msc->mode == MSC_MODE_SINGLE) { in msc_buffer_set_uc()
1034 set_memory_uc((unsigned long)msc->base, msc->nr_pages); in msc_buffer_set_uc()
1038 list_for_each_entry(win, &msc->win_list, entry) { in msc_buffer_set_uc()
1047 static void msc_buffer_set_wb(struct msc *msc) in msc_buffer_set_wb() argument
1053 if (msc->mode == MSC_MODE_SINGLE) { in msc_buffer_set_wb()
1054 set_memory_wb((unsigned long)msc->base, msc->nr_pages); in msc_buffer_set_wb()
1058 list_for_each_entry(win, &msc->win_list, entry) { in msc_buffer_set_wb()
1068 msc_buffer_set_uc(struct msc *msc) {} in msc_buffer_set_uc() argument
1069 static inline void msc_buffer_set_wb(struct msc *msc) {} in msc_buffer_set_wb() argument
1082 static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) in msc_buffer_win_alloc() argument
1094 win->msc = msc; in msc_buffer_win_alloc()
1099 if (!list_empty(&msc->win_list)) { in msc_buffer_win_alloc()
1100 struct msc_window *prev = list_last_entry(&msc->win_list, in msc_buffer_win_alloc()
1107 if (msc->mbuf && msc->mbuf->alloc_window) in msc_buffer_win_alloc()
1108 ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt, in msc_buffer_win_alloc()
1119 if (list_empty(&msc->win_list)) { in msc_buffer_win_alloc()
1120 msc->base = msc_win_base(win); in msc_buffer_win_alloc()
1121 msc->base_addr = msc_win_base_dma(win); in msc_buffer_win_alloc()
1122 msc->cur_win = win; in msc_buffer_win_alloc()
1125 list_add_tail(&win->entry, &msc->win_list); in msc_buffer_win_alloc()
1126 msc->nr_pages += nr_blocks; in msc_buffer_win_alloc()
1136 static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) in __msc_buffer_win_free() argument
1145 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, in __msc_buffer_win_free()
1159 static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) in msc_buffer_win_free() argument
1161 msc->nr_pages -= win->nr_blocks; in msc_buffer_win_free()
1164 if (list_empty(&msc->win_list)) { in msc_buffer_win_free()
1165 msc->base = NULL; in msc_buffer_win_free()
1166 msc->base_addr = 0; in msc_buffer_win_free()
1169 if (msc->mbuf && msc->mbuf->free_window) in msc_buffer_win_free()
1170 msc->mbuf->free_window(msc->mbuf_priv, win->sgt); in msc_buffer_win_free()
1172 __msc_buffer_win_free(msc, win); in msc_buffer_win_free()
1184 static void msc_buffer_relink(struct msc *msc) in msc_buffer_relink() argument
1189 list_for_each_entry(win, &msc->win_list, entry) { in msc_buffer_relink()
1200 next_win = list_first_entry(&msc->win_list, in msc_buffer_relink()
1238 static void msc_buffer_multi_free(struct msc *msc) in msc_buffer_multi_free() argument
1242 list_for_each_entry_safe(win, iter, &msc->win_list, entry) in msc_buffer_multi_free()
1243 msc_buffer_win_free(msc, win); in msc_buffer_multi_free()
1246 static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages, in msc_buffer_multi_alloc() argument
1252 ret = msc_buffer_win_alloc(msc, nr_pages[i]); in msc_buffer_multi_alloc()
1254 msc_buffer_multi_free(msc); in msc_buffer_multi_alloc()
1259 msc_buffer_relink(msc); in msc_buffer_multi_alloc()
1273 static void msc_buffer_free(struct msc *msc) in msc_buffer_free() argument
1275 msc_buffer_set_wb(msc); in msc_buffer_free()
1277 if (msc->mode == MSC_MODE_SINGLE) in msc_buffer_free()
1278 msc_buffer_contig_free(msc); in msc_buffer_free()
1279 else if (msc->mode == MSC_MODE_MULTI) in msc_buffer_free()
1280 msc_buffer_multi_free(msc); in msc_buffer_free()
1299 static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages, in msc_buffer_alloc() argument
1305 if (atomic_read(&msc->user_count) != -1) in msc_buffer_alloc()
1308 if (msc->mode == MSC_MODE_SINGLE) { in msc_buffer_alloc()
1312 ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT); in msc_buffer_alloc()
1313 } else if (msc->mode == MSC_MODE_MULTI) { in msc_buffer_alloc()
1314 ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins); in msc_buffer_alloc()
1320 msc_buffer_set_uc(msc); in msc_buffer_alloc()
1325 if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1)) in msc_buffer_alloc()
1343 static int msc_buffer_unlocked_free_unless_used(struct msc *msc) in msc_buffer_unlocked_free_unless_used() argument
1347 count = atomic_cmpxchg(&msc->user_count, 0, -1); in msc_buffer_unlocked_free_unless_used()
1354 msc_buffer_free(msc); in msc_buffer_unlocked_free_unless_used()
1366 static int msc_buffer_free_unless_used(struct msc *msc) in msc_buffer_free_unless_used() argument
1370 mutex_lock(&msc->buf_mutex); in msc_buffer_free_unless_used()
1371 ret = msc_buffer_unlocked_free_unless_used(msc); in msc_buffer_free_unless_used()
1372 mutex_unlock(&msc->buf_mutex); in msc_buffer_free_unless_used()
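The user_count lines above (atomic_set(..., -1), atomic_inc_unless_negative(), atomic_cmpxchg(..., 0, -1)) encode a small state machine: -1 means no buffer is allocated, 0 means allocated but idle, and positive values count active users; teardown is only permitted by swinging 0 back to -1. A user-space sketch of that convention with C11 atomics (take_ref/drop_ref/free_unless_used are illustrative names, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int user_count = -1;	/* -1: no buffer, 0: idle, >0: in use */

/* like atomic_inc_unless_negative(): take a reference only if allocated */
static bool take_ref(void)
{
	int v = atomic_load(&user_count);

	while (v >= 0)
		if (atomic_compare_exchange_weak(&user_count, &v, v + 1))
			return true;
	return false;
}

static void drop_ref(void)
{
	atomic_fetch_sub(&user_count, 1);
}

/* like atomic_cmpxchg(&user_count, 0, -1): only an idle buffer may be freed */
static bool free_unless_used(void)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&user_count, &expected, -1);
}

int main(void)
{
	atomic_store(&user_count, 0);				/* "buffer allocated" */
	printf("take_ref: %d\n", take_ref());			/* 1 */
	printf("free while used: %d\n", free_unless_used());	/* 0 */
	drop_ref();
	printf("free when idle: %d\n", free_unless_used());	/* 1 */
	return 0;
}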
1387 static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff) in msc_buffer_get_page() argument
1393 if (msc->mode == MSC_MODE_SINGLE) in msc_buffer_get_page()
1394 return msc_buffer_contig_get_page(msc, pgoff); in msc_buffer_get_page()
1396 list_for_each_entry(win, &msc->win_list, entry) in msc_buffer_get_page()
1453 struct msc *msc = dev_get_drvdata(&thdev->dev); in intel_th_msc_open() local
1459 iter = msc_iter_install(msc); in intel_th_msc_open()
1471 struct msc *msc = iter->msc; in intel_th_msc_release() local
1473 msc_iter_remove(iter, msc); in intel_th_msc_release()
1479 msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) in msc_single_to_user() argument
1481 unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len; in msc_single_to_user()
1484 if (msc->single_wrap) { in msc_single_to_user()
1485 start += msc->single_sz; in msc_single_to_user()
1488 if (copy_to_user(buf, msc->base + start, tocopy)) in msc_single_to_user()
1498 tocopy = min(rem, msc->single_sz - start); in msc_single_to_user()
1499 if (copy_to_user(buf, msc->base + start, tocopy)) in msc_single_to_user()
1508 if (copy_to_user(buf, msc->base + start, rem)) in msc_single_to_user()
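The msc_single_to_user() lines above stitch a linear read out of a wrapped single-mode buffer: once the hardware has wrapped, the oldest byte sits at the write pointer (single_sz), so one read may need a copy from the tail of the buffer followed by a copy from its head. A user-space sketch of that stitching, with memcpy() standing in for copy_to_user() and wrapped_read() being an illustrative helper:

#include <stdio.h>
#include <string.h>

static size_t wrapped_read(char *dst, const char *buf, size_t bufsz,
			   size_t wptr, size_t off, size_t len)
{
	size_t start = (wptr + off) % bufsz;	/* logical offset 0 == oldest byte */
	size_t copied = 0;

	while (copied < len) {
		size_t chunk = bufsz - start;

		if (chunk > len - copied)
			chunk = len - copied;
		memcpy(dst + copied, buf + start, chunk);
		copied += chunk;
		start = 0;			/* wrap to the head of the buffer */
	}
	return copied;
}

int main(void)
{
	char buf[8] = { '4', '5', '6', '7', '0', '1', '2', '3' };
	char out[9] = { 0 };

	wrapped_read(out, buf, sizeof(buf), 4 /* write pointer */, 0, 8);
	printf("%s\n", out);			/* prints 01234567 */
	return 0;
}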
1518 struct msc *msc = iter->msc; in intel_th_msc_read() local
1523 if (!atomic_inc_unless_negative(&msc->user_count)) in intel_th_msc_read()
1526 if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap) in intel_th_msc_read()
1527 size = msc->single_sz; in intel_th_msc_read()
1529 size = msc->nr_pages << PAGE_SHIFT; in intel_th_msc_read()
1540 if (msc->mode == MSC_MODE_SINGLE) { in intel_th_msc_read()
1541 ret = msc_single_to_user(msc, buf, off, len); in intel_th_msc_read()
1544 } else if (msc->mode == MSC_MODE_MULTI) { in intel_th_msc_read()
1558 atomic_dec(&msc->user_count); in intel_th_msc_read()
1570 struct msc *msc = iter->msc; in msc_mmap_open() local
1572 atomic_inc(&msc->mmap_count); in msc_mmap_open()
1578 struct msc *msc = iter->msc; in msc_mmap_close() local
1581 if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex)) in msc_mmap_close()
1585 for (pg = 0; pg < msc->nr_pages; pg++) { in msc_mmap_close()
1586 struct page *page = msc_buffer_get_page(msc, pg); in msc_mmap_close()
1596 atomic_dec(&msc->user_count); in msc_mmap_close()
1597 mutex_unlock(&msc->buf_mutex); in msc_mmap_close()
1603 struct msc *msc = iter->msc; in msc_mmap_fault() local
1605 vmf->page = msc_buffer_get_page(msc, vmf->pgoff); in msc_mmap_fault()
1626 struct msc *msc = iter->msc; in intel_th_msc_mmap() local
1636 if (!atomic_inc_unless_negative(&msc->user_count)) in intel_th_msc_mmap()
1639 if (msc->mode != MSC_MODE_SINGLE && in intel_th_msc_mmap()
1640 msc->mode != MSC_MODE_MULTI) in intel_th_msc_mmap()
1643 if (size >> PAGE_SHIFT != msc->nr_pages) in intel_th_msc_mmap()
1646 atomic_set(&msc->mmap_count, 1); in intel_th_msc_mmap()
1651 atomic_dec(&msc->user_count); in intel_th_msc_mmap()
1670 struct msc *msc = dev_get_drvdata(&thdev->dev); in intel_th_msc_wait_empty() local
1676 reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS); in intel_th_msc_wait_empty()
1681 dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); in intel_th_msc_wait_empty()
1684 static int intel_th_msc_init(struct msc *msc) in intel_th_msc_init() argument
1686 atomic_set(&msc->user_count, -1); in intel_th_msc_init()
1688 msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI; in intel_th_msc_init()
1689 mutex_init(&msc->buf_mutex); in intel_th_msc_init()
1690 INIT_LIST_HEAD(&msc->win_list); in intel_th_msc_init()
1691 INIT_LIST_HEAD(&msc->iter_list); in intel_th_msc_init()
1693 msc->burst_len = in intel_th_msc_init()
1694 (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >> in intel_th_msc_init()
1700 static int msc_win_switch(struct msc *msc) in msc_win_switch() argument
1704 if (list_empty(&msc->win_list)) in msc_win_switch()
1707 first = list_first_entry(&msc->win_list, struct msc_window, entry); in msc_win_switch()
1709 if (msc_is_last_win(msc->cur_win)) in msc_win_switch()
1710 msc->cur_win = first; in msc_win_switch()
1712 msc->cur_win = list_next_entry(msc->cur_win, entry); in msc_win_switch()
1714 msc->base = msc_win_base(msc->cur_win); in msc_win_switch()
1715 msc->base_addr = msc_win_base_dma(msc->cur_win); in msc_win_switch()
1717 intel_th_trace_switch(msc->thdev); in msc_win_switch()
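The msc_win_switch() lines above advance the current window round-robin: if the current window is the last one on the list, wrap back to the first, otherwise step to the next, then re-point base/base_addr and kick intel_th_trace_switch(). A minimal sketch of just the advance logic, with an array standing in for the kernel's list_head ring (wins[], nr_wins and win_switch_sketch() are illustrative names):

#include <stdio.h>

struct window { int id; };

static struct window wins[] = { { 0 }, { 1 }, { 2 } };
static const int nr_wins = 3;
static int cur;				/* index of the current window */

static void win_switch_sketch(void)
{
	if (cur == nr_wins - 1)		/* last window: wrap around to the first */
		cur = 0;
	else
		cur++;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++) {
		win_switch_sketch();
		printf("current window: %d\n", wins[cur].id);
	}
	return 0;
}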
1729 struct msc *msc = dev_get_drvdata(dev); in intel_th_msc_window_unlock() local
1735 win = msc_find_window(msc, sgt, false); in intel_th_msc_window_unlock()
1740 if (msc->switch_on_unlock == win) { in intel_th_msc_window_unlock()
1741 msc->switch_on_unlock = NULL; in intel_th_msc_window_unlock()
1742 msc_win_switch(msc); in intel_th_msc_window_unlock()
1749 struct msc *msc = container_of(work, struct msc, work); in msc_work() local
1751 intel_th_msc_deactivate(msc->thdev); in msc_work()
1756 struct msc *msc = dev_get_drvdata(&thdev->dev); in intel_th_msc_interrupt() local
1757 u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); in intel_th_msc_interrupt()
1758 u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; in intel_th_msc_interrupt()
1761 if (!msc->do_irq || !msc->mbuf) in intel_th_msc_interrupt()
1767 return msc->enabled ? IRQ_HANDLED : IRQ_NONE; in intel_th_msc_interrupt()
1769 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); in intel_th_msc_interrupt()
1771 if (!msc->enabled) in intel_th_msc_interrupt()
1775 win = msc->cur_win; in intel_th_msc_interrupt()
1784 if (msc->stop_on_full) in intel_th_msc_interrupt()
1785 schedule_work(&msc->work); in intel_th_msc_interrupt()
1787 msc->switch_on_unlock = next_win; in intel_th_msc_interrupt()
1795 msc_win_switch(msc); in intel_th_msc_interrupt()
1797 if (msc->mbuf && msc->mbuf->ready) in intel_th_msc_interrupt()
1798 msc->mbuf->ready(msc->mbuf_priv, win->sgt, in intel_th_msc_interrupt()
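The intel_th_msc_interrupt() lines above read MSUSTS, test the BLAST bit for this MSC instance, and write the status value back to acknowledge it (the register behaves as write-one-to-clear) before switching windows. A faked-register sketch of just that check-and-ack pattern; bit positions and helper names below are illustrative, not taken from msu.h:

#include <stdint.h>
#include <stdio.h>

#define MSUSTS_MSC0BLAST	(1u << 0)	/* illustrative bit positions */
#define MSUSTS_MSC1BLAST	(1u << 1)

static uint32_t fake_msusts = MSUSTS_MSC0BLAST | MSUSTS_MSC1BLAST;

static uint32_t sts_read(void)		{ return fake_msusts; }
static void sts_write(uint32_t v)	{ fake_msusts &= ~v; }	/* write-one-to-clear */

/* returns 1 if the event belonged to this MSC instance and was acked */
static int handle_irq_sketch(int index)
{
	uint32_t msusts = sts_read();
	uint32_t mask = index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;

	if (!(msusts & mask))
		return 0;		/* not this instance's event */

	sts_write(msusts & mask);	/* acknowledge by writing the bit back */
	return 1;
}

int main(void)
{
	printf("msc0 handled: %d\n", handle_irq_sketch(0));
	printf("MSUSTS now: 0x%08x\n", sts_read());
	return 0;
}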
1814 struct msc *msc = dev_get_drvdata(dev); in wrap_show() local
1816 return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap); in wrap_show()
1823 struct msc *msc = dev_get_drvdata(dev); in wrap_store() local
1831 msc->wrap = !!val; in wrap_store()
1838 static void msc_buffer_unassign(struct msc *msc) in msc_buffer_unassign() argument
1840 lockdep_assert_held(&msc->buf_mutex); in msc_buffer_unassign()
1842 if (!msc->mbuf) in msc_buffer_unassign()
1845 msc->mbuf->unassign(msc->mbuf_priv); in msc_buffer_unassign()
1846 msu_buffer_put(msc->mbuf); in msc_buffer_unassign()
1847 msc->mbuf_priv = NULL; in msc_buffer_unassign()
1848 msc->mbuf = NULL; in msc_buffer_unassign()
1854 struct msc *msc = dev_get_drvdata(dev); in mode_show() local
1855 const char *mode = msc_mode[msc->mode]; in mode_show()
1858 mutex_lock(&msc->buf_mutex); in mode_show()
1859 if (msc->mbuf) in mode_show()
1860 mode = msc->mbuf->name; in mode_show()
1862 mutex_unlock(&msc->buf_mutex); in mode_show()
1872 struct msc *msc = dev_get_drvdata(dev); in mode_store() local
1895 if (!msc->do_irq) { in mode_store()
1908 if (i == MSC_MODE_MULTI && msc->multi_is_broken) in mode_store()
1911 mutex_lock(&msc->buf_mutex); in mode_store()
1915 if (mbuf && mbuf == msc->mbuf) { in mode_store()
1921 ret = msc_buffer_unlocked_free_unless_used(msc); in mode_store()
1933 msc_buffer_unassign(msc); in mode_store()
1934 msc->mbuf_priv = mbuf_priv; in mode_store()
1935 msc->mbuf = mbuf; in mode_store()
1937 msc_buffer_unassign(msc); in mode_store()
1940 msc->mode = i; in mode_store()
1945 mutex_unlock(&msc->buf_mutex); in mode_store()
1955 struct msc *msc = dev_get_drvdata(dev); in nr_pages_show() local
1959 mutex_lock(&msc->buf_mutex); in nr_pages_show()
1961 if (msc->mode == MSC_MODE_SINGLE) in nr_pages_show()
1962 count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages); in nr_pages_show()
1963 else if (msc->mode == MSC_MODE_MULTI) { in nr_pages_show()
1964 list_for_each_entry(win, &msc->win_list, entry) { in nr_pages_show()
1973 mutex_unlock(&msc->buf_mutex); in nr_pages_show()
1982 struct msc *msc = dev_get_drvdata(dev); in nr_pages_store() local
1992 ret = msc_buffer_free_unless_used(msc); in nr_pages_store()
2015 if (nr_wins && msc->mode == MSC_MODE_SINGLE) { in nr_pages_store()
2038 mutex_lock(&msc->buf_mutex); in nr_pages_store()
2039 ret = msc_buffer_alloc(msc, win, nr_wins); in nr_pages_store()
2040 mutex_unlock(&msc->buf_mutex); in nr_pages_store()
2054 struct msc *msc = dev_get_drvdata(dev); in win_switch_store() local
2066 mutex_lock(&msc->buf_mutex); in win_switch_store()
2072 if (msc->mode == MSC_MODE_MULTI && !msc->mbuf) in win_switch_store()
2073 ret = msc_win_switch(msc); in win_switch_store()
2074 mutex_unlock(&msc->buf_mutex); in win_switch_store()
2084 struct msc *msc = dev_get_drvdata(dev); in stop_on_full_show() local
2086 return sprintf(buf, "%d\n", msc->stop_on_full); in stop_on_full_show()
2093 struct msc *msc = dev_get_drvdata(dev); in stop_on_full_store() local
2096 ret = kstrtobool(buf, &msc->stop_on_full); in stop_on_full_store()
2122 struct msc *msc; in intel_th_msc_probe() local
2134 msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL); in intel_th_msc_probe()
2135 if (!msc) in intel_th_msc_probe()
2140 msc->do_irq = 1; in intel_th_msc_probe()
2143 msc->multi_is_broken = 1; in intel_th_msc_probe()
2145 msc->index = thdev->id; in intel_th_msc_probe()
2147 msc->thdev = thdev; in intel_th_msc_probe()
2148 msc->reg_base = base + msc->index * 0x100; in intel_th_msc_probe()
2149 msc->msu_base = base; in intel_th_msc_probe()
2151 INIT_WORK(&msc->work, msc_work); in intel_th_msc_probe()
2152 err = intel_th_msc_init(msc); in intel_th_msc_probe()
2156 dev_set_drvdata(dev, msc); in intel_th_msc_probe()
2163 struct msc *msc = dev_get_drvdata(&thdev->dev); in intel_th_msc_remove() local
2173 ret = msc_buffer_free_unless_used(msc); in intel_th_msc_remove()