Lines Matching defs:msc

58 * @entry: window list linkage (msc::win_list)
74 struct msc *msc;
80 * struct msc_iter - iterator for msc buffer
81 * @entry: msc::iter_list linkage
82 * @msc: pointer to the MSC device
94 struct msc *msc;
106 * struct msc - MSC device representation
129 struct msc {
306 return win->entry.next == &win->msc->win_list;
318 return list_first_entry(&win->msc->win_list, struct msc_window,
346 * @msc: MSC device
354 msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
359 if (list_empty(&msc->win_list))
367 list_for_each_entry(win, &msc->win_list, entry) {
384 * @msc: MSC device
387 * msc::user_count reference.
391 static struct msc_window *msc_oldest_window(struct msc *msc)
395 if (list_empty(&msc->win_list))
398 win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
402 return list_first_entry(&msc->win_list, struct msc_window, entry);
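
The fragment above (msc_oldest_window) returns the window after cur_win when it holds data, falling back to the list head otherwise. A minimal userspace sketch of why "oldest = next after current" holds once the ring has wrapped; the names (filled, cur, NR_WINS) are illustrative, not the driver's:

    #include <stdio.h>

    #define NR_WINS 4

    int main(void)
    {
        int filled[NR_WINS] = { 1, 1, 1, 1 }; /* every window holds data */
        int cur = 2;                          /* hardware is writing here */

        /*
         * After a wrap, the window right after the one being written
         * is the least recently written, i.e. the oldest.
         */
        int oldest = (cur + 1) % NR_WINS;
        if (!filled[oldest])                  /* never wrapped: start at 0 */
            oldest = 0;

        printf("current=%d oldest=%d\n", cur, oldest);
        return 0;
    }
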
440 static struct msc_iter *msc_iter_install(struct msc *msc)
448 mutex_lock(&msc->buf_mutex);
451 * Reading and tracing are mutually exclusive; if msc is
453 * will prevent enabling the msc and the rest of fops don't
456 if (msc->enabled) {
462 iter->msc = msc;
464 list_add_tail(&iter->entry, &msc->iter_list);
466 mutex_unlock(&msc->buf_mutex);
471 static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
473 mutex_lock(&msc->buf_mutex);
475 mutex_unlock(&msc->buf_mutex);
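
msc_iter_install above takes buf_mutex, refuses while tracing is enabled, and only then links the iterator; msc_iter_remove unlinks under the same mutex. A sketch of that "register a reader unless the device is busy" pattern, with pthread standing in for the kernel mutex and all names (struct dev, nr_readers) hypothetical:

    #include <errno.h>
    #include <pthread.h>

    struct dev {
        pthread_mutex_t lock;   /* stand-in for msc::buf_mutex */
        int enabled;            /* tracing active? */
        int nr_readers;
    };

    static int reader_install(struct dev *d)
    {
        int ret = 0;

        pthread_mutex_lock(&d->lock);
        if (d->enabled)         /* reading and tracing are exclusive */
            ret = -EBUSY;
        else
            d->nr_readers++;
        pthread_mutex_unlock(&d->lock);
        return ret;
    }

    static void reader_remove(struct dev *d)
    {
        pthread_mutex_lock(&d->lock);
        d->nr_readers--;
        pthread_mutex_unlock(&d->lock);
    }

    int main(void)
    {
        struct dev d = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
        return reader_install(&d) ? 1 : (reader_remove(&d), 0);
    }
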
498 static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
504 iter->start_win = msc_oldest_window(msc);
572 * Caller should have msc::user_count reference to make sure the buffer
581 struct msc *msc = iter->msc;
589 if (msc_iter_win_start(iter, msc))
652 * @msc: MSC device
654 static void msc_buffer_clear_hw_header(struct msc *msc)
659 list_for_each_entry(win, &msc->win_list, entry) {
670 static int intel_th_msu_init(struct msc *msc)
674 if (!msc->do_irq)
677 if (!msc->mbuf)
680 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
681 mintctl |= msc->index ? M1BLIE : M0BLIE;
682 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
683 if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
684 dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
685 msc->do_irq = 0;
689 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
690 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
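
intel_th_msu_init above uses a write-then-read-back probe: if MINTCTL does not retain the written value, interrupts are declared unusable and do_irq is cleared. A sketch of that probe pattern with a plain volatile pointer; mmio_read/mmio_write and the fake register are stand-ins, not real addresses:

    #include <stdint.h>

    static inline uint32_t mmio_read(volatile uint32_t *reg)
    {
        return *reg;
    }

    static inline void mmio_write(volatile uint32_t *reg, uint32_t val)
    {
        *reg = val;
    }

    static int enable_irq_bit(volatile uint32_t *ctl, uint32_t bit)
    {
        uint32_t val = mmio_read(ctl) | bit;

        mmio_write(ctl, val);
        if (mmio_read(ctl) != val)  /* write ignored: no usable IRQs */
            return -1;
        return 0;
    }

    int main(void)
    {
        uint32_t fake_reg = 0;      /* stands in for the MMIO register */
        return enable_irq_bit(&fake_reg, 1u << 0) ? 1 : 0;
    }
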
695 static void intel_th_msu_deinit(struct msc *msc)
699 if (!msc->do_irq)
702 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
703 mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
704 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
715 if (!win->msc->mbuf)
729 atomic_inc(&win->msc->user_count);
731 atomic_dec(&win->msc->user_count);
744 dev_warn_ratelimited(msc_dev(win->msc),
753 * @msc: the MSC device to configure
756 * into a given MSC. Then, enable tracing and set msc::enabled.
757 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
759 static int msc_configure(struct msc *msc)
763 lockdep_assert_held(&msc->buf_mutex);
765 if (msc->mode > MSC_MODE_MULTI)
768 if (msc->mode == MSC_MODE_MULTI) {
769 if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
772 msc_buffer_clear_hw_header(msc);
775 msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
776 msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);
778 reg = msc->base_addr >> PAGE_SHIFT;
779 iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
781 if (msc->mode == MSC_MODE_SINGLE) {
782 reg = msc->nr_pages;
783 iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
786 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
790 reg |= msc->mode << __ffs(MSC_MODE);
791 reg |= msc->burst_len << __ffs(MSC_LEN);
793 if (msc->wrap)
796 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
798 intel_th_msu_init(msc);
800 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
801 intel_th_trace_enable(msc->thdev);
802 msc->enabled = 1;
804 if (msc->mbuf && msc->mbuf->activate)
805 msc->mbuf->activate(msc->mbuf_priv);
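
msc_configure above packs mode and burst length into MSC0CTL with "value << __ffs(MASK)", and intel_th_msc_init later extracts burst_len with the mirror "(reg & MASK) >> __ffs(MASK)". A self-contained sketch of that mask-relative field packing; MODE_MASK and LEN_MASK are made-up layouts, not the real MSC0CTL bits:

    #include <stdint.h>
    #include <stdio.h>

    #define MODE_MASK 0x00000f00u
    #define LEN_MASK  0x000000f0u

    /* lowest set bit of the mask, like the kernel's __ffs() */
    #define FFS(m)    ((unsigned)__builtin_ctz(m))

    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
        return (val << FFS(mask)) & mask;
    }

    static uint32_t field_get(uint32_t mask, uint32_t reg)
    {
        return (reg & mask) >> FFS(mask);
    }

    int main(void)
    {
        uint32_t reg = 0;

        reg |= field_prep(MODE_MASK, 2);    /* e.g. multi mode */
        reg |= field_prep(LEN_MASK, 3);     /* e.g. burst length */
        printf("reg=%08x mode=%u len=%u\n", (unsigned)reg,
               (unsigned)field_get(MODE_MASK, reg),
               (unsigned)field_get(LEN_MASK, reg));
        return 0;
    }

The advantage of keying shifts off the mask is that relocating a field in the register only requires changing the mask definition.
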
812 * @msc: MSC device to disable
814 * If @msc is enabled, disable tracing on the switch and then disable MSC
815 * storage. Caller must hold msc::buf_mutex.
817 static void msc_disable(struct msc *msc)
819 struct msc_window *win = msc->cur_win;
822 lockdep_assert_held(&msc->buf_mutex);
824 if (msc->mode == MSC_MODE_MULTI)
827 if (msc->mbuf && msc->mbuf->deactivate)
828 msc->mbuf->deactivate(msc->mbuf_priv);
829 intel_th_msu_deinit(msc);
830 intel_th_trace_disable(msc->thdev);
832 if (msc->mode == MSC_MODE_SINGLE) {
833 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
834 msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);
836 reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
837 msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
838 dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
839 reg, msc->single_sz, msc->single_wrap);
842 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
844 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
846 if (msc->mbuf && msc->mbuf->ready)
847 msc->mbuf->ready(msc->mbuf_priv, win->sgt,
850 msc->enabled = 0;
852 iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
853 iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);
855 dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
856 ioread32(msc->reg_base + REG_MSU_MSC0NWSA));
858 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
859 dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
861 reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
862 reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
863 iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
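
In msc_disable above, single_sz is derived by masking the write pointer with the buffer size minus one. A sketch of that wrap math, assuming (as the mask form requires) a power-of-two buffer size; the names are illustrative:

    #include <stdio.h>

    int main(void)
    {
        unsigned long size = 16UL << 12;        /* 16 pages of 4 KiB */
        unsigned long write_ptr = size + 0x123; /* wrapped once */

        /* with power-of-two size, & (size - 1) is modulo size */
        unsigned long off = write_ptr & (size - 1);
        printf("valid-data offset: %#lx\n", off); /* 0x123 */
        return 0;
    }
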
868 struct msc *msc = dev_get_drvdata(&thdev->dev);
871 if (!atomic_inc_unless_negative(&msc->user_count))
874 mutex_lock(&msc->buf_mutex);
877 if (list_empty(&msc->iter_list))
878 ret = msc_configure(msc);
880 mutex_unlock(&msc->buf_mutex);
883 atomic_dec(&msc->user_count);
890 struct msc *msc = dev_get_drvdata(&thdev->dev);
892 mutex_lock(&msc->buf_mutex);
893 if (msc->enabled) {
894 msc_disable(msc);
895 atomic_dec(&msc->user_count);
897 mutex_unlock(&msc->buf_mutex);
902 * @msc: MSC device
905 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
910 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
920 ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
930 sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
932 ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
937 msc->nr_pages = nr_pages;
938 msc->base = page_address(page);
939 msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
947 sg_free_table(&msc->single_sgt);
955 * @msc: MSC configured in SINGLE mode
957 static void msc_buffer_contig_free(struct msc *msc)
961 dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
963 sg_free_table(&msc->single_sgt);
965 for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
966 struct page *page = virt_to_page(msc->base + off);
972 msc->nr_pages = 0;
977 * @msc: MSC configured in SINGLE mode
982 static struct page *msc_buffer_contig_get_page(struct msc *msc,
985 if (pgoff >= msc->nr_pages)
988 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
1003 block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
1016 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
1025 static void msc_buffer_set_uc(struct msc *msc)
1031 if (msc->mode == MSC_MODE_SINGLE) {
1032 set_memory_uc((unsigned long)msc->base, msc->nr_pages);
1036 list_for_each_entry(win, &msc->win_list, entry) {
1045 static void msc_buffer_set_wb(struct msc *msc)
1051 if (msc->mode == MSC_MODE_SINGLE) {
1052 set_memory_wb((unsigned long)msc->base, msc->nr_pages);
1056 list_for_each_entry(win, &msc->win_list, entry) {
1066 msc_buffer_set_uc(struct msc *msc) {}
1067 static inline void msc_buffer_set_wb(struct msc *msc) {}
1082 * @msc: MSC device
1085 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
1090 static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
1102 win->msc = msc;
1107 if (!list_empty(&msc->win_list)) {
1108 struct msc_window *prev = list_last_entry(&msc->win_list,
1115 if (msc->mbuf && msc->mbuf->alloc_window)
1116 ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
1127 if (list_empty(&msc->win_list)) {
1128 msc->base = msc_win_base(win);
1129 msc->base_addr = msc_win_base_dma(win);
1130 msc->cur_win = win;
1133 list_add_tail(&win->entry, &msc->win_list);
1134 msc->nr_pages += nr_blocks;
1144 static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
1153 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
1161 * @msc: MSC device
1164 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
1167 static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
1169 msc->nr_pages -= win->nr_blocks;
1172 if (list_empty(&msc->win_list)) {
1173 msc->base = NULL;
1174 msc->base_addr = 0;
1177 if (msc->mbuf && msc->mbuf->free_window)
1178 msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
1180 __msc_buffer_win_free(msc, win);
1187 * @msc: MSC device
1189 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
1192 static void msc_buffer_relink(struct msc *msc)
1196 /* call with msc::buf_mutex locked */
1197 list_for_each_entry(win, &msc->win_list, entry) {
1208 next_win = list_first_entry(&msc->win_list,
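
msc_buffer_relink above wires each window to its successor, with the last window wrapping to the first, so the hardware can walk the windows as a ring. A plain-struct sketch of that circular chaining; the fields here are hypothetical, not the real block descriptor layout:

    #include <stdio.h>

    #define NR_WINS 3

    struct win {
        int id;
        struct win *next;
    };

    int main(void)
    {
        struct win w[NR_WINS];
        int i;

        for (i = 0; i < NR_WINS; i++) {
            w[i].id = i;
            w[i].next = &w[(i + 1) % NR_WINS]; /* last wraps to first */
        }

        for (i = 0; i < NR_WINS; i++)
            printf("win %d -> win %d\n", w[i].id, w[i].next->id);
        return 0;
    }
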
1246 static void msc_buffer_multi_free(struct msc *msc)
1250 list_for_each_entry_safe(win, iter, &msc->win_list, entry)
1251 msc_buffer_win_free(msc, win);
1254 static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
1260 ret = msc_buffer_win_alloc(msc, nr_pages[i]);
1262 msc_buffer_multi_free(msc);
1267 msc_buffer_relink(msc);
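
msc_buffer_multi_alloc above is an all-or-nothing loop: allocate each window in turn and, on the first failure, free everything already allocated. A minimal sketch of that rollback pattern using plain malloc/free:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        void *win[3] = { 0 };
        int i, n = 3;

        for (i = 0; i < n; i++) {
            win[i] = malloc(4096);
            if (!win[i]) {
                while (i--)         /* roll back on failure */
                    free(win[i]);
                return 1;
            }
        }
        printf("allocated %d windows\n", n);
        for (i = 0; i < n; i++)
            free(win[i]);
        return 0;
    }
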
1274 * @msc: MSC device
1278 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
1281 static void msc_buffer_free(struct msc *msc)
1283 msc_buffer_set_wb(msc);
1285 if (msc->mode == MSC_MODE_SINGLE)
1286 msc_buffer_contig_free(msc);
1287 else if (msc->mode == MSC_MODE_MULTI)
1288 msc_buffer_multi_free(msc);
1293 * @msc: MSC device
1296 * Allocate a storage buffer for MSC; depending on msc::mode, it will be
1302 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
1307 static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
1313 if (atomic_read(&msc->user_count) != -1)
1316 if (msc->mode == MSC_MODE_SINGLE) {
1320 ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
1321 } else if (msc->mode == MSC_MODE_MULTI) {
1322 ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
1328 msc_buffer_set_uc(msc);
1333 if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
1342 * @msc: MSC device
1346 * Caller needs to hold msc::buf_mutex.
1351 static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
1355 count = atomic_cmpxchg(&msc->user_count, 0, -1);
1362 msc_buffer_free(msc);
1370 * @msc: MSC device
1374 static int msc_buffer_free_unless_used(struct msc *msc)
1378 mutex_lock(&msc->buf_mutex);
1379 ret = msc_buffer_unlocked_free_unless_used(msc);
1380 mutex_unlock(&msc->buf_mutex);
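
The "free unless used" helpers above hinge on atomic_cmpxchg: flip user_count from 0 (allocated, idle) to -1 (no buffer) in one step; any other observed value means a reference is held and freeing must fail. A sketch with C11 atomics standing in for the kernel's atomic_cmpxchg():

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        atomic_int user_count = 0; /* 0: allocated and idle */
        int expected = 0;

        if (atomic_compare_exchange_strong(&user_count, &expected, -1))
            printf("claimed: buffer may be freed\n");
        else
            printf("busy: user_count was %d\n", expected);
        return 0;
    }
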
1387 * @msc: MSC device
1390 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
1395 static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
1401 if (msc->mode == MSC_MODE_SINGLE)
1402 return msc_buffer_contig_get_page(msc, pgoff);
1404 list_for_each_entry(win, &msc->win_list, entry)
1461 struct msc *msc = dev_get_drvdata(&thdev->dev);
1467 iter = msc_iter_install(msc);
1479 struct msc *msc = iter->msc;
1481 msc_iter_remove(iter, msc);
1487 msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
1489 unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
1492 if (msc->single_wrap) {
1493 start += msc->single_sz;
1496 if (copy_to_user(buf, msc->base + start, tocopy))
1506 tocopy = min(rem, msc->single_sz - start);
1507 if (copy_to_user(buf, msc->base + start, tocopy))
1516 if (copy_to_user(buf, msc->base + start, rem))
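
msc_single_to_user above copies a wrapped buffer out in two segments: once the hardware has wrapped, the oldest data starts at the write pointer, so the tail [wptr..end) is copied before the head [0..wptr). A userspace sketch with memcpy standing in for copy_to_user():

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[] = "EFGHABCD";   /* wrapped ring; logically ABCDEFGH */
        size_t size = sizeof(buf) - 1, wptr = 4;
        char out[9] = { 0 };

        memcpy(out, buf + wptr, size - wptr); /* tail: oldest data */
        memcpy(out + size - wptr, buf, wptr); /* head: newest data */
        printf("%s\n", out);                  /* ABCDEFGH */
        return 0;
    }
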
1526 struct msc *msc = iter->msc;
1531 if (!atomic_inc_unless_negative(&msc->user_count))
1534 if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
1535 size = msc->single_sz;
1537 size = msc->nr_pages << PAGE_SHIFT;
1548 if (msc->mode == MSC_MODE_SINGLE) {
1549 ret = msc_single_to_user(msc, buf, off, len);
1552 } else if (msc->mode == MSC_MODE_MULTI) {
1566 atomic_dec(&msc->user_count);
1578 struct msc *msc = iter->msc;
1580 atomic_inc(&msc->mmap_count);
1586 struct msc *msc = iter->msc;
1589 if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
1593 for (pg = 0; pg < msc->nr_pages; pg++) {
1594 struct page *page = msc_buffer_get_page(msc, pg);
1604 atomic_dec(&msc->user_count);
1605 mutex_unlock(&msc->buf_mutex);
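
The munmap path above uses atomic_dec_and_mutex_lock: only the caller that drops the last mapping reference proceeds, and it does so with buf_mutex already held. A simplified sketch of that gate; note the real kernel helper takes the mutex around the final decrement to close a race that this two-step version leaves open:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int mmap_count = 1;
    static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void last_unmap_teardown(void)
    {
        if (atomic_fetch_sub(&mmap_count, 1) != 1)
            return;                 /* other mappings remain */

        pthread_mutex_lock(&buf_mutex);
        printf("last mapping gone: release pages\n");
        pthread_mutex_unlock(&buf_mutex);
    }

    int main(void)
    {
        last_unmap_teardown();
        return 0;
    }
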
1611 struct msc *msc = iter->msc;
1613 vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
1634 struct msc *msc = iter->msc;
1644 if (!atomic_inc_unless_negative(&msc->user_count))
1647 if (msc->mode != MSC_MODE_SINGLE &&
1648 msc->mode != MSC_MODE_MULTI)
1651 if (size >> PAGE_SHIFT != msc->nr_pages)
1654 atomic_set(&msc->mmap_count, 1);
1659 atomic_dec(&msc->user_count);
1678 struct msc *msc = dev_get_drvdata(&thdev->dev);
1684 reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
1689 dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
1692 static int intel_th_msc_init(struct msc *msc)
1694 atomic_set(&msc->user_count, -1);
1696 msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
1697 mutex_init(&msc->buf_mutex);
1698 INIT_LIST_HEAD(&msc->win_list);
1699 INIT_LIST_HEAD(&msc->iter_list);
1701 msc->burst_len =
1702 (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
1708 static int msc_win_switch(struct msc *msc)
1712 if (list_empty(&msc->win_list))
1715 first = list_first_entry(&msc->win_list, struct msc_window, entry);
1717 if (msc_is_last_win(msc->cur_win))
1718 msc->cur_win = first;
1720 msc->cur_win = list_next_entry(msc->cur_win, entry);
1722 msc->base = msc_win_base(msc->cur_win);
1723 msc->base_addr = msc_win_base_dma(msc->cur_win);
1725 intel_th_trace_switch(msc->thdev);
1737 struct msc *msc = dev_get_drvdata(dev);
1743 win = msc_find_window(msc, sgt, false);
1748 if (msc->switch_on_unlock == win) {
1749 msc->switch_on_unlock = NULL;
1750 msc_win_switch(msc);
1757 struct msc *msc = container_of(work, struct msc, work);
1759 intel_th_msc_deactivate(msc->thdev);
1764 struct msc *msc = dev_get_drvdata(&thdev->dev);
1765 u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
1766 u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
1769 if (!msc->do_irq || !msc->mbuf)
1775 return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
1777 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
1779 if (!msc->enabled)
1783 win = msc->cur_win;
1792 if (msc->stop_on_full)
1793 schedule_work(&msc->work);
1795 msc->switch_on_unlock = next_win;
1803 msc_win_switch(msc);
1805 if (msc->mbuf && msc->mbuf->ready)
1806 msc->mbuf->ready(msc->mbuf_priv, win->sgt,
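
The interrupt handler above reads MSUSTS, tests the bit for this MSC instance, and acks by writing the value back, since the status bits are write-1-to-clear. A sketch of that flow; MSC0_BLAST and the latched value are stand-ins, and the clear models the effect of the W1C write-back:

    #include <stdint.h>
    #include <stdio.h>

    #define MSC0_BLAST (1u << 0)   /* hypothetical "block last" bit */

    int main(void)
    {
        uint32_t msusts = MSC0_BLAST; /* pretend hardware latched it */

        if (!(msusts & MSC0_BLAST))
            return 0;              /* not ours: IRQ_NONE */

        msusts &= ~MSC0_BLAST;     /* effect of the W1C ack write */
        printf("acked, status now %#x\n", (unsigned)msusts);
        return 0;                  /* IRQ_HANDLED */
    }
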
1822 struct msc *msc = dev_get_drvdata(dev);
1824 return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
1831 struct msc *msc = dev_get_drvdata(dev);
1839 msc->wrap = !!val;
1846 static void msc_buffer_unassign(struct msc *msc)
1848 lockdep_assert_held(&msc->buf_mutex);
1850 if (!msc->mbuf)
1853 msc->mbuf->unassign(msc->mbuf_priv);
1854 msu_buffer_put(msc->mbuf);
1855 msc->mbuf_priv = NULL;
1856 msc->mbuf = NULL;
1862 struct msc *msc = dev_get_drvdata(dev);
1863 const char *mode = msc_mode[msc->mode];
1866 mutex_lock(&msc->buf_mutex);
1867 if (msc->mbuf)
1868 mode = msc->mbuf->name;
1870 mutex_unlock(&msc->buf_mutex);
1880 struct msc *msc = dev_get_drvdata(dev);
1903 if (!msc->do_irq) {
1916 if (i == MSC_MODE_MULTI && msc->multi_is_broken)
1919 mutex_lock(&msc->buf_mutex);
1923 if (mbuf && mbuf == msc->mbuf) {
1929 ret = msc_buffer_unlocked_free_unless_used(msc);
1941 msc_buffer_unassign(msc);
1942 msc->mbuf_priv = mbuf_priv;
1943 msc->mbuf = mbuf;
1945 msc_buffer_unassign(msc);
1948 msc->mode = i;
1953 mutex_unlock(&msc->buf_mutex);
1963 struct msc *msc = dev_get_drvdata(dev);
1967 mutex_lock(&msc->buf_mutex);
1969 if (msc->mode == MSC_MODE_SINGLE)
1970 count = scnprintf(buf, PAGE_SIZE, "%lu\n", msc->nr_pages);
1971 else if (msc->mode == MSC_MODE_MULTI) {
1972 list_for_each_entry(win, &msc->win_list, entry) {
1981 mutex_unlock(&msc->buf_mutex);
1990 struct msc *msc = dev_get_drvdata(dev);
2000 ret = msc_buffer_free_unless_used(msc);
2023 if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
2046 mutex_lock(&msc->buf_mutex);
2047 ret = msc_buffer_alloc(msc, win, nr_wins);
2048 mutex_unlock(&msc->buf_mutex);
2062 struct msc *msc = dev_get_drvdata(dev);
2074 mutex_lock(&msc->buf_mutex);
2080 if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
2081 ret = msc_win_switch(msc);
2082 mutex_unlock(&msc->buf_mutex);
2092 struct msc *msc = dev_get_drvdata(dev);
2094 return sprintf(buf, "%d\n", msc->stop_on_full);
2101 struct msc *msc = dev_get_drvdata(dev);
2104 ret = kstrtobool(buf, &msc->stop_on_full);
2130 struct msc *msc;
2142 msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
2143 if (!msc)
2148 msc->do_irq = 1;
2151 msc->multi_is_broken = 1;
2153 msc->index = thdev->id;
2155 msc->thdev = thdev;
2156 msc->reg_base = base + msc->index * 0x100;
2157 msc->msu_base = base;
2159 INIT_WORK(&msc->work, msc_work);
2160 err = intel_th_msc_init(msc);
2164 dev_set_drvdata(dev, msc);
2171 struct msc *msc = dev_get_drvdata(&thdev->dev);
2181 ret = msc_buffer_free_unless_used(msc);
2195 .name = "msc",