Lines matching refs:drvdata (drivers/hwtracing/coresight/coresight-tmc-etr.c)
39 struct tmc_drvdata *drvdata; member
595 static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata, in tmc_etr_alloc_flat_buf() argument
600 struct device *real_dev = drvdata->csdev->dev.parent; in tmc_etr_alloc_flat_buf()
619 flat_buf->dev = &drvdata->csdev->dev; in tmc_etr_alloc_flat_buf()
693 static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata, in tmc_etr_alloc_sg_buf() argument
698 struct device *dev = &drvdata->csdev->dev; in tmc_etr_alloc_sg_buf()
776 tmc_etr_get_catu_device(struct tmc_drvdata *drvdata) in tmc_etr_get_catu_device() argument
779 struct coresight_device *tmp, *etr = drvdata->csdev; in tmc_etr_get_catu_device()
794 static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata, in tmc_etr_enable_catu() argument
797 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata); in tmc_etr_enable_catu()
804 static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata) in tmc_etr_disable_catu() argument
806 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata); in tmc_etr_disable_catu()
809 helper_ops(catu)->disable(catu, drvdata->etr_buf); in tmc_etr_disable_catu()
831 struct tmc_drvdata *drvdata, in tmc_etr_mode_alloc_buf() argument
842 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, in tmc_etr_mode_alloc_buf()
860 static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata, in tmc_alloc_etr_buf() argument
868 struct device *dev = &drvdata->csdev->dev; in tmc_alloc_etr_buf()
870 has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); in tmc_alloc_etr_buf()
872 has_catu = !!tmc_etr_get_catu_device(drvdata); in tmc_alloc_etr_buf()
896 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata, in tmc_alloc_etr_buf()
899 rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata, in tmc_alloc_etr_buf()
902 rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata, in tmc_alloc_etr_buf()
957 static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata) in tmc_sync_etr_buf() argument
959 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_sync_etr_buf()
963 rrp = tmc_read_rrp(drvdata); in tmc_sync_etr_buf()
964 rwp = tmc_read_rwp(drvdata); in tmc_sync_etr_buf()
965 status = readl_relaxed(drvdata->base + TMC_STS); in tmc_sync_etr_buf()
972 dev_dbg(&drvdata->csdev->dev, in tmc_sync_etr_buf()
986 static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata) in __tmc_etr_enable_hw() argument
989 struct etr_buf *etr_buf = drvdata->etr_buf; in __tmc_etr_enable_hw()
991 CS_UNLOCK(drvdata->base); in __tmc_etr_enable_hw()
994 tmc_wait_for_tmcready(drvdata); in __tmc_etr_enable_hw()
996 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ); in __tmc_etr_enable_hw()
997 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); in __tmc_etr_enable_hw()
999 axictl = readl_relaxed(drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
1002 axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size); in __tmc_etr_enable_hw()
1005 if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) { in __tmc_etr_enable_hw()
1013 writel_relaxed(axictl, drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
1014 tmc_write_dba(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1020 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) { in __tmc_etr_enable_hw()
1021 tmc_write_rrp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1022 tmc_write_rwp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1023 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL; in __tmc_etr_enable_hw()
1024 writel_relaxed(sts, drvdata->base + TMC_STS); in __tmc_etr_enable_hw()
1030 drvdata->base + TMC_FFCR); in __tmc_etr_enable_hw()
1031 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); in __tmc_etr_enable_hw()
1032 tmc_enable_hw(drvdata); in __tmc_etr_enable_hw()
1034 CS_LOCK(drvdata->base); in __tmc_etr_enable_hw()
1037 static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata, in tmc_etr_enable_hw() argument
1047 WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG))) in tmc_etr_enable_hw()
1050 if (WARN_ON(drvdata->etr_buf)) in tmc_etr_enable_hw()
1057 rc = tmc_etr_enable_catu(drvdata, etr_buf); in tmc_etr_enable_hw()
1060 rc = coresight_claim_device(drvdata->csdev); in tmc_etr_enable_hw()
1062 drvdata->etr_buf = etr_buf; in tmc_etr_enable_hw()
1063 __tmc_etr_enable_hw(drvdata); in tmc_etr_enable_hw()
1079 ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata, in tmc_etr_get_sysfs_trace() argument
1084 struct etr_buf *etr_buf = drvdata->sysfs_buf; in tmc_etr_get_sysfs_trace()
1099 tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata) in tmc_etr_setup_sysfs_buf() argument
1101 return tmc_alloc_etr_buf(drvdata, drvdata->size, in tmc_etr_setup_sysfs_buf()
1112 static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata) in tmc_etr_sync_sysfs_buf() argument
1114 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_etr_sync_sysfs_buf()
1116 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) { in tmc_etr_sync_sysfs_buf()
1117 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf); in tmc_etr_sync_sysfs_buf()
1118 drvdata->sysfs_buf = NULL; in tmc_etr_sync_sysfs_buf()
1120 tmc_sync_etr_buf(drvdata); in tmc_etr_sync_sysfs_buf()
1131 static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata) in __tmc_etr_disable_hw() argument
1133 CS_UNLOCK(drvdata->base); in __tmc_etr_disable_hw()
1135 tmc_flush_and_stop(drvdata); in __tmc_etr_disable_hw()
1140 if (drvdata->mode == CS_MODE_SYSFS) in __tmc_etr_disable_hw()
1141 tmc_etr_sync_sysfs_buf(drvdata); in __tmc_etr_disable_hw()
1143 tmc_disable_hw(drvdata); in __tmc_etr_disable_hw()
1145 CS_LOCK(drvdata->base); in __tmc_etr_disable_hw()
1149 void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) in tmc_etr_disable_hw() argument
1151 __tmc_etr_disable_hw(drvdata); in tmc_etr_disable_hw()
1153 tmc_etr_disable_catu(drvdata); in tmc_etr_disable_hw()
1154 coresight_disclaim_device(drvdata->csdev); in tmc_etr_disable_hw()
1156 drvdata->etr_buf = NULL; in tmc_etr_disable_hw()
1163 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_sysfs() local
1174 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1175 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1176 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) { in tmc_enable_etr_sink_sysfs()
1177 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1180 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata); in tmc_enable_etr_sink_sysfs()
1185 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1188 if (drvdata->reading || drvdata->mode == CS_MODE_PERF) { in tmc_enable_etr_sink_sysfs()
1198 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_sysfs()
1207 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1210 drvdata->sysfs_buf = new_buf; in tmc_enable_etr_sink_sysfs()
1213 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1215 drvdata->mode = CS_MODE_SYSFS; in tmc_enable_etr_sink_sysfs()
1219 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1239 alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in alloc_etr_buf() argument
1251 if ((nr_pages << PAGE_SHIFT) > drvdata->size) { in alloc_etr_buf()
1252 etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT), in alloc_etr_buf()
1262 size = drvdata->size; in alloc_etr_buf()
1264 etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL); in alloc_etr_buf()
1277 get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata, in get_perf_etr_buf_cpu_wide() argument
1304 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1305 etr_buf = idr_find(&drvdata->idr, pid); in get_perf_etr_buf_cpu_wide()
1308 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1313 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1315 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); in get_perf_etr_buf_cpu_wide()
1320 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1321 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL); in get_perf_etr_buf_cpu_wide()
1322 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1341 get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata, in get_perf_etr_buf_per_thread() argument
1349 return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); in get_perf_etr_buf_per_thread()
1353 get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in get_perf_etr_buf() argument
1357 return get_perf_etr_buf_per_thread(drvdata, event, nr_pages, in get_perf_etr_buf()
1360 return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages, in get_perf_etr_buf()
1365 tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in tmc_etr_setup_perf_buf() argument
1378 etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot); in tmc_etr_setup_perf_buf()
1390 etr_perf->drvdata = drvdata; in tmc_etr_setup_perf_buf()
1402 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_alloc_etr_buffer() local
1404 etr_perf = tmc_etr_setup_perf_buf(drvdata, event, in tmc_alloc_etr_buffer()
1422 struct tmc_drvdata *drvdata = etr_perf->drvdata; in tmc_free_etr_buffer() local
1428 mutex_lock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1431 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1436 buf = idr_remove(&drvdata->idr, etr_perf->pid); in tmc_free_etr_buffer()
1437 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1518 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_update_etr_buffer() local
1522 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1526 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1530 if (WARN_ON(drvdata->perf_buf != etr_buf)) { in tmc_update_etr_buffer()
1532 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1536 CS_UNLOCK(drvdata->base); in tmc_update_etr_buffer()
1538 tmc_flush_and_stop(drvdata); in tmc_update_etr_buffer()
1539 tmc_sync_etr_buf(drvdata); in tmc_update_etr_buffer()
1541 CS_LOCK(drvdata->base); in tmc_update_etr_buffer()
1542 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1556 u32 mask = tmc_get_memwidth_mask(drvdata); in tmc_update_etr_buffer()
1607 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_perf() local
1611 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1613 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_perf()
1627 if (drvdata->pid != -1 && drvdata->pid != pid) { in tmc_enable_etr_sink_perf()
1636 if (drvdata->pid == pid) { in tmc_enable_etr_sink_perf()
1641 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf); in tmc_enable_etr_sink_perf()
1644 drvdata->pid = pid; in tmc_enable_etr_sink_perf()
1645 drvdata->mode = CS_MODE_PERF; in tmc_enable_etr_sink_perf()
1646 drvdata->perf_buf = etr_perf->etr_buf; in tmc_enable_etr_sink_perf()
1651 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1672 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_disable_etr_sink() local
1674 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1676 if (drvdata->reading) { in tmc_disable_etr_sink()
1677 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1682 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1687 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED); in tmc_disable_etr_sink()
1688 tmc_etr_disable_hw(drvdata); in tmc_disable_etr_sink()
1690 drvdata->pid = -1; in tmc_disable_etr_sink()
1691 drvdata->mode = CS_MODE_DISABLED; in tmc_disable_etr_sink()
1693 drvdata->perf_buf = NULL; in tmc_disable_etr_sink()
1695 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1713 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) in tmc_read_prepare_etr() argument
1719 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_prepare_etr()
1722 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1723 if (drvdata->reading) { in tmc_read_prepare_etr()
1733 if (!drvdata->sysfs_buf) { in tmc_read_prepare_etr()
1739 if (drvdata->mode == CS_MODE_SYSFS) in tmc_read_prepare_etr()
1740 __tmc_etr_disable_hw(drvdata); in tmc_read_prepare_etr()
1742 drvdata->reading = true; in tmc_read_prepare_etr()
1744 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1749 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) in tmc_read_unprepare_etr() argument
1755 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_unprepare_etr()
1758 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
1761 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_read_unprepare_etr()
1767 __tmc_etr_enable_hw(drvdata); in tmc_read_unprepare_etr()
1773 sysfs_buf = drvdata->sysfs_buf; in tmc_read_unprepare_etr()
1774 drvdata->sysfs_buf = NULL; in tmc_read_unprepare_etr()
1777 drvdata->reading = false; in tmc_read_unprepare_etr()
1778 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
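
Context for the pattern these matches follow (illustrative only, not part of the file above): the drvdata state touched here (mode, reading, sysfs_buf, perf_buf, etr_buf) is accessed under drvdata->spinlock with interrupts disabled, while the perf-mode buffer idr uses idr_mutex instead. The sketch below mirrors the shape of tmc_read_prepare_etr() as seen in the matched lines 1722-1744; demo_drvdata and demo_read_prepare are hypothetical stand-in names with simplified fields, not the driver's actual types.

/* Minimal sketch of the spinlock-guarded state pattern, assuming a
 * simplified stand-in for struct tmc_drvdata. Hypothetical names. */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_drvdata {
	spinlock_t	spinlock;	/* guards mode/reading/buffer pointers */
	bool		reading;	/* a sysfs read is in progress */
	void		*sysfs_buf;	/* buffer exposed through sysfs */
};

/* Take the lock, refuse a concurrent reader, require a buffer,
 * then flip the state before releasing the lock. */
static int demo_read_prepare(struct demo_drvdata *drvdata)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;
		goto out;
	}
	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

The same lock/check/update/unlock shape recurs in the matched lines for tmc_enable_etr_sink_sysfs(), tmc_enable_etr_sink_perf(), tmc_disable_etr_sink() and tmc_read_unprepare_etr(); only the state being checked (mode, pid, reading) differs.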