// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2020 Linaro Limited. All rights reserved.
 * Author: Mike Leach <mike.leach@linaro.org>
 */

#include "coresight-etm4x.h"
#include "coresight-etm4x-cfg.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"

/* defines to associate register IDs with driver data locations */
#define CHECKREG(cval, elem) \
	{ \
		if (offset == cval) { \
			reg_csdev->driver_regval = &drvcfg->elem; \
			err = 0; \
			break; \
		} \
	}

#define CHECKREGIDX(cval, elem, off_idx, mask) \
	{ \
		if (mask == cval) { \
			reg_csdev->driver_regval = &drvcfg->elem[off_idx]; \
			err = 0; \
			break; \
		} \
	}
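
/*
 * Both macros are expanded inside do { } while (0) blocks below: the break
 * leaves the block as soon as the supplied offset matches a register ID,
 * with err cleared and driver_regval pointing at the mapped config field.
 */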

/**
 * etm4_cfg_map_reg_offset - validate and map the register offset into a
 *			     location in the driver config struct.
 *
 * Limits the number of registers that can be accessed and programmed in
 * features, to those which are used to control the trace capture parameters.
 *
 * Omits or limits access to those which the driver must use exclusively.
 *
 * Invalid offsets will result in fail code return and feature load failure.
 *
 * @drvdata:	driver data to map into.
 * @reg_csdev:	register to map.
 * @offset:	device offset for the register.
 */
static int etm4_cfg_map_reg_offset(struct etmv4_drvdata *drvdata,
				   struct cscfg_regval_csdev *reg_csdev, u32 offset)
{
	int err = -EINVAL, idx;
	struct etmv4_config *drvcfg = &drvdata->config;
	u32 off_mask;

	if (((offset >= TRCEVENTCTL0R) && (offset <= TRCVIPCSSCTLR)) ||
	    ((offset >= TRCSEQRSTEVR) && (offset <= TRCEXTINSELR)) ||
	    ((offset >= TRCCIDCCTLR0) && (offset <= TRCVMIDCCTLR1))) {
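		/* single instance 32-bit control registers - each maps to one config field */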
		do {
			CHECKREG(TRCEVENTCTL0R, eventctrl0);
			CHECKREG(TRCEVENTCTL1R, eventctrl1);
			CHECKREG(TRCSTALLCTLR, stall_ctrl);
			CHECKREG(TRCTSCTLR, ts_ctrl);
			CHECKREG(TRCSYNCPR, syncfreq);
			CHECKREG(TRCCCCTLR, ccctlr);
			CHECKREG(TRCBBCTLR, bb_ctrl);
			CHECKREG(TRCVICTLR, vinst_ctrl);
			CHECKREG(TRCVIIECTLR, viiectlr);
			CHECKREG(TRCVISSCTLR, vissctlr);
			CHECKREG(TRCVIPCSSCTLR, vipcssctlr);
			CHECKREG(TRCSEQRSTEVR, seq_rst);
			CHECKREG(TRCSEQSTR, seq_state);
			CHECKREG(TRCEXTINSELR, ext_inp);
			CHECKREG(TRCCIDCCTLR0, ctxid_mask0);
			CHECKREG(TRCCIDCCTLR1, ctxid_mask1);
			CHECKREG(TRCVMIDCCTLR0, vmid_mask0);
			CHECKREG(TRCVMIDCCTLR1, vmid_mask1);
		} while (0);
	} else if ((offset & GENMASK(11, 4)) == TRCSEQEVRn(0)) {
		/* sequencer state control registers */
		idx = (offset & GENMASK(3, 0)) / 4;
		if (idx < ETM_MAX_SEQ_STATES) {
			reg_csdev->driver_regval = &drvcfg->seq_ctrl[idx];
			err = 0;
		}
	} else if ((offset >= TRCSSCCRn(0)) && (offset <= TRCSSPCICRn(7))) {
		/* 32 bit, 8 off indexed register sets */
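		/*
		 * idx is the register's position within its 32-byte bank
		 * (4-byte stride, 8 registers); off_mask recovers the bank
		 * base so it can be matched against the n = 0 offset below.
		 */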
		idx = (offset & GENMASK(4, 0)) / 4;
		off_mask = (offset & GENMASK(11, 5));
		do {
			CHECKREGIDX(TRCSSCCRn(0), ss_ctrl, idx, off_mask);
			CHECKREGIDX(TRCSSCSRn(0), ss_status, idx, off_mask);
			CHECKREGIDX(TRCSSPCICRn(0), ss_pe_cmp, idx, off_mask);
		} while (0);
	} else if ((offset >= TRCCIDCVRn(0)) && (offset <= TRCVMIDCVRn(7))) {
		/* 64 bit, 8 off indexed register sets */
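		/* 8-byte stride, 8 registers per 64-byte bank */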
		idx = (offset & GENMASK(5, 0)) / 8;
		off_mask = (offset & GENMASK(11, 6));
		do {
			CHECKREGIDX(TRCCIDCVRn(0), ctxid_pid, idx, off_mask);
			CHECKREGIDX(TRCVMIDCVRn(0), vmid_val, idx, off_mask);
		} while (0);
	} else if ((offset >= TRCRSCTLRn(2)) &&
		   (offset <= TRCRSCTLRn((ETM_MAX_RES_SEL - 1)))) {
		/* 32 bit resource selection regs, 32 off, skip fixed 0,1 */
		idx = (offset & GENMASK(6, 0)) / 4;
		if (idx < ETM_MAX_RES_SEL) {
			reg_csdev->driver_regval = &drvcfg->res_ctrl[idx];
			err = 0;
		}
	} else if ((offset >= TRCACVRn(0)) &&
		   (offset <= TRCACATRn((ETM_MAX_SINGLE_ADDR_CMP - 1)))) {
		/* 64 bit addr cmp regs, 16 off */
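		/* 8-byte stride, 16 registers per 128-byte bank (value and access type sets) */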
		idx = (offset & GENMASK(6, 0)) / 8;
		off_mask = offset & GENMASK(11, 7);
		do {
			CHECKREGIDX(TRCACVRn(0), addr_val, idx, off_mask);
			CHECKREGIDX(TRCACATRn(0), addr_acc, idx, off_mask);
		} while (0);
	} else if ((offset >= TRCCNTRLDVRn(0)) &&
		   (offset <= TRCCNTVRn((ETMv4_MAX_CNTR - 1)))) {
		/* 32 bit counter regs, 4 off (ETMv4_MAX_CNTR - 1) */
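		/* 4-byte stride, ETMv4_MAX_CNTR registers per 16-byte bank (reload/ctrl/value sets) */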
		idx = (offset & GENMASK(3, 0)) / 4;
		off_mask = offset & GENMASK(11, 4);
		do {
			CHECKREGIDX(TRCCNTRLDVRn(0), cntrldvr, idx, off_mask);
			CHECKREGIDX(TRCCNTCTLRn(0), cntr_ctrl, idx, off_mask);
			CHECKREGIDX(TRCCNTVRn(0), cntr_val, idx, off_mask);
		} while (0);
	}
	return err;
}

/**
 * etm4_cfg_load_feature - load a feature into a device instance.
 *
 * @csdev:	An ETMv4 CoreSight device.
 * @feat_csdev:	The feature to be loaded.
 *
 * The function will load a feature instance into the device, checking that
 * the register definitions are valid for the device.
 *
 * Parameter and register definitions will be converted into internal
 * structures that are used to set the values in the driver when the
 * feature is enabled for the device.
 *
 * The feature spinlock pointer is initialised to the same spinlock
 * that the driver uses to protect the internal register values.
 */
static int etm4_cfg_load_feature(struct coresight_device *csdev,
				 struct cscfg_feature_csdev *feat_csdev)
{
	struct device *dev = csdev->dev.parent;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	const struct cscfg_feature_desc *feat_desc = feat_csdev->feat_desc;
	u32 offset;
	int i = 0, err = 0;

	/*
	 * It is essential that we set the device spinlock here - it is used
	 * in the generic programming routines when copying values into the
	 * drvdata structures via the pointers set up in
	 * etm4_cfg_map_reg_offset().
	 */
	feat_csdev->drv_spinlock = &drvdata->spinlock;

	/* process the register descriptions */
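	/* a single offset that fails to map aborts the scan and fails the feature load */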
	for (i = 0; i < feat_csdev->nr_regs && !err; i++) {
		offset = feat_desc->regs_desc[i].offset;
		err = etm4_cfg_map_reg_offset(drvdata, &feat_csdev->regs_csdev[i], offset);
	}
	return err;
}

/* match information when loading configurations */
#define CS_CFG_ETM4_MATCH_FLAGS	(CS_CFG_MATCH_CLASS_SRC_ALL | \
				 CS_CFG_MATCH_CLASS_SRC_ETM4)

int etm4_cscfg_register(struct coresight_device *csdev)
{
	struct cscfg_csdev_feat_ops ops;

	ops.load_feat = &etm4_cfg_load_feature;

	return cscfg_register_csdev(csdev, CS_CFG_ETM4_MATCH_FLAGS, &ops);
}