// SPDX-License-Identifier: GPL-2.0
/* Marvell PTP driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "ptp.h"
#include "mbox.h"
#include "rvu.h"

#define DRV_NAME				"Marvell PTP Driver"

#define PCI_DEVID_OCTEONTX2_PTP			0xA00C
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP		0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP		0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP		0xB300
#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP	0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP		0xB500
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP	0xB600
#define PCI_DEVID_OCTEONTX2_RST			0xA085
#define PCI_DEVID_CN10K_PTP			0xA09E

#define PCI_PTP_BAR_NO				0

#define PTP_CLOCK_CFG				0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN			BIT_ULL(0)
#define PTP_CLOCK_CFG_EXT_CLK_EN		BIT_ULL(1)
#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK		GENMASK_ULL(7, 2)
#define PTP_CLOCK_CFG_TSTMP_EDGE		BIT_ULL(9)
#define PTP_CLOCK_CFG_TSTMP_EN			BIT_ULL(8)
#define PTP_CLOCK_CFG_TSTMP_IN_MASK		GENMASK_ULL(15, 10)
#define PTP_CLOCK_CFG_PPS_EN			BIT_ULL(30)
#define PTP_CLOCK_CFG_PPS_INV			BIT_ULL(31)

#define PTP_PPS_HI_INCR				0xF60ULL
#define PTP_PPS_LO_INCR				0xF68ULL
#define PTP_PPS_THRESH_HI			0xF58ULL

#define PTP_CLOCK_LO				0xF08ULL
#define PTP_CLOCK_HI				0xF10ULL
#define PTP_CLOCK_COMP				0xF18ULL
#define PTP_TIMESTAMP				0xF20ULL

static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];

struct ptp *ptp_get(void)
{
	struct ptp *ptp = first_ptp_block;

	/* Check PTP block is present in hardware */
	if (!pci_dev_present(ptp_id_table))
		return ERR_PTR(-ENODEV);
	/* Check driver is bound to PTP block */
	if (!ptp)
		ptp = ERR_PTR(-EPROBE_DEFER);

	return ptp;
}

void ptp_put(struct ptp *ptp)
{
	if (!ptp)
		return;

	pci_dev_put(ptp->pdev);
}

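/* Usage sketch (illustrative only, not part of this file): a consumer such
 * as the RVU AF driver is expected to look up the PTP block at probe time
 * and handle the two error cases returned by ptp_get(). The caller below
 * is hypothetical.
 *
 *	ptr = ptp_get();
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);	// -ENODEV: no PTP block in hardware
 *					// -EPROBE_DEFER: not yet bound, retry
 *	...
 *	ptp_put(ptr);			// drops the reference on ptp->pdev
 */
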
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
	bool neg_adj = false;
	u64 comp;
	u64 adj;
	s64 ppb;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* The hardware adds the clock compensation value to the PTP clock
	 * on every coprocessor clock cycle. The typical convention is that
	 * it represents the number of nanoseconds between each cycle. In
	 * this convention the compensation value is a 64-bit fixed-point
	 * number whose upper 32 bits are whole nanoseconds and whose lower
	 * 32 bits are fractions of a nanosecond.
	 * The scaled_ppm argument is the ratio, in parts per million scaled
	 * left by 16 bits, by which the compensation value should be
	 * corrected.
	 * The new compensation value is computed with 64-bit fixed-point
	 * arithmetic using the following formula
	 * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
	 * where tbase is the base compensation value derived from the clock
	 * rate (the same value programmed initially in ptp_start()).
	 */
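	/* Illustrative worked example (not from the datasheet): assume a
	 * 1 GHz coprocessor clock and scaled_ppm = 65536 (i.e. +1 ppm).
	 *
	 *   comp = (10^9 << 32) / 10^9       = 0x100000000 (1 ns per cycle)
	 *   ppb  = ((1 + 65536) * 125) >> 13 = 1000
	 *   adj  = (comp * 1000) / 10^9      = 4294
	 *   comp = comp + adj                = 0x1000010C6
	 *
	 * i.e. the counter now advances roughly 1 ppm faster than one
	 * nanosecond per cycle.
	 */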
	comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
	/* convert scaled_ppm (ppm << 16) to ppb: * 1000 / 2^16 == * 125 >> 13 */
	ppb = 1 + scaled_ppm;
	ppb *= 125;
	ppb >>= 13;
	adj = comp * ppb;
	adj = div_u64(adj, 1000000000ull);
	comp = neg_adj ? comp - adj : comp + adj;

	writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);

	return 0;
}

static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
	/* Return the current PTP clock */
	*clk = readq(ptp->reg_base + PTP_CLOCK_HI);

	return 0;
}

void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
{
	struct pci_dev *pdev;
	u64 clock_comp;
	u64 clock_cfg;

	if (!ptp)
		return;

	pdev = ptp->pdev;

	if (!sclk) {
		dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
		return;
	}

	/* sclk is in MHz */
	ptp->clock_rate = sclk * 1000000;

	/* Enable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);

	if (ext_clk_freq) {
		ptp->clock_rate = ext_clk_freq;
		/* Set GPIO as PTP clock source */
		clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
		clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
	}

	if (extts) {
		clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
		/* Set GPIO as timestamping source */
		clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
		clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
	}

	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
	clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);

	/* Set 50% duty cycle for 1Hz output: program 0x1DCD6500
	 * (500,000,000 ns, i.e. 0.5 sec in the upper 32 bits of the
	 * nanoseconds fixed-point format) as both the high and low
	 * phase increments of the PPS signal.
	 */
	writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
	writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);

	clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
	/* Initial compensation value to start the nanosecs counter */
	writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}

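/* Usage sketch (illustrative only, not part of this driver): the AF (rvu)
 * driver is expected to start the clock once it knows the coprocessor
 * clock frequency, e.g. from firmware data. The variable names below are
 * hypothetical placeholders.
 *
 *	u64 sclk_mhz = 1000;		// coprocessor clock, in MHz
 *	u32 ext_clk_hz = 0;		// 0: derive the rate from sclk
 *	u32 ext_tstamp = 0;		// 0: no external timestamp input
 *
 *	ptp_start(rvu->ptp, sclk_mhz, ext_clk_hz, ext_tstamp);
 */
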
static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
	*clk = readq(ptp->reg_base + PTP_TIMESTAMP);

	return 0;
}

static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
	writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);

	return 0;
}

static int ptp_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ptp *ptp;
	int err;

	ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto error;
	}

	ptp->pdev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto error_free;

	err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
	if (err)
		goto error_free;

	ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];

	pci_set_drvdata(pdev, ptp);
	if (!first_ptp_block)
		first_ptp_block = ptp;

	return 0;

error_free:
	devm_kfree(dev, ptp);

error:
	/* For `ptp_get()` we need to differentiate between the case
	 * when the core has not tried to probe this device and the case
	 * when the probe failed.  In the latter case we pretend that the
	 * initialization was successful and keep the error in
	 * `dev->driver_data`.
	 */
	pci_set_drvdata(pdev, ERR_PTR(err));
	if (!first_ptp_block)
		first_ptp_block = ERR_PTR(err);

	return 0;
}

static void ptp_remove(struct pci_dev *pdev)
{
	struct ptp *ptp = pci_get_drvdata(pdev);
	u64 clock_cfg;

	if (IS_ERR_OR_NULL(ptp))
		return;

	/* Disable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
}

static const struct pci_device_id ptp_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
	{ 0, }
};

struct pci_driver ptp_driver = {
	.name = DRV_NAME,
	.id_table = ptp_id_table,
	.probe = ptp_probe,
	.remove = ptp_remove,
};

int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
			    struct ptp_rsp *rsp)
{
	int err = 0;

	/* This is the PTP mailbox handler invoked by AF consumers/netdev
	 * drivers via the mailbox mechanism. Netdev drivers use it to read
	 * the PTP clock and to apply frequency adjustments. Since a mailbox
	 * request can arrive without any notion of whether this driver is
	 * bound to the PTP device, the check below is needed as the first
	 * step.
	 */
	if (!rvu->ptp)
		return -ENODEV;

	switch (req->op) {
	case PTP_OP_ADJFINE:
		err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
		break;
	case PTP_OP_GET_CLOCK:
		err = ptp_get_clock(rvu->ptp, &rsp->clk);
		break;
	case PTP_OP_GET_TSTMP:
		err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
		break;
	case PTP_OP_SET_THRESH:
		err = ptp_set_thresh(rvu->ptp, req->thresh);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
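
/* Consumer-side sketch (illustrative only): a netdev driver on the PF/VF
 * side is expected to fill a struct ptp_req, send it over its mailbox to
 * the AF, and read the result from struct ptp_rsp once the AF has run the
 * handler above. The mailbox helpers named below are placeholders only;
 * the real helpers live in the consumer driver, not in this file.
 *
 *	struct ptp_req *req;
 *	struct ptp_rsp *rsp;
 *
 *	req = alloc_ptp_req_msg(mbox);		// hypothetical helper
 *	req->op = PTP_OP_GET_CLOCK;
 *	send_mbox_msg_and_wait(mbox);		// hypothetical helper
 *	rsp = get_ptp_rsp_msg(mbox);		// hypothetical helper
 *	current_clock = rsp->clk;		// value read from PTP_CLOCK_HI
 */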