// SPDX-License-Identifier: GPL-2.0
/*
 * Earthsoft PT3 driver
 *
 * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include "pt3.h"

#define PT3_ACCESS_UNIT (TS_PACKET_SZ * 128)
#define PT3_BUF_CANARY  (0x74)

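/*
 * Return the register offset of the DMA block for the given adapter.
 * Each block is 0x18 bytes from REG_DMA_BASE; the blocks for adapter
 * indices 1 and 2 are swapped in the register layout.
 */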
static u32 get_dma_base(int idx)
{
	int i;

	i = (idx == 1 || idx == 2) ? 3 - idx : idx;
	return REG_DMA_BASE + 0x18 * i;
}

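/*
 * Stop the adapter's DMA transfer.  If the busy bit (bit 0) of the status
 * register is already clear there is nothing to do; otherwise issue the
 * stop command and poll up to five times at 50 ms intervals for the bit
 * to clear.
 */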
int pt3_stop_dma(struct pt3_adapter *adap)
{
	struct pt3_board *pt3 = adap->dvb_adap.priv;
	u32 base;
	u32 stat;
	int retry;

	base = get_dma_base(adap->adap_idx);
	stat = ioread32(pt3->regs[0] + base + OFST_STATUS);
	if (!(stat & 0x01))
		return 0;

	iowrite32(0x02, pt3->regs[0] + base + OFST_DMA_CTL);
	for (retry = 0; retry < 5; retry++) {
		stat = ioread32(pt3->regs[0] + base + OFST_STATUS);
		if (!(stat & 0x01))
			return 0;
		msleep(50);
	}
	return -EIO;
}

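/*
 * (Re)start the adapter's DMA transfer: stop any transfer in progress,
 * program the bus address of the first transfer descriptor, then issue
 * the start command.
 */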
int pt3_start_dma(struct pt3_adapter *adap)
{
	struct pt3_board *pt3 = adap->dvb_adap.priv;
	u32 base = get_dma_base(adap->adap_idx);

	iowrite32(0x02, pt3->regs[0] + base + OFST_DMA_CTL);
	iowrite32(lower_32_bits(adap->desc_buf[0].b_addr),
			pt3->regs[0] + base + OFST_DMA_DESC_L);
	iowrite32(upper_32_bits(adap->desc_buf[0].b_addr),
			pt3->regs[0] + base + OFST_DMA_DESC_H);
	iowrite32(0x01, pt3->regs[0] + base + OFST_DMA_CTL);
	return 0;
}


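/*
 * Advance (*idx, *ofs) by one access unit, wrapping from the end of a
 * data buffer to the start of the next one (and from the last buffer
 * back to the first), and return a pointer to the new unit.
 */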
static u8 *next_unit(struct pt3_adapter *adap, int *idx, int *ofs)
{
	*ofs += PT3_ACCESS_UNIT;
	if (*ofs >= DATA_BUF_SZ) {
		*ofs -= DATA_BUF_SZ;
		(*idx)++;
		if (*idx == adap->num_bufs)
			*idx = 0;
	}
	return &adap->buffer[*idx].data[*ofs];
}

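/*
 * Feed the access units that the DMA engine has completed since the last
 * call to the software demux.  A unit whose first byte is still
 * PT3_BUF_CANARY has not been written by the hardware yet; every unit
 * that has been consumed is re-marked with the canary so that it can be
 * reused.
 */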
int pt3_proc_dma(struct pt3_adapter *adap)
{
	int idx, ofs;

	idx = adap->buf_idx;
	ofs = adap->buf_ofs;

	if (adap->buffer[idx].data[ofs] == PT3_BUF_CANARY)
		return 0;

	while (*next_unit(adap, &idx, &ofs) != PT3_BUF_CANARY) {
		u8 *p;

		p = &adap->buffer[adap->buf_idx].data[adap->buf_ofs];
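		/*
		 * Skip units that are still to be discarded; feed the rest
		 * to the demux, splitting a unit that wraps past the end of
		 * its data buffer into two calls.
		 */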
		if (adap->num_discard > 0)
			adap->num_discard--;
		else if (adap->buf_ofs + PT3_ACCESS_UNIT > DATA_BUF_SZ) {
			dvb_dmx_swfilter_packets(&adap->demux, p,
				(DATA_BUF_SZ - adap->buf_ofs) / TS_PACKET_SZ);
			dvb_dmx_swfilter_packets(&adap->demux,
				adap->buffer[idx].data, ofs / TS_PACKET_SZ);
		} else
			dvb_dmx_swfilter_packets(&adap->demux, p,
				PT3_ACCESS_UNIT / TS_PACKET_SZ);

		*p = PT3_BUF_CANARY;
		adap->buf_idx = idx;
		adap->buf_ofs = ofs;
	}
	return 0;
}

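/*
 * Write PT3_BUF_CANARY into the first byte of every access unit of every
 * data buffer so that pt3_proc_dma() can tell which units the hardware
 * has filled.
 */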
void pt3_init_dmabuf(struct pt3_adapter *adap)
{
	int idx, ofs;
	u8 *p;

	idx = 0;
	ofs = 0;
	p = adap->buffer[0].data;
	/* mark all of the buffers as "not written yet" */
	while (idx < adap->num_bufs) {
		p[ofs] = PT3_BUF_CANARY;
		ofs += PT3_ACCESS_UNIT;
		if (ofs >= DATA_BUF_SZ) {
			ofs -= DATA_BUF_SZ;
			idx++;
			p = adap->buffer[idx].data;
		}
	}
	adap->buf_idx = 0;
	adap->buf_ofs = 0;
}

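/* Release the coherent data buffers and descriptor pages allocated so far. */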
void pt3_free_dmabuf(struct pt3_adapter *adap)
{
	struct pt3_board *pt3;
	int i;

	pt3 = adap->dvb_adap.priv;
	for (i = 0; i < adap->num_bufs; i++)
		dma_free_coherent(&pt3->pdev->dev, DATA_BUF_SZ,
			adap->buffer[i].data, adap->buffer[i].b_addr);
	adap->num_bufs = 0;

	for (i = 0; i < adap->num_desc_bufs; i++)
		dma_free_coherent(&pt3->pdev->dev, PAGE_SIZE,
			adap->desc_buf[i].descs, adap->desc_buf[i].b_addr);
	adap->num_desc_bufs = 0;
}


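/*
 * Allocate the coherent DMA data buffers and the pages of transfer
 * descriptors, and chain the descriptors into a circular list covering
 * every data buffer.  On allocation failure, everything allocated so far
 * is released and -ENOMEM is returned.
 */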
int pt3_alloc_dmabuf(struct pt3_adapter *adap)
{
	struct pt3_board *pt3;
	void *p;
	int i, j;
	int idx, ofs;
	int num_desc_bufs;
	dma_addr_t data_addr, desc_addr;
	struct xfer_desc *d;

	pt3 = adap->dvb_adap.priv;
	adap->num_bufs = 0;
	adap->num_desc_bufs = 0;
	for (i = 0; i < pt3->num_bufs; i++) {
		p = dma_alloc_coherent(&pt3->pdev->dev, DATA_BUF_SZ,
					&adap->buffer[i].b_addr, GFP_KERNEL);
		if (p == NULL)
			goto failed;
		adap->buffer[i].data = p;
		adap->num_bufs++;
	}
	pt3_init_dmabuf(adap);

	/* build circular-linked pointers (xfer_desc) to the data buffers */
	idx = 0;
	ofs = 0;
	num_desc_bufs =
		DIV_ROUND_UP(adap->num_bufs * DATA_BUF_XFERS, DESCS_IN_PAGE);
	for (i = 0; i < num_desc_bufs; i++) {
		p = dma_alloc_coherent(&pt3->pdev->dev, PAGE_SIZE,
					&desc_addr, GFP_KERNEL);
		if (p == NULL)
			goto failed;
		adap->num_desc_bufs++;
		adap->desc_buf[i].descs = p;
		adap->desc_buf[i].b_addr = desc_addr;

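		/*
		 * Chain the last descriptor of the previous page to the
		 * first descriptor of this newly allocated page.
		 */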
		if (i > 0) {
			d = &adap->desc_buf[i - 1].descs[DESCS_IN_PAGE - 1];
			d->next_l = lower_32_bits(desc_addr);
			d->next_h = upper_32_bits(desc_addr);
		}
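		/*
		 * Fill this page with descriptors, each covering one
		 * DATA_XFER_SZ chunk of a data buffer and pointing to the
		 * next descriptor; once the last chunk of the last data
		 * buffer is reached, point back to the first descriptor to
		 * close the ring.
		 */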
		for (j = 0; j < DESCS_IN_PAGE; j++) {
			data_addr = adap->buffer[idx].b_addr + ofs;
			d = &adap->desc_buf[i].descs[j];
			d->addr_l = lower_32_bits(data_addr);
			d->addr_h = upper_32_bits(data_addr);
			d->size = DATA_XFER_SZ;

			desc_addr += sizeof(struct xfer_desc);
			d->next_l = lower_32_bits(desc_addr);
			d->next_h = upper_32_bits(desc_addr);

			ofs += DATA_XFER_SZ;
			if (ofs >= DATA_BUF_SZ) {
				ofs -= DATA_BUF_SZ;
				idx++;
				if (idx >= adap->num_bufs) {
					desc_addr = adap->desc_buf[0].b_addr;
					d->next_l = lower_32_bits(desc_addr);
					d->next_h = upper_32_bits(desc_addr);
					return 0;
				}
			}
		}
	}
	return 0;

failed:
	pt3_free_dmabuf(adap);
	return -ENOMEM;
}