// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - LZO caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/lzo.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_lzo_exit(struct regmap *map);

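/*
 * Per-block compression context.  @dst holds the compressed block and
 * @wmem the LZO1X scratch memory; @src/@src_len describe the input of the
 * next (de)compression and @decompressed_size the block's uncompressed
 * size.  @sync_bmp is shared by all blocks and records which registers
 * must be written back on sync.
 */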
struct regcache_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};

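/*
 * The raw register cache is split into a fixed number of blocks, each
 * compressed independently, so that accessing one register only requires
 * decompressing a single block.
 */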
#define LZO_BLOCK_NUM 8
static int regcache_lzo_block_count(struct regmap *map)
{
	return LZO_BLOCK_NUM;
}

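/* Allocate the scratch memory needed by the LZO1X compressor. */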
static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}

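/* Compress src into dst, updating dst_len to the compressed size. */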
static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}

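/* Decompress src into dst; dst_len must hold the expected output size. */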
static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}

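/* Compress one cache block into a freshly allocated worst-case sized buffer. */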
static int regcache_lzo_compress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

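/* Allocate a buffer for the decompressed block and inflate src into it. */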
static int regcache_lzo_decompress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

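/* Index of the compressed block that holds the given register. */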
static inline int regcache_lzo_get_blkindex(struct regmap *map,
					    unsigned int reg)
{
	return ((reg / map->reg_stride) * map->cache_word_size) /
		DIV_ROUND_UP(map->cache_size_raw,
			     regcache_lzo_block_count(map));
}

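/* Position of the register within its decompressed block. */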
static inline int regcache_lzo_get_blkpos(struct regmap *map,
					  unsigned int reg)
{
	return (reg / map->reg_stride) %
		    (DIV_ROUND_UP(map->cache_size_raw,
				  regcache_lzo_block_count(map)) /
		     map->cache_word_size);
}

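/* Nominal size in bytes of an uncompressed cache block. */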
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}

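/*
 * Build the compressed cache from the raw register defaults: allocate the
 * per-block contexts and the shared sync bitmap, then compress the
 * defaults block by block.
 */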
static int regcache_lzo_init(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	int ret, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;

	blkcount = regcache_lzo_block_count(map);
	map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
			     GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;
	lzo_blocks = map->cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware.  Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = map->num_reg_defaults_raw;
	sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; i++) {
		lzo_blocks[i] = kzalloc(sizeof(**lzo_blocks),
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			bitmap_free(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = regcache_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = regcache_lzo_get_blksize(map);
	p = map->reg_defaults_raw;
	end = map->reg_defaults_raw + map->cache_size_raw;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; i++, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = regcache_lzo_compress_cache_block(map,
						       lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	return 0;
err:
	regcache_lzo_exit(map);
	return ret;
}

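/* Free the per-block contexts, the shared sync bitmap and the block array. */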
static int regcache_lzo_exit(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = map->cache;
	if (!lzo_blocks)
		return 0;

	blkcount = regcache_lzo_block_count(map);
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks.  Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		bitmap_free(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; i++) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}
	kfree(lzo_blocks);
	map->cache = NULL;
	return 0;
}

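/*
 * Read a cached register: decompress its block into a temporary buffer,
 * fetch the value, then restore the untouched compressed block.
 */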
static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(map, lzo_block->dst, blkpos);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}

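/*
 * Write a cached register: decompress its block, update the value,
 * recompress the block and mark the register dirty in the sync bitmap.
 */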
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}

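/*
 * Write back every register marked dirty in the sync bitmap, restricted to
 * the [min, max] range and skipping values that match the hardware default.
 */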
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
			     unsigned int max)
{
	struct regcache_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = map->cache;
	i = min;
	for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
			      lzo_blocks[0]->sync_bmp_nbits) {
		if (i > max)
			continue;

		ret = regcache_read(map, i, &val);
		if (ret)
			return ret;

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, i);
		if (ret > 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, i, val);
		map->cache_bypass = false;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			i, val);
	}

	return 0;
}

struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};