// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
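
/*
 * HMC pble (physical buffer list entry) resource manager. Pbles are
 * 8-byte entries carved from HMC segment descriptor (sd) backing pages
 * and handed out at level 1 (one flat run) or level 2 (a root page of
 * indices pointing at leaf pages).
 */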
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"

static enum irdma_status_code
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);

/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = (struct irdma_chunk *) pinfo->clist.next;
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		bitmap_free(chunk->bitmapbuf);
		kfree(chunk->chunkmem.va);
	}
}

/**
 * irdma_hmc_init_pble - Initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum irdma_status_code
irdma_hmc_init_pble(struct irdma_sc_dev *dev,
		    struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	enum irdma_status_code status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
	/*
	 * Start pbles on a 4k boundary: each pble is 8 bytes, so the
	 * byte gap to the next 4k boundary converts to a pble count
	 * with the shift right by 3.
	 */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
	pble_rsrc->unallocated_pble =
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);

	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = IRDMA_ERR_NO_MEMORY;
	}

	return status;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
			  struct sd_pd_idx *idx)
{
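	/*
	 * An sd covers one 2MB direct backing page (IRDMA_HMC_DIRECT_BP_SIZE)
	 * and a pd covers one 4KB page (IRDMA_HMC_PAGED_BP_SIZE), so 512 pds
	 * fit in each sd; rel_pd_idx is the pd's position within its sd.
	 */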
	idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum irdma_status_code
add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	enum irdma_status_code ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}
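	/*
	 * rel_pd_idx and info->pages count 4KB pages, so the byte offset
	 * into the sd's backing page and the chunk size are both formed
	 * by shifting left by HMC_PAGED_BP_SHIFT.
	 */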
	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
		  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;

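	/* each pble is 8 bytes, so a byte offset becomes an index via >> 3 */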
	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}

/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum irdma_status_code
add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
	     struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	enum irdma_status_code status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return IRDMA_ERR_NO_MEMORY;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

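	/*
	 * Register each of the chunk's 4KB DMA pages as a pd entry under
	 * the paged sd; pd entries already valid from an earlier chunk
	 * are skipped, and the virtual address only advances when a new
	 * pd entry is actually added.
	 */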
	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}

/**
 * irdma_get_type - get the sd entry type to use for an sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx, u32 pages)
{
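	/*
	 * A direct (2MB) sd is only usable when the allocation starts at
	 * the beginning of an sd (rel_pd_idx == 0) and spans all 512 pds;
	 * anything smaller or unaligned falls back to a paged sd.
	 */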
	enum irdma_sd_entry_type sd_entry_type;

	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
			IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}

/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */
static enum irdma_status_code
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	enum irdma_status_code ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return IRDMA_ERR_NO_MEMORY;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return IRDMA_ERR_NO_MEMORY;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
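	/*
	 * Claim the remainder of the current sd (or a whole sd when
	 * aligned), capped by what the unallocated pbles can fill: 512
	 * pbles per 4KB page, hence the PBLE_512_SHIFT divide.
	 */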
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
				    IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble,
		  pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
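	/* chunk->size is in bytes; >> 3 yields the number of 8-byte pbles */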
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
			     sd_entry->u.pd_table.pd_page_addr.pa :
			     sd_entry->u.bp.addr.pa;

	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_entry->valid = true;
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
	kfree(chunk->chunkmem.va);

	return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static enum irdma_status_code
get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	enum irdma_status_code ret_code;
	u64 fpm_addr;

	/* number of full 512-pble (4K) leaves */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return IRDMA_ERR_NO_MEMORY;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
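	/*
	 * The root page stores one 8-byte pble index per leaf, so the
	 * root allocation needs total << 3 bytes from the prm.
	 */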
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return IRDMA_ERR_NO_MEMORY;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
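	/*
	 * Allocate each leaf and record its fpm index in the next root
	 * entry; only the last leaf may hold fewer than PBLE_PER_PAGE
	 * pbles.
	 */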
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
				lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return IRDMA_ERR_NO_MEMORY;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum irdma_status_code
get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_pble_alloc *palloc)
{
	enum irdma_status_code ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return IRDMA_ERR_NO_MEMORY;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @level1_only: flag for a level 1 PBLE
 */
static enum irdma_status_code
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, bool level1_only)
{
	enum irdma_status_code status = 0;

	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}

450 
451 /**
452  * irdma_get_pble - allocate pbles from the prm
453  * @pble_rsrc: pble resources
454  * @palloc: contains all inforamtion regarding pble (idx + pble addr)
455  * @pble_cnt: #of pbles requested
456  * @level1_only: true if only pble level 1 to acquire
457  */
enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
				      struct irdma_pble_alloc *palloc,
				      u32 pble_cnt, bool level1_only)
{
	enum irdma_status_code status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;

	mutex_lock(&pble_rsrc->pble_mutex_lock);

	/* check first to see if we can get pbles without acquiring
	 * additional sds
	 */
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
	if (!status)
		goto exit;

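	/*
	 * Each additional sd backs 2MB of pbles (2MB / 8B = 1 << 18
	 * pbles), so this bounds the number of sds total_cnt could
	 * require, plus one for alignment slack.
	 */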
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
		/* if level1_only, only go through it once */
		if (!status || level1_only)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}
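
/*
 * Usage sketch (hypothetical caller, for illustration only; pble_rsrc
 * and npages here are assumed caller state, not names defined in this
 * file):
 *
 *	struct irdma_pble_alloc palloc = {};
 *
 *	if (irdma_get_pble(pble_rsrc, &palloc, npages, false))
 *		return IRDMA_ERR_NO_MEMORY;
 *	...program palloc.level1.addr or the palloc.level2 leaves...
 *	irdma_free_pble(pble_rsrc, &palloc);
 */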