// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct npa_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct npa_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct npa_aq_res_s *)aq->res->base;

	/* Get the current head pointer, where this instruction will be appended */
	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
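	/* Poll the completion code posted by hardware; give up after
	 * roughly 1000 iterations of 1us each (~1ms).
	 */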
	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NPA_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			struct npa_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, npalf, rc = 0;
	struct npa_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
		return NPA_AF_ERR_AQ_ENQUEUE;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
		return NPA_AF_ERR_AQ_ENQUEUE;
	}

	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
	inst.cindex = req->aura_id;
	inst.lf = npalf;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Enqueuing multiple instructions is not supported currently,
	 * so always use the first entry in the result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses the same aq->res->base to post the result of the
	 * previous instruction, hence wait here until that is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NPA_AQ_INSTOP_WRITE:
		/* Copy context and write mask */
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			memcpy(mask, &req->aura_mask,
			       sizeof(struct npa_aura_s));
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else {
			memcpy(mask, &req->pool_mask,
			       sizeof(struct npa_pool_s));
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_INIT:
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
				rc = NPA_AF_ERR_AQ_FULL;
				break;
			}
			/* Set pool's context address */
			req->aura.pool_addr = pfvf->pool_ctx->iova +
			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else { /* POOL's context */
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_NOP:
	case NPA_AQ_INSTOP_READ:
	case NPA_AQ_INSTOP_LOCK:
	case NPA_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NPA_AF_ERR_AQ_FULL;
		break;
	}

	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = npa_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set aura bitmap if aura hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_AURA) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
			__set_bit(req->aura_id, pfvf->aura_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
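			/* A masked write updates ENA only when its mask bit
			 * is set; otherwise retain the current bitmap state.
			 */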
			ena = (req->aura.ena & req->aura_mask.ena) |
				(test_bit(req->aura_id, pfvf->aura_bmap) &
				~req->aura_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->aura_bmap);
			else
				__clear_bit(req->aura_id, pfvf->aura_bmap);
		}
	}

	/* Set pool bitmap if pool hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
			__set_bit(req->aura_id, pfvf->pool_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->pool.ena & req->pool_mask.ena) |
				(test_bit(req->aura_id, pfvf->pool_bmap) &
				~req->pool_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->pool_bmap);
			else
				__clear_bit(req->aura_id, pfvf->pool_bmap);
		}
	}
	spin_unlock(&aq->lock);

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NPA_AQ_INSTOP_READ) {
			if (req->ctype == NPA_AQ_CTYPE_AURA)
				memcpy(&rsp->aura, ctx,
				       sizeof(struct npa_aura_s));
			else
				memcpy(&rsp->pool, ctx,
				       sizeof(struct npa_pool_s));
		}
	}

	return 0;
}

static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npa_aq_enq_req aq_req;
	unsigned long *bmap;
	int id, cnt = 0;
	int err = 0, rc;

	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
		return NPA_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		aq_req.pool.ena = 0;
		aq_req.pool_mask.ena = 1;
		cnt = pfvf->pool_ctx->qsize;
		bmap = pfvf->pool_bmap;
	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
		aq_req.aura.ena = 0;
		aq_req.aura_mask.ena = 1;
		aq_req.aura.bp_ena = 0;
		aq_req.aura_mask.bp_ena = 1;
		cnt = pfvf->aura_ctx->qsize;
		bmap = pfvf->aura_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NPA_AQ_INSTOP_WRITE;

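	/* Walk the bitmap and issue a masked ENA=0 write for every
	 * context that is currently marked enabled.
	 */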
	for (id = 0; id < cnt; id++) {
		if (!test_bit(id, bmap))
			continue;
		aq_req.aura_id = id;
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
	struct npa_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NPA_AQ_INSTOP_INIT)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
	lock_ctx_req.aura_id = req->aura_id;
	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
			req->hdr.pcifunc,
			(req->ctype == NPA_AQ_CTYPE_AURA) ?
			"Aura" : "Pool", req->aura_id);
	return err;
}

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_npa_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = npa_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return npa_lf_hwctx_disable(rvu, req);
}

static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->aura_bmap);
	pfvf->aura_bmap = NULL;

	qmem_free(rvu->dev, pfvf->aura_ctx);
	pfvf->aura_ctx = NULL;

	kfree(pfvf->pool_bmap);
	pfvf->pool_bmap = NULL;

	qmem_free(rvu->dev, pfvf->pool_ctx);
	pfvf->pool_ctx = NULL;

	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
	pfvf->npa_qints_ctx = NULL;
}

int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp)
{
	int npalf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (req->aura_sz > NPA_AURA_SZ_MAX ||
	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
		return NPA_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

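	/* Each 4-bit field of NPA_AF_CONST1 encodes a HW context size
	 * (aura, pool, qint) as a power of two.
	 */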
	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

	/* Alloc memory for aura HW contexts */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
	if (err)
		goto free_mem;

	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->aura_bmap)
		goto free_mem;

	/* Alloc memory for pool HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->pool_bmap)
		goto free_mem;

	/* Get the number of queue interrupts supported */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	qints = (cfg >> 28) & 0xFFF;

	/* Alloc memory for Qints HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
	/* Clear way partition mask and set aura offset to '0' */
	cfg &= ~(BIT_ULL(34) - 1);
	/* Set aura size & enable caching of contexts */
	cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;

	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

	/* Configure aura HW context's base */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
		    (u64)pfvf->aura_ctx->iova);

	/* Enable caching of qints hw context */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
		    BIT_ULL(36) | req->way_mask << 20);
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
		    (u64)pfvf->npa_qints_ctx->iova);

	goto exit;

free_mem:
	npa_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* set stack page info */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
	rsp->stack_pg_bytes = cfg & 0xFF;
	rsp->qints = (cfg >> 28) & 0xFFF;
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		rsp->cache_lines = (cfg >> 1) & 0x3F;
	}
	return rc;
}

int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int npalf, err;
	int blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	npa_ctx_free(rvu, pfvf);

	return 0;
}

static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
	cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of stack pages */
	cfg |= 0x10ULL;
#endif
	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

	/* For CN10K NPA BATCH DMA set 35 cache lines */
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		cfg &= ~0x7EULL;
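		/* Bits [6:1] hold the cache line count: 100011b == 35 */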
		cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
		rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
	}
	/* Result structure can be followed by Aura/Pool context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
	if (err)
		return err;

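	/* Program the AQ size and the instruction queue base IOVA */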
	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

int rvu_npa_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return 0;

	/* Initialize admin queue */
	return npa_aq_init(rvu, &hw->block[blkaddr]);
}

void rvu_npa_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);
}

void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;

	/* Disable all pools */
	ctx_req.hdr.pcifunc = pcifunc;
	ctx_req.ctype = NPA_AQ_CTYPE_POOL;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	/* Disable all auras */
	ctx_req.ctype = NPA_AQ_CTYPE_AURA;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	npa_ctx_free(rvu, pfvf);
}