// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2020-2021 NXP
 *
 * CAAM DMA data object utilities.
 */

#include <caam_trace.h>
#include <caam_utils_dmaobj.h>
#include <caam_utils_mem.h>
#include <caam_utils_sgt.h>
#include <caam_utils_status.h>
#include <kernel/cache_helpers.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <string.h>
#include <tee/cache.h>

#define IS_DMA_OVERFLOW(addr) ((addr) > UINT32_MAX)
#define MAX_BUFFER_ALLOC_SIZE ((size_t)(8 * 1024))

/*
 * Local defines used to identify the object type as:
 *  - input or output data
 *  - SGT object created because the buffer is not physically contiguous
 *  - derived object (not a buffer reallocation)
 *  - allocated origin buffer
 */
#define DMAOBJ_INPUT	  BIT(0)
#define DMAOBJ_OUTPUT	  BIT(1)
#define DMAOBJ_ALLOC_ORIG BIT(2)
#define DMAOBJ_DONT_COPY  BIT(3)
/*
 * DMA Buffer
 *
 * @require    Required DMA Buffer size
 * @allocated  Size of the buffer allocated
 * @remind     Size still available in the buffer
 * @buf        CAAM Buffer
 */
struct caamdmabuf {
	size_t require;
	size_t allocated;
	size_t remind;
	struct caambuf buf;
};
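
/*
 * Bookkeeping sketch (illustrative, not a statement from the original
 * sources): while a DMA buffer is carved into SGT entries, @buf.length
 * counts the bytes already handed out and @remind the bytes left, so the
 * following is expected to hold:
 *
 *	dmabuf.buf.length + dmabuf.remind == dmabuf.allocated
 *
 * try_allocate_dmabuf() establishes it (buf.length = 0, remind = size) and
 * entry_sgtbuf_dmabuf() preserves it by moving bytes from @remind to
 * @buf.length.
 */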

/*
 * DMA Object buffer entry
 *
 * @newbuf        True if the list entry is a new DMA Buffer
 * @nodma_access  Buffer is not accessible from the CAAM DMA
 * @nocopy        Buffer doesn't have to be copied back to the origin
 * @origbuf       Original buffer reference
 * @link          Pointer to the next entry
 */
struct dmaentry {
	bool newbuf;
	bool nodma_access;
	bool nocopy;

	struct caambuf origbuf;

	TAILQ_ENTRY(dmaentry) link;
};

/*
 * SGT/Buffer Data currently handled
 *
 * @orig    Original buffer reference
 * @dma     DMA Buffer (new or original)
 * @length  Buffer length
 */
struct sgtdata {
	uint8_t *orig;
	uint8_t *dma;
	size_t length;
};

/*
 * CAAM DMA private Object data
 * @type         Type of DMA Object
 * @nb_sgtbuf    Number of SGT/Buffer entries allocated
 * @dmabuf       DMA Buffer allocated
 * @sgtdata      Reference to the SGT/Buffer list in use
 * @list         List of the DMA Object buffer entries
 */
struct priv_dmaobj {
	unsigned int type;
	unsigned int nb_sgtbuf;

	struct caamdmabuf dmabuf;
	struct sgtdata *sgtdata;

	TAILQ_HEAD(dmalist, dmaentry) list;
};

/*
 * Spinlock protecting the DMA buffer allocations and frees, so that when
 * a big buffer has to be reallocated, the memory freed by one object is
 * really available to the others.
 */
static unsigned int memlock;

/*
 * Try to allocate a DMA Buffer of type input or output data of @size bytes.
 * On success, set up the DMA Buffer settings; otherwise return an error.
 *
 * @priv  CAAM DMA object private data
 * @size  Size of the DMA Buffer to allocate
 */
static TEE_Result try_allocate_dmabuf(struct priv_dmaobj *priv, size_t size)
{
	enum caam_status retstatus = CAAM_FAILURE;

	if (priv->dmabuf.allocated) {
		caam_free_buf(&priv->dmabuf.buf);
		priv->dmabuf.allocated = 0;
	}

	if (priv->type & DMAOBJ_INPUT)
		retstatus = caam_alloc_buf(&priv->dmabuf.buf, size);
	else
		retstatus = caam_alloc_align_buf(&priv->dmabuf.buf, size);

	DMAOBJ_TRACE("Alloc %s DMA buffer (%zu) ret 0x%" PRIx32,
		     (priv->type & DMAOBJ_INPUT) ? "Input" : "Output", size,
		     retstatus);

	if (retstatus == CAAM_NO_ERROR) {
		DMAOBJ_TRACE("DMA buffer Allocation Success");
		/* Set the Object's DMA Buffer settings */
		priv->dmabuf.allocated = size;
		priv->dmabuf.remind = size;
		priv->dmabuf.buf.length = 0;
		return TEE_SUCCESS;
	}

	DMAOBJ_TRACE("DMA buffer Allocation Failure");
	return TEE_ERROR_OUT_OF_MEMORY;
}

/*
 * Allocate and initialize the CAAM DMA object's private data.
 *
 * @obj   CAAM DMA Object
 * @type  Type of the CAAM DMA Object (i.e. Input or Output)
 */
static TEE_Result allocate_private(struct caamdmaobj *obj, unsigned int type)
{
	struct priv_dmaobj *priv = NULL;

	priv = caam_calloc(sizeof(*priv));
	if (!priv)
		return TEE_ERROR_OUT_OF_MEMORY;

	obj->priv = priv;

	/* Set the object type */
	priv->type = type;

	TAILQ_INIT(&priv->list);

	return TEE_SUCCESS;
}

/*
 * Fill the @sgtdata object to record the current input/output data
 * handled in the DMA SGT/Buffer object.
 * Increment the SGT/Buffer length accordingly.
 *
 * @obj      CAAM DMA object
 * @sgtdata  [out] SGT Data handled
 * @entry    DMA Object buffer entry
 * @dma      DMA SGT/Buffer object
 * @offset   Start offset of the DMA Object buffer
 */
static void add_sgtdata_entry(struct caamdmaobj *obj, struct sgtdata *sgtdata,
			      struct dmaentry *entry, struct caambuf *dma,
			      size_t offset)
{
	if (entry->nocopy) {
		sgtdata->orig = NULL;
		sgtdata->length = 0;
		sgtdata->dma = NULL;
	} else {
		sgtdata->orig = entry->origbuf.data + offset;
		sgtdata->length = dma->length;
		sgtdata->dma = dma->data;
	}

	obj->sgtbuf.length += dma->length;
}

/*
 * Add a new DMA Buffer entry as the first element of the list.
 * Return the new entry, or NULL on allocation error.
 *
 * @priv    DMA Object private data
 * @orig    Original buffer reference
 */
static struct dmaentry *dmalist_add_entry_head(struct priv_dmaobj *priv,
					       struct caambuf *orig)
{
	struct dmaentry *entry = NULL;

	entry = caam_calloc(sizeof(*entry));
	if (entry) {
		/* Save the original buffer reference */
		memcpy(&entry->origbuf, orig, sizeof(entry->origbuf));
		DMAOBJ_TRACE("entry %p - insert head entry of %zu bytes", entry,
			     orig->length);
		TAILQ_INSERT_HEAD(&priv->list, entry, link);
	}

	return entry;
}

/*
 * Add a new DMA Buffer entry at the end of the list.
 * Return the new entry, or NULL on allocation error.
 *
 * @priv    DMA Object private data
 * @orig    Original buffer reference
 */
static struct dmaentry *dmalist_add_entry(struct priv_dmaobj *priv,
					  struct caambuf *orig)
{
	struct dmaentry *entry = NULL;

	entry = caam_calloc(sizeof(*entry));
	if (entry) {
		/* Save the original buffer reference */
		memcpy(&entry->origbuf, orig, sizeof(entry->origbuf));
		DMAOBJ_TRACE("entry %p - insert entry of %zu bytes", entry,
			     orig->length);
		if (TAILQ_EMPTY(&priv->list))
			TAILQ_INSERT_HEAD(&priv->list, entry, link);
		else
			TAILQ_INSERT_TAIL(&priv->list, entry, link);
	}

	return entry;
}

/*
 * Allocate a new DMA entry and insert it in the list before the given
 * entry. Return the allocated DMA entry, or NULL on allocation error.
 *
 * @priv   DMA Object private data
 * @before DMA entry after the new DMA entry
 * @new    CAAM buffer of the new DMA entry
 */
static struct dmaentry *dmalist_insert_before_entry(struct priv_dmaobj *priv,
						    struct dmaentry *before,
						    struct caambuf *new)
{
	struct dmaentry *entry = NULL;

	entry = caam_calloc(sizeof(*entry));
	if (entry) {
		/* Save the original buffer reference */
		memcpy(&entry->origbuf, new, sizeof(entry->origbuf));
		DMAOBJ_TRACE("entry %p - insert entry of %zu bytes", entry,
			     new->length);
		if (TAILQ_FIRST(&priv->list) == before)
			TAILQ_INSERT_HEAD(&priv->list, entry, link);
		else
			TAILQ_INSERT_BEFORE(before, entry, link);
	}

	return entry;
}

/*
 * Allocate a new DMA entry and insert it in the list after the given
 * entry. Return the allocated DMA entry, or NULL on allocation error.
 *
 * @priv   DMA Object private data
 * @after  DMA entry before the new DMA entry
 * @new    CAAM buffer of the new DMA entry
 */
static struct dmaentry *dmalist_insert_after_entry(struct priv_dmaobj *priv,
						   struct dmaentry *after,
						   struct caambuf *new)
{
	struct dmaentry *entry = NULL;

	entry = caam_calloc(sizeof(*entry));
	if (entry) {
		/* Save the original buffer reference */
		memcpy(&entry->origbuf, new, sizeof(entry->origbuf));
		DMAOBJ_TRACE("entry %p - insert entry of %zu bytes", entry,
			     new->length);
		TAILQ_INSERT_AFTER(&priv->list, after, entry, link);
	}

	return entry;
}

/*
 * Apply the cache operation @op to the DMA Object (SGT or buffer)
 *
 * @op    Cache operation
 * @obj   CAAM DMA object
 */
static inline void dmaobj_cache_operation(enum utee_cache_operation op,
					  struct caamdmaobj *obj)
{
	if (!obj->sgtbuf.length)
		return;

	if (obj->sgtbuf.sgt_type)
		caam_sgt_cache_op(op, &obj->sgtbuf, obj->sgtbuf.length);
	else if (!obj->sgtbuf.buf->nocache)
		cache_operation(op, obj->sgtbuf.buf->data, obj->sgtbuf.length);
}

/*
 * Add @length to the required DMA buffer allocation size, saturating at
 * SIZE_MAX on overflow.
 *
 * @priv   DMA Object private data
 * @length Additional buffer size required
 */
static inline void add_dma_require(struct priv_dmaobj *priv, size_t length)
{
	size_t tmp = 0;

	if (ADD_OVERFLOW(priv->dmabuf.require, length, &tmp))
		priv->dmabuf.require = SIZE_MAX;
	else
		priv->dmabuf.require = tmp;
}
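
/*
 * Worked example (illustrative): add_dma_require() accumulates a
 * saturating sum; starting from require = SIZE_MAX - 8,
 *
 *	add_dma_require(priv, 4);	// require = SIZE_MAX - 4
 *	add_dma_require(priv, 16);	// overflow, require = SIZE_MAX
 *
 * so an over-large requirement is clamped instead of wrapping around to a
 * small allocation size.
 */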

/*
 * Check that the buffer start/end addresses are aligned on a cache line.
 * If a start or end address is not aligned, split the entry so that the
 * misaligned head or tail goes through a newly allocated DMA buffer; if
 * the part remaining after re-alignment would fit within a single cache
 * line, flag the whole entry to be reallocated into a new buffer instead.
 *
 * @priv    DMA Object private data
 * @maxlen  Maximum length to use
 */
static TEE_Result check_buffer_alignment(struct priv_dmaobj *priv,
					 size_t maxlen)
{
	unsigned int cacheline_size = 0;
	struct dmaentry *entry = NULL;
	struct dmaentry *new_entry = NULL;
	struct caambuf newbuf = {};
	vaddr_t va_start = 0;
	vaddr_t va_end = 0;
	vaddr_t va_end_align = 0;
	vaddr_t va_start_align = 0;
	size_t remlen = 0;
	size_t acclen = 0;

	cacheline_size = dcache_get_line_size();

	TAILQ_FOREACH(entry, &priv->list, link) {
		DMAOBJ_TRACE("Entry %p: start %p len %zu (%zu >= %zu)", entry,
			     entry->origbuf.data, entry->origbuf.length, acclen,
			     maxlen);

		/* No need to continue if we covered the needed length */
		if (acclen >= maxlen)
			return TEE_SUCCESS;

		acclen += entry->origbuf.length;

		if (entry->nodma_access || entry->newbuf)
			continue;

		if (entry->origbuf.length < cacheline_size) {
			/*
			 * The entry is shorter than a cache line.
			 * Require a fully aligned buffer.
			 */
			DMAOBJ_TRACE("Length %zu vs cache line %u",
				     entry->origbuf.length, cacheline_size);

			entry->newbuf = true;
			add_dma_require(priv, entry->origbuf.length);
			continue;
		}

		va_start = (vaddr_t)entry->origbuf.data;
		va_start_align = ROUNDUP(va_start, cacheline_size);

		if (va_start_align != va_start) {
			DMAOBJ_TRACE("Start 0x%" PRIxVA " vs align 0x%" PRIxVA,
				     va_start, va_start_align);

			remlen = entry->origbuf.length -
				 (va_start_align - va_start);
			if (remlen <= cacheline_size) {
				/*
				 * The start address is not aligned and the
				 * length remaining after re-alignment is
				 * not larger than a cache line.
				 * Require a fully aligned buffer.
				 */
				DMAOBJ_TRACE("Rem length %zu vs cache line %u",
					     remlen, cacheline_size);
				entry->newbuf = true;
				add_dma_require(priv, entry->origbuf.length);
				continue;
			}

			/*
			 * Insert a new entry carrying the misaligned head
			 * so that the current entry starts on a cache line.
			 */
			newbuf.data = entry->origbuf.data;
			newbuf.length = va_start_align - va_start;
			newbuf.paddr = entry->origbuf.paddr;
			newbuf.nocache = entry->origbuf.nocache;

			add_dma_require(priv, newbuf.length);
			new_entry = dmalist_insert_before_entry(priv, entry,
								&newbuf);
			if (!new_entry)
				return TEE_ERROR_OUT_OF_MEMORY;

			new_entry->newbuf = true;

			/*
			 * Update the current entry with the aligned address
			 * and the new length.
			 */
			entry->origbuf.data = (uint8_t *)va_start_align;
			entry->origbuf.length -= newbuf.length;
			entry->origbuf.paddr += newbuf.length;

			/*
			 * Subtract the not-yet-verified remainder of the
			 * modified entry, then continue the FOREACH loop
			 * from new_entry so that this remainder is checked
			 * next.
			 */
			acclen -= entry->origbuf.length;
			entry = new_entry;
			continue;
		}

		va_end = (vaddr_t)entry->origbuf.data + entry->origbuf.length;
		va_end_align = ROUNDUP(va_end, cacheline_size);

		if (va_end != va_end_align) {
			DMAOBJ_TRACE("End 0x%" PRIxVA " vs align 0x%" PRIxVA,
				     va_end, va_end_align);

			va_end_align = ROUNDDOWN(va_end, cacheline_size);
			remlen = entry->origbuf.length -
				 (va_end - va_end_align);

			if (remlen <= cacheline_size) {
				/*
				 * The end address is not aligned and the
				 * length remaining after re-alignment is
				 * not larger than a cache line.
				 * Require a fully aligned buffer.
				 */
				DMAOBJ_TRACE("Rem length %zu vs cache line %u",
					     remlen, cacheline_size);
				entry->newbuf = true;
				add_dma_require(priv, entry->origbuf.length);
				continue;
			}

			/*
			 * Insert a new entry carrying the misaligned tail
			 * so that the current entry ends on a cache line.
			 */
			newbuf.data = (uint8_t *)va_end_align;
			newbuf.length = va_end - va_end_align;
			newbuf.paddr = entry->origbuf.paddr +
				       entry->origbuf.length - newbuf.length;
			newbuf.nocache = entry->origbuf.nocache;

			add_dma_require(priv, newbuf.length);

			new_entry = dmalist_insert_after_entry(priv, entry,
							       &newbuf);
			if (!new_entry)
				return TEE_ERROR_OUT_OF_MEMORY;

			new_entry->newbuf = true;

			/* Update the current entry with the new length */
			entry->origbuf.length -= newbuf.length;

			/*
			 * Continue the FOREACH loop from new_entry. Both the
			 * shortened entry and the new tail entry are already
			 * accounted for in acclen, so no adjustment is
			 * needed.
			 */
			entry = new_entry;
			continue;
		}
	}

	return TEE_SUCCESS;
}
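
/*
 * Worked example (illustrative, assuming a 64-byte cache line): an output
 * entry covering [0x1010, 0x10f0) is split by check_buffer_alignment()
 * into:
 *
 *	head  [0x1010, 0x1040)  newbuf = true, handled via the DMA buffer
 *	body  [0x1040, 0x10c0)  cache line aligned, used in place
 *	tail  [0x10c0, 0x10f0)  newbuf = true, handled via the DMA buffer
 *
 * so cache maintenance on the in-place body cannot corrupt neighbouring
 * data sharing the head/tail cache lines.
 */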

/*
 * Go through all the @orig space to extract all the physical areas used
 * to map the buffer.
 * If one of the physical areas is not accessible by the CAAM DMA, flag it
 * to be reallocated with a DMA accessible buffer.
 * If the DMA Object is an output buffer, check and flag the start/end
 * addresses of the buffer to be aligned on a cache line.
 *
 * @obj     CAAM DMA object
 * @orig    Original Data
 * @maxlen  Maximum length to use
 */
static TEE_Result check_buffer_boundary(struct caamdmaobj *obj,
					struct caambuf *orig, size_t maxlen)
{
	TEE_Result ret = TEE_ERROR_OUT_OF_MEMORY;
	struct priv_dmaobj *priv = obj->priv;
	struct dmaentry *entry = NULL;
	struct caambuf *pabufs = NULL;
	int nb_pa_area = -1;
	int idx = 0;
	paddr_t last_pa = 0;
	size_t remlen = maxlen;

	/*
	 * Get the number of physical areas used by the
	 * DMA Buffer
	 */
	nb_pa_area = caam_mem_get_pa_area(orig, &pabufs);
	DMAOBJ_TRACE("Number of pa areas = %d (for max length %zu bytes)",
		     nb_pa_area, remlen);
	if (nb_pa_area == -1)
		goto out;

	for (idx = 0; idx < nb_pa_area && remlen; idx++) {
		DMAOBJ_TRACE("Remaining length = %zu", remlen);
		if (ADD_OVERFLOW(pabufs[idx].paddr, pabufs[idx].length,
				 &last_pa))
			goto out;

		DMAOBJ_TRACE("PA 0x%" PRIxPA " = 0x%" PRIxPA " + %zu", last_pa,
			     pabufs[idx].paddr, pabufs[idx].length);

		entry = dmalist_add_entry(priv, &pabufs[idx]);
		if (!entry)
			goto out;

		if (IS_DMA_OVERFLOW(last_pa)) {
			entry->nodma_access = true;
			add_dma_require(priv, pabufs[idx].length);
		}

		if (remlen > pabufs[idx].length)
			remlen -= pabufs[idx].length;
		else
			remlen = 0;
	}

	/*
	 * Check the buffer alignment if the buffer is cacheable and
	 * an output buffer.
	 */
	if (priv->type & DMAOBJ_OUTPUT && !orig->nocache) {
		ret = check_buffer_alignment(priv, maxlen);
		if (ret)
			goto out;
	}

	orig->length = maxlen;

	ret = TEE_SUCCESS;
out:
	caam_free(pabufs);
	return ret;
}

/*
 * Re-map a DMA entry into a CAAM DMA accessible buffer.
 * Create the SGT/Buffer entry to be used in the CAAM Descriptor.
 * Record this entry in the SGT/Buffer Data to keep track of the data
 * currently being worked on.
 *
 * @obj         CAAM DMA object
 * @entry       DMA entry to re-map
 * @index       Index in the SGT/Buffer table
 * @off         Start offset of the DMA entry data
 */
static enum caam_status entry_sgtbuf_dmabuf(struct caamdmaobj *obj,
					    struct dmaentry *entry,
					    unsigned int index, size_t off)
{
	struct priv_dmaobj *priv = obj->priv;
	struct caambuf *sgtbuf = &obj->sgtbuf.buf[index];
	struct caamdmabuf *dmabuf = &priv->dmabuf;

	if (!priv->dmabuf.allocated)
		return CAAM_OUT_MEMORY;

	sgtbuf->data = dmabuf->buf.data + dmabuf->buf.length;
	sgtbuf->length = MIN(dmabuf->remind, entry->origbuf.length - off);
	sgtbuf->paddr = dmabuf->buf.paddr + dmabuf->buf.length;
	sgtbuf->nocache = dmabuf->buf.nocache;
	dmabuf->remind -= sgtbuf->length;
	dmabuf->buf.length += sgtbuf->length;

	if (priv->type & DMAOBJ_INPUT)
		memcpy(sgtbuf->data, &entry->origbuf.data[off], sgtbuf->length);
	else
		entry->newbuf = true;

	add_sgtdata_entry(obj, &priv->sgtdata[index], entry, sgtbuf, off);

	return CAAM_NO_ERROR;
}

/*
 * Create the SGT/Buffer entry mapping the DMA @entry.
 * Record this entry in the SGT/Buffer Data to keep track of the data
 * currently being worked on.
 *
 * @obj         CAAM DMA object
 * @entry       DMA entry to map
 * @index       Index in the SGT/Buffer table
 * @off         Start offset of the DMA entry data
 */
static enum caam_status entry_sgtbuf(struct caamdmaobj *obj,
				     struct dmaentry *entry, unsigned int index,
				     size_t off)
{
	struct priv_dmaobj *priv = obj->priv;
	struct caambuf *sgtbuf = &obj->sgtbuf.buf[index];
	struct sgtdata *sgtdata = &priv->sgtdata[index];

	memcpy(sgtbuf, &entry->origbuf, sizeof(*sgtbuf));
	sgtbuf->data += off;
	sgtbuf->paddr += off;
	sgtbuf->length -= off;

	DMAOBJ_TRACE("DMA buffer %p - %zu", sgtbuf->data, sgtbuf->length);
	add_sgtdata_entry(obj, sgtdata, entry, sgtbuf, off);

	return CAAM_NO_ERROR;
}

TEE_Result caam_dmaobj_init_input(struct caamdmaobj *obj, const void *data,
				  size_t length)
{
	TEE_Result ret = TEE_ERROR_GENERIC;

	DMAOBJ_TRACE("Input object with data @%p of %zu bytes", data, length);

	if (!data || !length || !obj) {
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	obj->orig.paddr = virt_to_phys((void *)data);
	if (!obj->orig.paddr) {
		DMAOBJ_TRACE("Object virtual address error");
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	obj->orig.data = (void *)data;
	obj->orig.length = length;
	if (!caam_mem_is_cached_buf((void *)data, length))
		obj->orig.nocache = 1;

	ret = allocate_private(obj, DMAOBJ_INPUT);
	if (!ret)
		ret = check_buffer_boundary(obj, &obj->orig, obj->orig.length);

out:
	DMAOBJ_TRACE("Object returns 0x%" PRIx32, ret);
	return ret;
}

TEE_Result caam_dmaobj_input_sgtbuf(struct caamdmaobj *obj, const void *data,
				    size_t length)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t size_done = length;

	ret = caam_dmaobj_init_input(obj, data, length);
	if (ret)
		return ret;

	ret = caam_dmaobj_prepare(obj, NULL, length);
	if (ret)
		return ret;

	ret = caam_dmaobj_sgtbuf_build(obj, &size_done, 0, length);
	if (ret)
		return ret;

	if (size_done != length)
		return TEE_ERROR_OUT_OF_MEMORY;

	return TEE_SUCCESS;
}
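
/*
 * Usage sketch (illustrative; msg, len and the job-run step are
 * placeholders, not part of this driver): a typical caller builds the
 * whole input in one shot and releases it once the job has run:
 *
 *	struct caamdmaobj in = { };
 *	TEE_Result ret = TEE_ERROR_GENERIC;
 *
 *	ret = caam_dmaobj_input_sgtbuf(&in, msg, len);
 *	if (ret)
 *		return ret;
 *
 *	caam_dmaobj_cache_push(&in);	// clean the cache before CAAM reads
 *	...				// run the CAAM job using in.sgtbuf
 *	caam_dmaobj_free(&in);
 */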

TEE_Result caam_dmaobj_init_output(struct caamdmaobj *obj, void *data,
				   size_t length, size_t min_length)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct dmaentry *entry = NULL;
	struct caambuf newbuf = {};

	DMAOBJ_TRACE("Output object with data @%p of %zu bytes (%zu)", data,
		     length, min_length);

	if (!obj) {
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	ret = allocate_private(obj, DMAOBJ_OUTPUT);
	if (ret)
		goto out;

	if (data) {
		obj->orig.paddr = virt_to_phys((void *)data);
		if (!obj->orig.paddr) {
			DMAOBJ_TRACE("Object virtual address error");
			ret = TEE_ERROR_BAD_PARAMETERS;
			goto out;
		}

		obj->orig.data = (void *)data;
		obj->orig.length = length;
		if (!caam_mem_is_cached_buf((void *)data, length))
			obj->orig.nocache = 1;

		ret = check_buffer_boundary(obj, &obj->orig,
					    MIN(min_length, obj->orig.length));
		if (ret)
			goto out;
	}

	if (length < min_length || !data) {
		DMAOBJ_TRACE("Output buffer too short, need %zu bytes (+%zu)",
			     min_length, min_length - length);
		newbuf.length = min_length - length;

		entry = dmalist_add_entry(obj->priv, &newbuf);
		if (!entry) {
			ret = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		/* Add the additional size to the DMA buffer length */
		add_dma_require(obj->priv, newbuf.length);

		entry->nocopy = true;
		entry->newbuf = true;
	}

	ret = TEE_SUCCESS;

out:
	DMAOBJ_TRACE("Object returns 0x%" PRIx32, ret);
	return ret;
}

TEE_Result caam_dmaobj_output_sgtbuf(struct caamdmaobj *obj, void *data,
				     size_t length, size_t min_length)
{
	enum caam_status retstatus = CAAM_FAILURE;
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct priv_dmaobj *priv = NULL;
	size_t size = 0;
	struct caambuf buf = {};

	if (!data && !length && min_length) {
		/*
		 * Allocate the origin buffer ourselves, using at least a
		 * cache line size so that the start/end addresses of the
		 * allocation are cache line aligned. If @min_length is
		 * smaller than a cache line, rounding the allocation up
		 * to the cache line size prevents a misaligned buffer end
		 * and hence the reallocation of a buffer that would never
		 * be used.
		 */
		size = MAX(min_length, dcache_get_line_size());

		/* Allocate a new cache aligned buffer */
		retstatus = caam_alloc_align_buf(&buf, size);
		DMAOBJ_TRACE("New output buffer of %zu bytes ret 0x%" PRIx32,
			     min_length, retstatus);
		if (retstatus != CAAM_NO_ERROR)
			return caam_status_to_tee_result(retstatus);

		ret = caam_dmaobj_init_output(obj, buf.data, buf.length, size);
		if (ret)
			return ret;

		/* Set the origin buffer length actually asked for */
		obj->orig.length = min_length;

		/* Flag the origin buffer as a new allocation to free it */
		priv = obj->priv;
		priv->type |= DMAOBJ_ALLOC_ORIG;
	} else {
		ret = caam_dmaobj_init_output(obj, data, length, min_length);
		if (ret)
			return ret;
	}

	ret = caam_dmaobj_prepare(NULL, obj, min_length);
	if (ret)
		return ret;

	size = min_length;
	ret = caam_dmaobj_sgtbuf_build(obj, &size, 0, min_length);
	if (ret)
		return ret;

	if (size != min_length)
		return TEE_ERROR_OUT_OF_MEMORY;

	return TEE_SUCCESS;
}
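
/*
 * Usage sketch (illustrative; dst, dstlen and the job-run step are
 * placeholders, not part of this driver): producing a result into a
 * caller-provided buffer:
 *
 *	struct caamdmaobj out = { };
 *	TEE_Result ret = TEE_ERROR_GENERIC;
 *
 *	ret = caam_dmaobj_output_sgtbuf(&out, dst, dstlen, dstlen);
 *	if (ret)
 *		return ret;
 *
 *	caam_dmaobj_cache_push(&out);	// flush the cache before CAAM writes
 *	...				// run the CAAM job using out.sgtbuf
 *	dstlen = caam_dmaobj_copy_to_orig(&out);
 *	caam_dmaobj_free(&out);
 */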

void caam_dmaobj_cache_push(struct caamdmaobj *obj)
{
	struct priv_dmaobj *priv = NULL;
	enum utee_cache_operation op = TEE_CACHECLEAN;

	if (!obj || !obj->priv)
		return;

	priv = obj->priv;
	if (priv->type & DMAOBJ_OUTPUT)
		op = TEE_CACHEFLUSH;

	dmaobj_cache_operation(op, obj);
}

size_t caam_dmaobj_copy_to_orig(struct caamdmaobj *obj)
{
	struct priv_dmaobj *priv = NULL;
	unsigned int idx = 0;
	size_t length = 0;
	size_t dst_rlen = 0;
	size_t copy_size = 0;

	if (!obj || !obj->orig.data || !obj->priv)
		return 0;

	dmaobj_cache_operation(TEE_CACHEINVALIDATE, obj);

	priv = obj->priv;

	/*
	 * The maximum data size to copy cannot exceed the output buffer size
	 * (obj->orig.length) and cannot exceed the data processed by the
	 * CAAM (obj->sgtbuf.length).
	 */
	dst_rlen = MIN(obj->orig.length, obj->sgtbuf.length);

	DMAOBJ_TRACE("Copy (len=%zu)", dst_rlen);

	for (idx = 0; idx < obj->sgtbuf.number; idx++) {
		struct sgtdata *sgtdata = priv->sgtdata + idx;

		if (!sgtdata)
			break;

		copy_size = MIN(dst_rlen, sgtdata->length);
		if (sgtdata->orig != sgtdata->dma && sgtdata->orig)
			memcpy(sgtdata->orig, sgtdata->dma, copy_size);

		length += copy_size;
		dst_rlen -= copy_size;
	}

	return length;
}

size_t caam_dmaobj_copy_ltrim_to_orig(struct caamdmaobj *obj)
{
	struct priv_dmaobj *priv = NULL;
	uint8_t *dst = NULL;
	size_t off = 0;
	size_t offset = 0;
	size_t dst_rlen = 0;
	size_t copy_size = 0;
	unsigned int idx = 0;
	size_t length = 0;

	if (!obj || !obj->orig.data || !obj->priv)
		return 0;

	dmaobj_cache_operation(TEE_CACHEINVALIDATE, obj);

	priv = obj->priv;

	/* Parse the SGT data list to discard leading zeros */
	for (idx = 0; idx < obj->sgtbuf.number; idx++) {
		struct sgtdata *sgtdata = priv->sgtdata + idx;

		if (!sgtdata)
			break;

		if (!sgtdata->orig)
			continue;

		for (offset = 0; offset < sgtdata->length; off++, offset++) {
			if (sgtdata->dma[offset])
				goto do_copy;
		}
	}

do_copy:
	if (off < obj->orig.length)
		dst_rlen = obj->orig.length - off;

	dst = obj->orig.data;

	DMAOBJ_TRACE("Copy/Move Offset=%zu (len=%zu) TYPE=%d", off, dst_rlen,
		     obj->sgtbuf.sgt_type);

	if (!dst_rlen) {
		dst[0] = 0;
		return 1;
	}

	/*
	 * After discarding leading zeros in the SGT data list, start the copy
	 * operation on the remaining elements of the data list.
	 * List index must not be re-initialized before entering this loop.
	 */
	for (; idx < obj->sgtbuf.number; idx++) {
		struct sgtdata *sgtdata = priv->sgtdata + idx;

		if (!sgtdata)
			break;

		if (!sgtdata->orig)
			continue;

		if (offset) {
			copy_size = MIN(dst_rlen, sgtdata->length - offset);
			memmove(dst, &sgtdata->dma[offset], copy_size);
			offset = 0;
		} else {
			copy_size = MIN(dst_rlen, sgtdata->length);
			if (dst != sgtdata->dma)
				memmove(dst, sgtdata->dma, copy_size);
		}

		dst += copy_size;
		dst_rlen -= copy_size;
		length += copy_size;
	}

	return length;
}
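
/*
 * Worked example (illustrative): for a 6-byte result 00 00 4a 00 17 c3,
 * caam_dmaobj_copy_ltrim_to_orig() skips the two leading zero bytes
 * (off = 2), moves 4a 00 17 c3 to the start of the origin buffer and
 * returns 4. Only the leading zeros are stripped, interior zeros are
 * kept. An all-zero result collapses to the single byte 00 and the
 * function returns 1.
 */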

void caam_dmaobj_free(struct caamdmaobj *obj)
{
	struct priv_dmaobj *priv = NULL;
	struct dmaentry *entry = NULL;
	struct dmaentry *next = NULL;
	uint32_t exceptions = 0;

	if (!obj)
		return;

	exceptions = cpu_spin_lock_xsave(&memlock);
	priv = obj->priv;
	if (!priv)
		goto out;

	DMAOBJ_TRACE("Free %s object with data @%p of %zu bytes",
		     priv->type & DMAOBJ_INPUT ? "Input" : "Output",
		     obj->orig.data, obj->orig.length);

	TAILQ_FOREACH_SAFE(entry, &priv->list, link, next) {
		DMAOBJ_TRACE("Is type 0x%x newbuf %s", priv->type,
			     entry->newbuf ? "true" : "false");

		DMAOBJ_TRACE("Free entry %p", entry);
		caam_free(entry);
	}

	if (priv->nb_sgtbuf) {
		DMAOBJ_TRACE("Free #%d SGT data %p", priv->nb_sgtbuf,
			     priv->sgtdata);
		caam_free(priv->sgtdata);

		obj->sgtbuf.number = priv->nb_sgtbuf;
		obj->sgtbuf.sgt_type = priv->nb_sgtbuf > 1;
	}

	if (priv->dmabuf.allocated) {
		DMAOBJ_TRACE("Free CAAM DMA buffer");
		caam_free_buf(&priv->dmabuf.buf);
	}

	if (priv->type & DMAOBJ_ALLOC_ORIG) {
		DMAOBJ_TRACE("Free Allocated origin");
		caam_free_buf(&obj->orig);
	}

	DMAOBJ_TRACE("Free private object %p", priv);
	caam_free(priv);

out:
	if (obj->sgtbuf.number) {
		DMAOBJ_TRACE("Free #%d SGT/Buffer %p", obj->sgtbuf.number,
			     &obj->sgtbuf);
		caam_sgtbuf_free(&obj->sgtbuf);
	}

	memset(obj, 0, sizeof(*obj));

	cpu_spin_unlock_xrestore(&memlock, exceptions);
}

TEE_Result caam_dmaobj_add_first_block(struct caamdmaobj *obj,
				       struct caamblock *block)
{
	struct priv_dmaobj *priv = NULL;
	struct caambuf newbuf = {};
	struct dmaentry *entry = NULL;

	if (!obj || !obj->priv || !block)
		return TEE_ERROR_BAD_PARAMETERS;

	priv = obj->priv;

	/* Save the block buffer reference and insert it at the list head */
	newbuf.data = block->buf.data;
	newbuf.length = block->filled;
	newbuf.paddr = block->buf.paddr;
	newbuf.nocache = block->buf.nocache;

	entry = dmalist_add_entry_head(priv, &newbuf);
	if (!entry)
		return TEE_ERROR_OUT_OF_MEMORY;

	/*
	 * A block buffer added to the output DMA buffer doesn't have to
	 * be part of the output copy to the origin buffer.
	 */
	if (priv->type & DMAOBJ_OUTPUT)
		entry->nocopy = true;

	return TEE_SUCCESS;
}

TEE_Result caam_dmaobj_derive_sgtbuf(struct caamdmaobj *obj,
				     const struct caamdmaobj *from,
				     size_t offset, size_t length)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	enum caam_status retstatus = CAAM_FAILURE;
	struct priv_dmaobj *priv = NULL;

	DMAOBJ_TRACE("Derive object %p - offset %zu - length %zu bytes", from,
		     offset, length);

	if (!obj || !from || !length || !from->priv) {
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	if (!from->orig.data || !from->orig.length) {
		DMAOBJ_TRACE("No data/length to derive from");
		ret = TEE_ERROR_NO_DATA;
		goto out;
	}

	priv = from->priv;
	if (!priv->nb_sgtbuf) {
		DMAOBJ_TRACE("From SGT/Buffer not prepared");
		ret = TEE_ERROR_NO_DATA;
		goto out;
	}

	retstatus = caam_sgt_derive(&obj->sgtbuf, &from->sgtbuf, offset,
				    length);

	ret = caam_status_to_tee_result(retstatus);

out:
	DMAOBJ_TRACE("Object returns 0x%" PRIx32, ret);
	return ret;
}

/*
 * Get the maximum allocation size for the given CAAM DMA object.
 * Return the maximum allocation size.
 *
 * @obj CAAM DMA object
 */
static size_t get_dma_max_alloc_size(struct caamdmaobj *obj)
{
	size_t alloc_size = 0;
	struct priv_dmaobj *priv = NULL;

	if (!obj)
		return 0;

	priv = obj->priv;

	DMAOBJ_TRACE("DMA buffer size require %zu", priv->dmabuf.require);
	alloc_size = MIN(priv->dmabuf.require, MAX_BUFFER_ALLOC_SIZE);
	if (alloc_size > 1024)
		alloc_size = ROUNDDOWN(alloc_size, 1024);

	return alloc_size;
}
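
/*
 * Worked example (illustrative): with MAX_BUFFER_ALLOC_SIZE = 8 KB, a
 * required size of 3000 bytes gives MIN(3000, 8192) = 3000, then
 * ROUNDDOWN(3000, 1024) = 2048 bytes; a required size of 20000 bytes is
 * capped at 8192 bytes. Requirements of 1024 bytes or less are returned
 * unchanged.
 */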

/*
 * Allocate the CAAM DMA buffer.
 * First try to allocate the maximum size. If that fails, halve the size
 * and retry until the minimum size is reached. If the allocation cannot
 * be done even with the minimum size, return TEE_ERROR_OUT_OF_MEMORY,
 * TEE_SUCCESS otherwise.
 *
 * @obj       CAAM DMA object
 * @min_size  Minimum allocation size
 * @size[out] Size actually allocated
 */
static TEE_Result try_allocate_dmabuf_max_size(struct caamdmaobj *obj,
					       size_t min_size,
					       size_t *size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t alloc_size = 0;
	struct priv_dmaobj *priv = NULL;
	bool try_alloc = false;
	uint32_t exceptions = 0;

	alloc_size = get_dma_max_alloc_size(obj);
	if (alloc_size) {
		try_alloc = true;
	} else {
		ret = TEE_SUCCESS;
		goto out;
	}

	priv = obj->priv;

	exceptions = cpu_spin_lock_xsave(&memlock);

	while (try_alloc) {
		ret = try_allocate_dmabuf(priv, alloc_size);
		if (!ret) {
			try_alloc = false;
		} else {
			if (alloc_size > min_size)
				alloc_size = MAX(min_size, alloc_size / 2);
			else
				try_alloc = false;
		}
	}

	cpu_spin_unlock_xrestore(&memlock, exceptions);

out:
	*size = alloc_size;

	return ret;
}
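
/*
 * Worked example (illustrative): with a maximum allocation size of 8192
 * bytes and min_size = 1000, the retry sequence under memory pressure is
 * 8192, 4096, 2048, 1024, then MAX(1000, 512) = 1000; if the 1000-byte
 * allocation also fails, the function gives up and returns
 * TEE_ERROR_OUT_OF_MEMORY.
 */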

TEE_Result caam_dmaobj_prepare(struct caamdmaobj *input,
			       struct caamdmaobj *output, size_t min_size)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t alloc_input = 0;
	size_t alloc_output = 0;

	if (!input && !output) {
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	if ((input && !input->priv) || (output && !output->priv)) {
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	DMAOBJ_TRACE("input=%p - output=%p - min=%zu", input, output, min_size);

	ret = try_allocate_dmabuf_max_size(input, min_size, &alloc_input);
	if (ret)
		goto out;

	ret = try_allocate_dmabuf_max_size(output, min_size, &alloc_output);
	if (ret)
		goto out;

out:
	DMAOBJ_TRACE("Allocation (input %zu, output %zu) returns 0x%" PRIx32,
		     input ? alloc_input : 0, output ? alloc_output : 0,
		     ret);

	return ret;
}

TEE_Result caam_dmaobj_sgtbuf_inout_build(struct caamdmaobj *input,
					  struct caamdmaobj *output,
					  size_t *length, size_t off,
					  size_t align)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	size_t len = 0;

	DMAOBJ_TRACE("input=%p/output=%p %zu bytes (offset=%zu, align=%zu)",
		     input, output, *length, off, align);

	if (!input || !output || !length || !input->priv || !output->priv ||
	    !*length) {
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	/* First build the input SGT/Buffer */
	ret = caam_dmaobj_sgtbuf_build(input, length, off, align);
	if (ret)
		goto out;

	/*
	 * Next build the output SGT/Buffer.
	 * If the returned length is not the same as the input's, redo the
	 * input SGT/Buffer with the same length as the output's.
	 */
	len = *length;
	ret = caam_dmaobj_sgtbuf_build(output, &len, off, *length);
	if (ret)
		goto out;

	if (len != *length) {
		DMAOBJ_TRACE("Retry In %zu bytes vs Out %zu bytes", *length,
			     len);

		/* Redo the input with the output length */
		*length = len;
		ret = caam_dmaobj_sgtbuf_build(input, length, off, len);
		if (!ret && *length != len) {
			DMAOBJ_TRACE("Error In %zu bytes vs Out %zu bytes",
				     *length, len);
			ret = TEE_ERROR_OUT_OF_MEMORY;
		}
	}

out:
	DMAOBJ_TRACE("Input/Output SGTBUF returns 0x%" PRIx32, ret);

	return ret;
}
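
/*
 * Usage sketch (illustrative; full_size, block_size and the job-run step
 * are placeholders, not part of this driver): a cipher-like caller walks
 * the whole request with matching input/output windows:
 *
 *	size_t off = 0;
 *	size_t todo = 0;
 *
 *	while (off < full_size) {
 *		todo = full_size - off;
 *		ret = caam_dmaobj_sgtbuf_inout_build(&in, &out, &todo, off,
 *						     block_size);
 *		if (ret)
 *			return ret;
 *
 *		...	// run one CAAM job on in.sgtbuf / out.sgtbuf
 *
 *		caam_dmaobj_copy_to_orig(&out);
 *		off += todo;
 *	}
 */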

TEE_Result caam_dmaobj_sgtbuf_build(struct caamdmaobj *obj, size_t *length,
				    size_t off, size_t align)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	enum caam_status retstatus = CAAM_FAILURE;
	struct priv_dmaobj *priv = NULL;
	struct dmaentry *entry = NULL;
	struct dmaentry *start_entry = NULL;
	size_t max_length = 0;
	size_t acc_length = 0;
	size_t offset = off;
	unsigned int idx = 0;
	unsigned int nb_sgt = 0;

	DMAOBJ_TRACE("obj=%p of %zu bytes (offset=%zu) - align %zu", obj,
		     *length, off, align);

	if (!obj || !obj->priv || !length || !*length) {
		ret = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	priv = obj->priv;

	max_length = *length;
	if (priv->dmabuf.allocated && max_length > priv->dmabuf.allocated &&
	    priv->dmabuf.allocated > align)
		max_length = ROUNDDOWN(priv->dmabuf.allocated, align);

	DMAOBJ_TRACE("Prepare SGT/Buffer to do %zu of %zu", max_length,
		     *length);

	/* Find the first DMA buffer to start with */
	TAILQ_FOREACH(entry, &priv->list, link) {
		if (offset < entry->origbuf.length)
			break;

		offset -= entry->origbuf.length;
	}

	if (!entry) {
		DMAOBJ_TRACE("There is no DMA Object available");
		ret = TEE_ERROR_GENERIC;
		goto out;
	}

	start_entry = entry;
	DMAOBJ_TRACE("Start with %p data %p offset %zu", start_entry,
		     start_entry->origbuf.data, offset);

	acc_length = entry->origbuf.length - offset;
	nb_sgt = 1;

	/* Calculate the number of SGT entries */
	for (entry = TAILQ_NEXT(entry, link); entry && acc_length < max_length;
	     entry = TAILQ_NEXT(entry, link)) {
		acc_length += entry->origbuf.length;
		nb_sgt++;
	}

	DMAOBJ_TRACE("%d of %d SGT/Buffer entries to handle", nb_sgt,
		     priv->nb_sgtbuf);
	if (priv->nb_sgtbuf < nb_sgt) {
		if (priv->nb_sgtbuf) {
			obj->sgtbuf.number = priv->nb_sgtbuf;
			obj->sgtbuf.sgt_type = priv->nb_sgtbuf > 1;

			caam_sgtbuf_free(&obj->sgtbuf);
			caam_free(priv->sgtdata);
			priv->nb_sgtbuf = 0;
		}

		obj->sgtbuf.number = nb_sgt;
		obj->sgtbuf.sgt_type = nb_sgt > 1;

		/* Allocate a new SGT/Buffer object */
		retstatus = caam_sgtbuf_alloc(&obj->sgtbuf);
		DMAOBJ_TRACE("Allocate %d SGT entries ret 0x%" PRIx32,
			     obj->sgtbuf.number, retstatus);
		if (retstatus != CAAM_NO_ERROR) {
			ret = caam_status_to_tee_result(retstatus);
			goto out;
		}

		priv->sgtdata = caam_calloc(nb_sgt * sizeof(*priv->sgtdata));
		if (!priv->sgtdata) {
			ret = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		priv->nb_sgtbuf = nb_sgt;
	} else {
		obj->sgtbuf.number = nb_sgt;
		obj->sgtbuf.sgt_type = nb_sgt > 1;
	}

	/* Reset the DMA Buffer index if allocated */
	if (priv->dmabuf.allocated) {
		priv->dmabuf.remind = priv->dmabuf.allocated;
		priv->dmabuf.buf.length = 0;
	}

	obj->sgtbuf.length = 0;
	for (entry = start_entry; entry && idx < nb_sgt;
	     entry = TAILQ_NEXT(entry, link), idx++) {
		DMAOBJ_TRACE("entry %p (%d)", entry, idx);
		if (entry->nodma_access || entry->newbuf) {
			retstatus = entry_sgtbuf_dmabuf(obj, entry, idx,
							offset);
			if (retstatus != CAAM_NO_ERROR) {
				ret = caam_status_to_tee_result(retstatus);
				goto out;
			}
		} else {
			retstatus = entry_sgtbuf(obj, entry, idx, offset);
			if (retstatus != CAAM_NO_ERROR) {
				ret = caam_status_to_tee_result(retstatus);
				goto out;
			}
		}

		if (obj->sgtbuf.length >= max_length) {
			DMAOBJ_TRACE("Hold-on enough length %zu", max_length);
			obj->sgtbuf.length = max_length;
			break;
		}
		offset = 0;
	}

	if (obj->sgtbuf.sgt_type) {
		/* Build the SGT table based on the physical area list */
		caam_sgt_fill_table(&obj->sgtbuf);

		obj->sgtbuf.paddr = virt_to_phys(obj->sgtbuf.sgt);
	} else {
		obj->sgtbuf.paddr = obj->sgtbuf.buf->paddr;
	}

	*length = obj->sgtbuf.length;
	ret = TEE_SUCCESS;
out:
	DMAOBJ_TRACE("SGTBUF (%zu) returns 0x%" PRIx32, *length, ret);
	return ret;
}
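
/*
 * Worked example (illustrative): for a 16 KB request backed by an 8 KB
 * DMA buffer and align = 1024, max_length is clamped to
 * ROUNDDOWN(8192, 1024) = 8192 bytes, so a first call handles 8 KB and
 * returns *length = 8192; the caller then calls again with off = 8192
 * to process the remaining half.
 */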