
Searched refs: num_pages (results 1 – 18 of 18), sorted by relevance

/optee_os/core/mm/
fobj.c
67 unsigned int num_pages) in fobj_init() argument
70 fobj->num_pages = num_pages; in fobj_init()
187 assert(page_idx < fobj->num_pages); in rwp_paged_iv_load_page()
200 assert(page_idx < fobj->num_pages); in rwp_paged_iv_save_page()
234 assert(page_idx < fobj->num_pages); in rwp_paged_iv_get_iv_vaddr()
298 assert(page_idx < fobj->num_pages); in rwp_unpaged_iv_load_page()
311 assert(page_idx < fobj->num_pages); in rwp_unpaged_iv_save_page()
403 assert(num_pages); in fobj_rw_paged_alloc()
687 assert(num_pages); in fobj_locked_paged_alloc()
711 assert(page_idx < fobj->num_pages); in lop_load_page()
[all …]
file.c
97 ADD_OVERFLOW(page_offset, fse->slice.fobj->num_pages, &s)) { in file_add_slice()
189 page_offset < fs->page_offset + fs->fobj->num_pages) in file_find_slice()
mobj.c
584 m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE; in mobj_with_fobj_alloc()
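
Taken together, the core/mm hits show the contract around num_pages: a paged fobj records its length in pages, every per-page operation asserts page_idx < fobj->num_pages, and the backing mobj is sized as fobj->num_pages * SMALL_PAGE_SIZE. A minimal standalone sketch of that contract follows; the struct and helper names are hypothetical, not the OP-TEE definitions.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	#define SMALL_PAGE_SIZE	4096u	/* assumed 4 KiB pages, as in OP-TEE */

	/* Hypothetical stand-in for struct fobj: only num_pages matters here. */
	struct fobj_sketch {
		unsigned int num_pages;
	};

	/* Mirrors mobj.size = fobj->num_pages * SMALL_PAGE_SIZE above. */
	static size_t fobj_sketch_bytes(const struct fobj_sketch *f)
	{
		return (size_t)f->num_pages * SMALL_PAGE_SIZE;
	}

	/* Every load/save/IV path above bounds-checks the page index this way. */
	static void fobj_sketch_touch_page(const struct fobj_sketch *f,
					   unsigned int page_idx)
	{
		assert(page_idx < f->num_pages);
		/* ... load or save that page ... */
	}

	int main(void)
	{
		struct fobj_sketch f = { .num_pages = 4 };

		fobj_sketch_touch_page(&f, 3);			/* ok: last valid page */
		printf("%zu bytes\n", fobj_sketch_bytes(&f));	/* 16384 */
		return 0;
	}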
/optee_os/core/include/mm/
fobj.h
24 unsigned int num_pages; member
62 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages);
73 struct fobj *fobj_rw_paged_alloc(unsigned int num_pages);
87 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
106 struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
165 #define fobj_ta_mem_alloc(num_pages) fobj_rw_paged_alloc(num_pages) argument
173 struct fobj *fobj_sec_mem_alloc(unsigned int num_pages);
175 #define fobj_ta_mem_alloc(num_pages) fobj_sec_mem_alloc(num_pages) argument
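fobj.h defines fobj_ta_mem_alloc() twice (lines 165 and 175), once as an alias for fobj_rw_paged_alloc() and once for fobj_sec_mem_alloc(); the hits do not show the guard, but the shape is a configuration-selected alias. A sketch of that shape, assuming CFG_WITH_PAGER is the selecting option:

	/* Sketch only: CFG_WITH_PAGER as the guard is an assumption, not shown above. */
	#ifdef CFG_WITH_PAGER
	/* Pager enabled: TA memory is backed by read/write paged fobjs. */
	#define fobj_ta_mem_alloc(num_pages)	fobj_rw_paged_alloc(num_pages)
	#else
	/* No pager: TA memory comes straight from secure memory. */
	#define fobj_ta_mem_alloc(num_pages)	fobj_sec_mem_alloc(num_pages)
	#endif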
mobj.h
243 struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages);
249 unsigned int num_pages);
255 paddr_t pa, unsigned int num_pages);
260 struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
292 struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
298 size_t num_pages __unused, in mobj_mapped_shm_alloc()
sp_mem.h
83 paddr_t pa, unsigned int num_pages);
/optee_os/core/kernel/
msg_param.c
63 size_t num_pages) in msg_param_extract_pages() argument
85 for (cnt = 0; cnt < num_pages; cnt++, va++) { in msg_param_extract_pages()
121 size_t num_pages = 0; in msg_param_mobj_from_noncontig() local
128 num_pages = (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1; in msg_param_mobj_from_noncontig()
129 if (MUL_OVERFLOW(num_pages, sizeof(paddr_t), &msize)) in msg_param_mobj_from_noncontig()
137 pages, num_pages)) in msg_param_mobj_from_noncontig()
141 mobj = mobj_mapped_shm_alloc(pages, num_pages, page_offset, in msg_param_mobj_from_noncontig()
144 mobj = mobj_reg_shm_alloc(pages, num_pages, page_offset, in msg_param_mobj_from_noncontig()
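
msg_param_mobj_from_noncontig() derives num_pages by rounding the buffer size (including the offset into the first page) up to whole pages, then verifies that num_pages * sizeof(paddr_t) cannot overflow before sizing the page-address array. A standalone sketch of that step, using the GCC/Clang __builtin_mul_overflow() where the snippet uses OP-TEE's MUL_OVERFLOW() macro:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define SMALL_PAGE_SIZE	4096u	/* assumed 4 KiB pages */
	typedef uint64_t paddr_t;	/* assumed physical address type */

	/*
	 * Round size_plus_offs up to whole pages and compute the byte size of
	 * the paddr_t array needed to hold them, mirroring lines 128-129 of
	 * msg_param.c. Returns false on a zero size or if the array size would
	 * overflow.
	 */
	bool pages_needed(size_t size_plus_offs, size_t *num_pages,
			  size_t *array_bytes)
	{
		if (!size_plus_offs)
			return false;

		*num_pages = (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1;

		return !__builtin_mul_overflow(*num_pages, sizeof(paddr_t),
					       array_bytes);
	}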
ldelf_syscalls.c
275 size_t num_pages = 0; in ldelf_syscall_map_bin() local
314 num_pages = num_rounded_bytes / SMALL_PAGE_SIZE; in ldelf_syscall_map_bin()
332 num_pages > fs->fobj->num_pages) { in ldelf_syscall_map_bin()
355 struct fobj *f = fobj_ta_mem_alloc(num_pages); in ldelf_syscall_map_bin()
/optee_os/core/arch/arm/mm/
mobj_ffa.c
52 static size_t shm_size(size_t num_pages) in shm_size() argument
56 if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s)) in shm_size()
63 static struct mobj_ffa *ffa_new(unsigned int num_pages) in ffa_new() argument
68 if (!num_pages) in ffa_new()
71 s = shm_size(num_pages); in ffa_new()
79 mf->mobj.size = num_pages * SMALL_PAGE_SIZE; in ffa_new()
93 mf = ffa_new(num_pages); in mobj_ffa_sel1_spmc_new()
196 unsigned int num_pages) in mobj_ffa_sel2_spmc_new() argument
201 mf = ffa_new(num_pages); in mobj_ffa_sel2_spmc_new()
214 paddr_t pa, unsigned int num_pages) in mobj_ffa_add_pages_at() argument
[all …]
mobj_dyn_shm.c
296 struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages, in mobj_reg_shm_alloc() argument
304 if (!num_pages || page_offset >= SMALL_PAGE_SIZE) in mobj_reg_shm_alloc()
307 s = mobj_reg_shm_size(num_pages); in mobj_reg_shm_alloc()
315 mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset; in mobj_reg_shm_alloc()
321 memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages); in mobj_reg_shm_alloc()
324 for (i = 0; i < num_pages; i++) { in mobj_reg_shm_alloc()
433 struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages, in mobj_mapped_shm_alloc() argument
436 struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages, in mobj_mapped_shm_alloc()
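
mobj_reg_shm_alloc() (and, in the same way, shm_size() in mobj_ffa.c and mobj_sp_size() in sp_mem.c) sizes a header structure plus a trailing array of num_pages physical addresses with an overflow-checked multiplication, rejects a zero num_pages or a page_offset of a full page or more, records mobj.size as num_pages * SMALL_PAGE_SIZE - page_offset, and copies the caller's page list. A simplified standalone sketch of that allocation pattern; the struct is an assumed stand-in, not the OP-TEE mobj_reg_shm:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define SMALL_PAGE_SIZE	4096u	/* assumed 4 KiB pages */
	typedef uint64_t paddr_t;	/* assumed physical address type */

	/* Simplified stand-in: header plus flexible array of page addresses. */
	struct reg_shm_sketch {
		size_t size;		/* usable bytes of the shared buffer */
		size_t num_pages;
		paddr_t pages[];
	};

	struct reg_shm_sketch *reg_shm_sketch_alloc(const paddr_t *pages,
						    size_t num_pages,
						    size_t page_offset)
	{
		struct reg_shm_sketch *r = NULL;
		size_t s = 0;

		if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
			return NULL;

		/* sizeof(header) + num_pages * sizeof(paddr_t), overflow-checked */
		if (__builtin_mul_overflow(num_pages, sizeof(paddr_t), &s) ||
		    __builtin_add_overflow(s, sizeof(*r), &s))
			return NULL;

		r = calloc(1, s);
		if (!r)
			return NULL;

		r->num_pages = num_pages;
		r->size = num_pages * SMALL_PAGE_SIZE - page_offset;
		memcpy(r->pages, pages, sizeof(*pages) * num_pages);
		return r;
	}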
sp_mem.c
34 static size_t mobj_sp_size(size_t num_pages) in mobj_sp_size() argument
38 if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s)) in mobj_sp_size()
73 paddr_t pa, unsigned int num_pages) in sp_mem_add_pages() argument
79 if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count) in sp_mem_add_pages()
82 if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE)) in sp_mem_add_pages()
85 for (n = 0; n < num_pages; n++) in sp_mem_add_pages()
tee_pager.c
634 base, base + fobj->num_pages * SMALL_PAGE_SIZE, type); in tee_pager_add_core_region()
636 reg = alloc_region(base, fobj->num_pages * SMALL_PAGE_SIZE); in tee_pager_add_core_region()
742 size_t s = fobj->num_pages * SMALL_PAGE_SIZE; in pager_add_um_region()
2001 size_t num_pages = 0; in tee_pager_alloc() local
2012 num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE; in tee_pager_alloc()
2013 fobj = fobj_locked_paged_alloc(num_pages); in tee_pager_alloc()
2022 asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE); in tee_pager_alloc()
2034 mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE); in tee_pager_init_iv_region()
2042 asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE); in tee_pager_init_iv_region()
core_mmu.c
1710 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages, in core_mmu_map_pages() argument
1733 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) in core_mmu_map_pages()
1739 for (i = 0; i < num_pages; i++) { in core_mmu_map_pages()
1788 size_t num_pages, in core_mmu_map_contiguous_pages() argument
1811 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) in core_mmu_map_contiguous_pages()
1817 for (i = 0; i < num_pages; i++) { in core_mmu_map_contiguous_pages()
1854 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) in core_mmu_unmap_pages() argument
1865 if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1)) in core_mmu_unmap_pages()
1871 for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) { in core_mmu_unmap_pages()
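
core_mmu_map_pages(), core_mmu_map_contiguous_pages() and core_mmu_unmap_pages() all handle num_pages the same way: confirm that the last byte, vstart + num_pages * SMALL_PAGE_SIZE - 1, still lies inside the target mapping, then loop once per page, advancing by SMALL_PAGE_SIZE. A standalone sketch of that loop shape; the two helpers are hypothetical placeholders for the real page-table walk:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define SMALL_PAGE_SIZE	4096u	/* assumed 4 KiB pages */
	typedef uintptr_t vaddr_t;	/* assumed virtual address type */
	typedef uint64_t paddr_t;	/* assumed physical address type */

	/* Hypothetical placeholders; the real code looks up and edits page tables. */
	static bool va_range_in_region(vaddr_t va, size_t len)
	{
		(void)va; (void)len;
		return true;	/* stub: pretend a region covers [va, va + len) */
	}

	static bool map_one_small_page(vaddr_t va, paddr_t pa)
	{
		(void)va; (void)pa;
		return true;	/* stub: real code writes one page-table entry */
	}

	int map_pages_sketch(vaddr_t vstart, const paddr_t *pages, size_t num_pages)
	{
		size_t i = 0;

		/* Last byte of the range must still fall inside the mapping. */
		if (!va_range_in_region(vstart, num_pages * SMALL_PAGE_SIZE))
			return -1;

		/* One SMALL_PAGE_SIZE page per iteration, as in core_mmu_map_pages(). */
		for (i = 0; i < num_pages; i++)
			if (!map_one_small_page(vstart + i * SMALL_PAGE_SIZE,
						pages[i]))
				return -1;

		return 0;
	}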
/optee_os/scripts/
mem_usage.py
51 num_pages = 0
57 num_pages = (size - 1) / 4096 + 1
62 printf(' %d pages', num_pages)
/optee_os/core/arch/arm/include/mm/
core_mmu.h
593 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
609 size_t num_pages,
617 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);
/optee_os/core/arch/arm/kernel/
thread_spmc.c
1426 unsigned int num_regions, unsigned int num_pages, in set_pages() argument
1440 if (idx != num_pages) in set_pages()
1453 unsigned int num_pages = 0; in thread_spmc_populate_mobj_from_rx() local
1474 num_pages = READ_ONCE(descr->total_page_count); in thread_spmc_populate_mobj_from_rx()
1475 mf = mobj_ffa_sel2_spmc_new(cookie, num_pages); in thread_spmc_populate_mobj_from_rx()
1480 READ_ONCE(descr->address_range_count), num_pages, mf)) { in thread_spmc_populate_mobj_from_rx()
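
thread_spmc_populate_mobj_from_rx() reads total_page_count from the shared memory descriptor, creates a mobj for that many pages, and set_pages() then rejects the descriptor unless the page counts of its address ranges add up to exactly num_pages (the idx != num_pages check). A simplified sketch of that consistency check over an assumed region layout:

	#include <stdbool.h>
	#include <stdint.h>

	/* Assumed, simplified view of one constituent address range. */
	struct mem_region_sketch {
		uint64_t address;
		uint32_t page_count;
	};

	/*
	 * The per-range page counts must sum to exactly the total announced in
	 * the descriptor; anything else means the descriptor is inconsistent.
	 */
	bool regions_cover_num_pages(const struct mem_region_sketch *regions,
				     unsigned int num_regions,
				     unsigned int num_pages)
	{
		unsigned int idx = 0;
		unsigned int n = 0;

		for (n = 0; n < num_regions; n++)
			if (__builtin_add_overflow(idx, regions[n].page_count,
						   &idx))
				return false;

		return idx == num_pages;
	}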
boot.c
381 const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE; in ro_paged_alloc() local
387 return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs, in ro_paged_alloc()
390 return fobj_ro_paged_alloc(num_pages, hashes, store); in ro_paged_alloc()
thread.c
1008 size_t num_pages = 0; in init_thread_stacks() local
1020 num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1; in init_thread_stacks()
1021 fobj = fobj_locked_paged_alloc(num_pages); in init_thread_stacks()
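
boot.c, thread.c and tee_pager.c derive num_pages the same way before allocating a paged fobj: take the byte size of a tee_mm allocation, divide by SMALL_PAGE_SIZE, and hand the result to one of the fobj_*_paged_alloc() functions (init_thread_stacks() keeps one page back, presumably as a guard page). A minimal sketch of that conversion, with the tee_mm call reduced to a plain byte count:

	#include <assert.h>
	#include <stddef.h>

	#define SMALL_PAGE_SIZE	4096u	/* assumed 4 KiB pages */

	/*
	 * The real callers pass tee_mm_get_bytes(mm); those allocations are
	 * assumed to be page-granular, so the division is exact.
	 */
	size_t bytes_to_num_pages(size_t bytes)
	{
		assert(bytes % SMALL_PAGE_SIZE == 0);
		return bytes / SMALL_PAGE_SIZE;
	}

	/*
	 * Usage, mirroring tee_pager_alloc() and init_thread_stacks():
	 *
	 *	num_pages = bytes_to_num_pages(tee_mm_get_bytes(mm));
	 *	fobj = fobj_locked_paged_alloc(num_pages);
	 */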
