Cross-reference: uses of SMALL_PAGE_SIZE in optee_os (line numbers refer to each file; "[all …]" marks a truncated match list).

/optee_os/core/arch/arm/kernel/

kern.ld.S
  121  . = ALIGN(SMALL_PAGE_SIZE);
  171  . = ALIGN(SMALL_PAGE_SIZE);
  213  . = ALIGN(SMALL_PAGE_SIZE);
  289  . = ALIGN(SMALL_PAGE_SIZE);
  299  ASSERT(!(__flatmap_init_rx_start & (SMALL_PAGE_SIZE - 1)),
  316  . = ALIGN(SMALL_PAGE_SIZE);
  331  __init_end = ALIGN(__rodata_init_end, SMALL_PAGE_SIZE);
  343  . = ALIGN(SMALL_PAGE_SIZE);
  348  . = ALIGN(SMALL_PAGE_SIZE);
  479  SMALL_PAGE_SIZE;
  [all …]

boot.c
  322  stats.npages_all * SMALL_PAGE_SIZE / 1024);  in print_pager_pool_size()
  381  const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;  in ro_paged_alloc()
  402  size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *  in init_runtime()
  411  assert(pageable_size % SMALL_PAGE_SIZE == 0);  in init_runtime()
  468  for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {  in init_runtime()
  470  const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;  in init_runtime()
  474  res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);  in init_runtime()
  528  tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);  in init_runtime()
  530  (pageable_size - init_size) / SMALL_PAGE_SIZE,  in init_runtime()
  534  SMALL_PAGE_SIZE, true);  in init_runtime()
  [all …]

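The init_runtime() hits above sketch the pager's integrity scheme: the pageable store is split into 4 KiB pages and each page is checked against a per-page hash (hash_sha256_check()) before it may be paged in. A minimal standalone illustration of the same loop shape, using a toy digest as a stand-in for SHA-256 (names and sizes here are illustrative, not OP-TEE's):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SMALL_PAGE_SIZE 0x1000UL
#define HASH_SIZE 8	/* stand-in digest size; the real code uses SHA-256 */

/* Toy digest (FNV-1a) standing in for hash_sha256_check()'s SHA-256. */
static void toy_hash(uint8_t digest[HASH_SIZE], const uint8_t *p, size_t n)
{
	uint64_t h = 0xcbf29ce484222325ULL;
	size_t i;

	for (i = 0; i < n; i++)
		h = (h ^ p[i]) * 0x100000001b3ULL;
	memcpy(digest, &h, HASH_SIZE);
}

static int check_page(const uint8_t *hash, const uint8_t *page)
{
	uint8_t digest[HASH_SIZE];

	toy_hash(digest, page, SMALL_PAGE_SIZE);
	return !memcmp(digest, hash, HASH_SIZE);
}

int main(void)
{
	static uint8_t store[2 * SMALL_PAGE_SIZE];	/* the "paged store" */
	uint8_t hashes[2][HASH_SIZE];			/* one digest per page */
	size_t n;

	/* Build the per-page hash table that init_runtime() expects to
	 * find appended to the pageable image. */
	for (n = 0; n * SMALL_PAGE_SIZE < sizeof(store); n++)
		toy_hash(hashes[n], store + n * SMALL_PAGE_SIZE,
			 SMALL_PAGE_SIZE);

	/* Verify every page, same loop shape as init_runtime(). */
	for (n = 0; n * SMALL_PAGE_SIZE < sizeof(store); n++)
		assert(check_page(hashes[n], store + n * SMALL_PAGE_SIZE));
	return 0;
}
```
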
stmm_sp.c
  70   static const unsigned int stmm_stack_size = 4 * SMALL_PAGE_SIZE;
  71   static const unsigned int stmm_heap_size = 398 * SMALL_PAGE_SIZE;
  72   static const unsigned int stmm_sec_buf_size = 4 * SMALL_PAGE_SIZE;
  73   static const unsigned int stmm_ns_comm_buf_size = 4 * SMALL_PAGE_SIZE;
  168  size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;  in alloc_and_map_sp_fobj()
  177  res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,  in alloc_and_map_sp_fobj()
  233  SMALL_PAGE_SIZE);  in load_stmm()
  503  res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);  in sp_svc_get_mem_attr()
  528  if (!va || !nr_pages || MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz))  in sp_svc_set_mem_attr()

ldelf_loader.c
  32  size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;  in alloc_and_map_ldelf_fobj()
  40  res = vm_map(uctx, va, num_pgs * SMALL_PAGE_SIZE,  in alloc_and_map_ldelf_fobj()
  75  rw_addr = ROUNDUP(code_addr + ldelf_code_size, SMALL_PAGE_SIZE);  in ldelf_load_ldelf()
  91  ROUNDUP(ldelf_code_size, SMALL_PAGE_SIZE), prot);  in ldelf_load_ldelf()

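alloc_and_map_ldelf_fobj() above (like alloc_and_map_sp_fobj() before it) uses the recurring idiom ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE to turn a byte length into a page count. A self-contained sketch, with local stand-ins for the ROUNDUP()/ROUNDUP_DIV() helpers from OP-TEE's <util.h> (assuming a power-of-two granule):

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SMALL_PAGE_SIZE 0x1000UL /* 4 KiB granule, as in ldelf/sys.h */

/* Stand-ins for OP-TEE's ROUNDUP()/ROUNDUP_DIV(); 'size' must be a
 * power of two for this mask-based form. */
#define ROUNDUP(v, size)     (((v) + (size) - 1) & ~((size) - 1))
#define ROUNDUP_DIV(v, size) (ROUNDUP(v, size) / (size))

int main(void)
{
	size_t sz = 0x2345;	/* arbitrary byte length */

	/* Same expression shape as alloc_and_map_ldelf_fobj(): */
	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;

	assert(num_pgs == 3);	/* 0x2345 bytes need three 4 KiB pages */
	assert(num_pgs == ROUNDUP_DIV(sz, SMALL_PAGE_SIZE));
	printf("%zu bytes -> %zu pages\n", sz, num_pgs);
	return 0;
}
```
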
thread_optee_smc.c
  129  arg = mobj_get_va(mobj, 0, SMALL_PAGE_SIZE);  in map_cmd_buffer()
  138  if (args_size > SMALL_PAGE_SIZE) {  in map_cmd_buffer()
  299  !(pa & SMALL_PAGE_MASK) && sz <= SMALL_PAGE_SIZE)  in rpc_shm_mobj_alloc()
  609  if (IS_ENABLED(CFG_CORE_DYN_SHM) && size > SMALL_PAGE_SIZE)  in thread_rpc_alloc_kernel_payload()

/optee_os/core/arch/arm/plat-aspeed/

platform_ast2600.c
  48   SMALL_PAGE_SIZE);
  52   SMALL_PAGE_SIZE);
  56   SMALL_PAGE_SIZE);
  60   SMALL_PAGE_SIZE);
  78   MEM_AREA_IO_SEC, SMALL_PAGE_SIZE);  in main_init_gic()
  80   MEM_AREA_IO_SEC, SMALL_PAGE_SIZE);  in main_init_gic()
  110  MEM_AREA_IO_SEC, SMALL_PAGE_SIZE);  in plat_primary_init_early()

/optee_os/core/mm/

fobj.c
  94   memset(va, 0, SMALL_PAGE_SIZE);  in rwp_load_page()
  99   NULL, 0, src, SMALL_PAGE_SIZE, va,  in rwp_load_page()
  124  NULL, 0, va, SMALL_PAGE_SIZE, dst,  in rwp_save_page()
  137  return rwp_store_base + idx * SMALL_PAGE_SIZE;  in idx_to_store()
  152  if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))  in rwp_paged_iv_alloc()
  265  if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))  in rwp_unpaged_iv_alloc()
  377  SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;  in rwp_init()
  473  memcpy(va, src, SMALL_PAGE_SIZE);  in rop_load_page_helper()
  576  pg_idx = r / SMALL_PAGE_SIZE;  in init_rels()
  582  rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;  in init_rels()
  [all …]

mobj.c
  67   if (granule != SMALL_PAGE_SIZE &&  in mobj_phys_get_pa()
  376  if (granule != SMALL_PAGE_SIZE &&  in mobj_shm_get_pa()
  543  m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /  in mobj_seccpy_shm_alloc()
  544  SMALL_PAGE_SIZE);  in mobj_seccpy_shm_alloc()
  584  m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;  in mobj_with_fobj_alloc()
  585  m->mobj.phys_granule = SMALL_PAGE_SIZE;  in mobj_with_fobj_alloc()
  650  p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +  in mobj_with_fobj_get_pa()
  651  offs % SMALL_PAGE_SIZE;  in mobj_with_fobj_get_pa()
  654  if (granule != SMALL_PAGE_SIZE &&  in mobj_with_fobj_get_pa()

vm.c
  67    pad = SMALL_PAGE_SIZE;  in select_va_in_range()
  90    pad = SMALL_PAGE_SIZE;  in select_va_in_range()
  182   tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,  in rem_um_region()
  227   if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))  in umap_add_region()
  230   granul = MAX(align, SMALL_PAGE_SIZE);  in umap_add_region()
  291   reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);  in vm_map_pad()
  770   if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))  in vm_unmap()
  1034  reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);  in vm_add_rwmem()
  1163  (size_t)SMALL_PAGE_SIZE);  in tee_mmu_user_va2pa_attr()

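vm_unmap() above rounds an untrusted length with ROUNDUP_OVERFLOW() rather than plain ROUNDUP(), and fobj.c and stmm_sp.c apply MUL_OVERFLOW() to page counts for the same reason: page-size arithmetic on caller-supplied values must not wrap. A hedged sketch of both checks, using the GCC/Clang overflow builtins that the <util.h> macros are built on (the real macros are type-generic; these fix the types to size_t):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SMALL_PAGE_SIZE 0x1000UL

/* Stand-in for OP-TEE's MUL_OVERFLOW(): true means the product wrapped. */
static bool mul_overflow(size_t a, size_t b, size_t *res)
{
	return __builtin_mul_overflow(a, b, res);
}

/* Stand-in for OP-TEE's ROUNDUP_OVERFLOW(): round v up to 'size'
 * (assumed a power of two), reporting wrap-around. */
static bool roundup_overflow(size_t v, size_t size, size_t *res)
{
	size_t tmp = 0;

	if (__builtin_add_overflow(v, size - 1, &tmp))
		return true;
	*res = tmp & ~(size - 1);
	return false;
}

int main(void)
{
	size_t out = 0;

	/* vm_unmap()-style check: page-aligning a huge length must not wrap. */
	if (roundup_overflow(SIZE_MAX - 42, SMALL_PAGE_SIZE, &out))
		printf("rejected: length overflows when page-aligned\n");

	/* fobj/mobj-style check: num_pages * SMALL_PAGE_SIZE must fit. */
	if (mul_overflow(SIZE_MAX / 2, SMALL_PAGE_SIZE, &out))
		printf("rejected: page count overflows a byte size\n");

	return 0;
}
```
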
/optee_os/core/arch/arm/mm/

sp_mem.c
  59   m->mobj.size = pages * SMALL_PAGE_SIZE;  in sp_mem_new_mobj()
  60   m->mobj.phys_granule = SMALL_PAGE_SIZE;  in sp_mem_new_mobj()
  68   return ROUNDUP(ms->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;  in get_page_count()
  82   if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))  in sp_mem_add_pages()
  86   ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;  in sp_mem_add_pages()
  121  p = ms->pages[offset / SMALL_PAGE_SIZE] +  in get_pa()
  124  case SMALL_PAGE_SIZE:  in get_pa()
  125  p = ms->pages[offset / SMALL_PAGE_SIZE];  in get_pa()
  250  (new_reg->page_count * SMALL_PAGE_SIZE);  in sp_mem_is_shared()
  260  (reg->page_count * SMALL_PAGE_SIZE);  in sp_mem_is_shared()

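get_pa() above resolves an offset into a discontiguous buffer by indexing a page array: offset / SMALL_PAGE_SIZE selects the page, offset % SMALL_PAGE_SIZE is the offset within it (mobj_dyn_shm.c and mobj_ffa.c below use the same lookup). A minimal sketch over a hypothetical page list:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SMALL_PAGE_SIZE 0x1000UL

typedef uintptr_t paddr_t;	/* stand-in for OP-TEE's paddr_t */

/* Resolve a byte offset inside a buffer backed by a list of (possibly
 * discontiguous) physical pages, as in sp_mem.c get_pa(). */
static paddr_t pages_get_pa(const paddr_t *pages, size_t num_pages,
			    size_t offset)
{
	size_t idx = offset / SMALL_PAGE_SIZE;

	assert(idx < num_pages);
	return pages[idx] + offset % SMALL_PAGE_SIZE;
}

int main(void)
{
	/* Three pages that are not physically contiguous. */
	paddr_t pages[] = { 0x80000000, 0x80042000, 0x80013000 };

	/* Offset 0x2010 lands 0x10 bytes into the third page. */
	assert(pages_get_pa(pages, 3, 0x2010) == 0x80013010);
	return 0;
}
```
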
tee_pager.c
  415   tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);  in tee_pager_set_alias_area()
  455   .idx = (va & mask) / SMALL_PAGE_SIZE,  in region_va2tblidx()
  578   pager_alias_next_free += SMALL_PAGE_SIZE;  in pager_add_alias_page()
  742   size_t s = fobj->num_pages * SMALL_PAGE_SIZE;  in pager_add_um_region()
  1049  size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);  in tee_pager_rem_um_region()
  1267  icache_inv_user_range(va, SMALL_PAGE_SIZE);  in tee_pager_unhide_page()
  1449  dcache_clean_range_pou(va, SMALL_PAGE_SIZE);  in pager_deploy_page()
  1451  icache_inv_user_range(va, SMALL_PAGE_SIZE);  in pager_deploy_page()
  1453  icache_inv_range(va, SMALL_PAGE_SIZE);  in pager_deploy_page()
  1576  asan_tag_access(va, va + SMALL_PAGE_SIZE);  in pager_get_page()
  [all …]

mobj_dyn_shm.c
  82   p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +  in mobj_reg_shm_get_pa()
  85   case SMALL_PAGE_SIZE:  in mobj_reg_shm_get_pa()
  86   p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];  in mobj_reg_shm_get_pa()
  207  sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);  in mobj_reg_shm_inc_map()
  215  sz / SMALL_PAGE_SIZE,  in mobj_reg_shm_inc_map()
  304  if (!num_pages || page_offset >= SMALL_PAGE_SIZE)  in mobj_reg_shm_alloc()
  315  mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;  in mobj_reg_shm_alloc()
  316  mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;  in mobj_reg_shm_alloc()
  330  SMALL_PAGE_SIZE))  in mobj_reg_shm_alloc()

mobj_ffa.c
  79   mf->mobj.size = num_pages * SMALL_PAGE_SIZE;  in ffa_new()
  80   mf->mobj.phys_granule = SMALL_PAGE_SIZE;  in ffa_new()
  121  return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;  in get_page_count()
  222  if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))  in mobj_ffa_add_pages_at()
  226  mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;  in mobj_ffa_add_pages_at()
  359  if (internal_offs >= SMALL_PAGE_SIZE)
  450  p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
  453  case SMALL_PAGE_SIZE:
  454  p = mf->pages[full_offset / SMALL_PAGE_SIZE];
  557  sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
  [all …]

pgt_cache.c
  83   COMPILE_TIME_ASSERT(PGT_SIZE * PGT_NUM_PGT_PER_PAGE == SMALL_PAGE_SIZE);  in pgt_init()
  86   uint8_t *tbl = tee_pager_alloc(SMALL_PAGE_SIZE);  in pgt_init()
  165  tee_pager_release_phys((void *)va, SMALL_PAGE_SIZE);  in push_to_free_list()
  339  if (!core_is_buffer_inside(p->vabase, SMALL_PAGE_SIZE, begin,  in pgt_entry_matches()
  453  idx = (b - p->vabase) / SMALL_PAGE_SIZE;  in clear_ctx_range_from_list()
  454  n = (e - b) / SMALL_PAGE_SIZE;  in clear_ctx_range_from_list()

core_mmu.c
  829   mmap[pos].region_size = SMALL_PAGE_SIZE;  in add_pager_vaspace()
  893   map->region_size = SMALL_PAGE_SIZE;  in assign_mem_granularity()
  898   map->region_size = SMALL_PAGE_SIZE;  in assign_mem_granularity()
  1064  vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);  in mem_map_add_id_map()
  1083  .region_size = SMALL_PAGE_SIZE,  in mem_map_add_id_map()
  1234  SMALL_PAGE_SIZE);  in core_init_mmu_map()
  1248  .region_size = SMALL_PAGE_SIZE,  in core_init_mmu_map()
  1617  r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);  in set_pg_region()
  1766  vaddr += SMALL_PAGE_SIZE;  in core_mmu_map_pages()
  1839  paddr += SMALL_PAGE_SIZE;  in core_mmu_map_contiguous_pages()
  [all …]

core_mmu_lpae.c
  425  return ROUNDUP(sizeof(base_xlation_table), SMALL_PAGE_SIZE) +  in core_mmu_get_total_pages_size()
  435  assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);  in core_alloc_mmu_prtn()
  447  COMPILE_TIME_ASSERT(sizeof(base_xlation_table) <= SMALL_PAGE_SIZE);  in core_alloc_mmu_prtn()
  448  memset(prtn->base_tables, 0, SMALL_PAGE_SIZE);  in core_alloc_mmu_prtn()
  449  tbl += ROUNDUP(sizeof(base_xlation_table), SMALL_PAGE_SIZE);  in core_alloc_mmu_prtn()
  454  assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);  in core_alloc_mmu_prtn()

core_mmu_v7.c
  269  uint32_t to_alloc = ROUNDUP(size, NUM_L2_ENTRIES * SMALL_PAGE_SIZE) /  in core_mmu_alloc_l2()
  270  (NUM_L2_ENTRIES * SMALL_PAGE_SIZE);  in core_mmu_alloc_l2()
  600  NUM_L2_ENTRIES * SMALL_PAGE_SIZE);  in core_mmu_entry_to_finer_grained()
  613  for (i = 0; i < NUM_L2_ENTRIES; i++, pa += SMALL_PAGE_SIZE)  in core_mmu_entry_to_finer_grained()

/optee_os/core/kernel/

msg_param.c
  82   va = mobj_get_va(mobj, 0, SMALL_PAGE_SIZE);  in msg_param_extract_pages()
  101  va = mobj_get_va(mobj, 0, SMALL_PAGE_SIZE);  in msg_param_extract_pages()
  128  num_pages = (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1;  in msg_param_mobj_from_noncontig()

ldelf_syscalls.c
  46   f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));  in ldelf_syscall_map_zi()
  65   size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);  in ldelf_syscall_unmap()
  312  if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))  in ldelf_syscall_map_bin()
  314  num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;  in ldelf_syscall_map_bin()
  454  size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);  in ldelf_syscall_set_prot()

/optee_os/core/arch/arm/include/mm/

generic_ram_layout.h
  162  SMALL_PAGE_SIZE)
  167  SMALL_PAGE_SIZE) - TA_RAM_START)

/optee_os/core/drivers/crypto/caam/utils/

utils_mem.c
  253  if (buf->length > SMALL_PAGE_SIZE) {  in caam_mem_get_pa_area()
  254  nb_pa_area = buf->length / SMALL_PAGE_SIZE + 1;  in caam_mem_get_pa_area()
  255  if (buf->length % SMALL_PAGE_SIZE)  in caam_mem_get_pa_area()
  289  MIN(SMALL_PAGE_SIZE - (va & SMALL_PAGE_MASK), len);  in caam_mem_get_pa_area()

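caam_mem_get_pa_area() walks a virtually contiguous buffer in page-bounded chunks; the first chunk is capped at the distance from va to the next page boundary, MIN(SMALL_PAGE_SIZE - (va & SMALL_PAGE_MASK), len). A standalone sketch of just that chunking loop (the addresses are made up and the virt-to-phys translation is omitted):

```c
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

#define SMALL_PAGE_SIZE 0x1000UL
#define SMALL_PAGE_MASK (SMALL_PAGE_SIZE - 1)
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

int main(void)
{
	uintptr_t va = 0x40000E80;	/* hypothetical, not page aligned */
	size_t len = 0x1400;		/* 5 KiB spanning three pages */

	while (len) {
		/* The first chunk stops at the next page boundary, as in
		 * caam_mem_get_pa_area(); later chunks are page sized. */
		size_t chunk = MIN(SMALL_PAGE_SIZE - (va & SMALL_PAGE_MASK),
				   len);

		printf("chunk: va=0x%" PRIxPTR " len=0x%zx\n", va, chunk);
		va += chunk;
		len -= chunk;
	}
	return 0;
}
```
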
/optee_os/ldelf/

ta_elf.c
  454  elf->max_offs = SMALL_PAGE_SIZE;  in init_elf()
  470  if (sz > SMALL_PAGE_SIZE)  in init_elf()
  478  return ROUNDUP(v, SMALL_PAGE_SIZE);  in roundup()
  483  return ROUNDDOWN(v, SMALL_PAGE_SIZE);  in rounddown()
  722  return min * SMALL_PAGE_SIZE;  in get_pad_begin()
  727  return (min + rnd) * SMALL_PAGE_SIZE;  in get_pad_begin()
  790  SMALL_PAGE_SIZE;  in populate_segments()
  801  vaddr += SMALL_PAGE_SIZE;  in populate_segments()
  802  filesz -= SMALL_PAGE_SIZE;  in populate_segments()
  803  memsz -= SMALL_PAGE_SIZE;  in populate_segments()
  [all …]

sys.h
  26  #define SMALL_PAGE_SIZE 0x00001000  (macro definition)

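This is the definition behind every hit above: SMALL_PAGE_SIZE is 0x1000, the 4 KiB translation granule, and the SMALL_PAGE_MASK seen in other hits is simply SMALL_PAGE_SIZE - 1. A tiny sketch of the alignment test this enables, mirroring the (SMALL_PAGE_SIZE - 1) mask in kern.ld.S's ASSERT() and the pa check in rpc_shm_mobj_alloc():

```c
#include <assert.h>
#include <stdint.h>

#define SMALL_PAGE_SIZE 0x00001000UL
#define SMALL_PAGE_MASK (SMALL_PAGE_SIZE - 1)	/* as in OP-TEE's mm headers */

/* An address is page aligned iff its low 12 bits are clear. */
static int is_page_aligned(uintptr_t a)
{
	return !(a & SMALL_PAGE_MASK);
}

int main(void)
{
	assert(is_page_aligned(0x80001000));
	assert(!is_page_aligned(0x80001004));
	return 0;
}
```
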
/optee_os/core/arch/arm/plat-rcar/

main.c
  40  register_phys_mem_pgdir(MEM_AREA_IO_SEC, PRR_BASE, SMALL_PAGE_SIZE);

/optee_os/core/arch/arm/plat-marvell/

main.c
  67  SMALL_PAGE_SIZE);