Lines matching references to the identifier n

138 #define GET_STACK_TOP_HARD(stack, n) \  argument
139 ((vaddr_t)&(stack)[n] + STACK_CANARY_SIZE / 2)
140 #define GET_STACK_TOP_SOFT(stack, n) \ argument
141 (GET_STACK_TOP_HARD(stack, n) + STACK_CHECK_EXTRA)
142 #define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \ argument
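The three macros above carve usable stack limits out of statically allocated per-core stack arrays: the hard top sits just past the leading canary half, the soft top adds STACK_CHECK_EXTRA of headroom, and the bottom is the end of the slot minus the trailing canary half. GET_STACK_BOTTOM is truncated here because its continuation line does not reference n, so the subtraction in the sketch below is an assumption; the sizes and array shape are illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

/* Illustrative sizes; the real values are configuration dependent. */
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#define STACK_CHECK_EXTRA	0
#define NB_CORES		2

/* One temporary stack per core, with canary space at both ends. */
static uint32_t stack_tmp[NB_CORES][512];

/* Hard top: just past the leading half of the canary area. */
#define GET_STACK_TOP_HARD(stack, n) \
	((vaddr_t)&(stack)[n] + STACK_CANARY_SIZE / 2)
/* Soft top: leaves STACK_CHECK_EXTRA headroom above the hard limit. */
#define GET_STACK_TOP_SOFT(stack, n) \
	(GET_STACK_TOP_HARD(stack, n) + STACK_CHECK_EXTRA)
/* Bottom: end of the slot minus the trailing canary half (assumed). */
#define GET_STACK_BOTTOM(stack, n) \
	((vaddr_t)&(stack)[n] + sizeof((stack)[n]) - STACK_CANARY_SIZE / 2)

int main(void)
{
	size_t n;

	for (n = 0; n < NB_CORES; n++)
		printf("tmp [%zu] %#lx..%#lx\n", n,
		       (unsigned long)GET_STACK_TOP_SOFT(stack_tmp, n),
		       (unsigned long)GET_STACK_BOTTOM(stack_tmp, n));
	return 0;
}

The main() loop mirrors what print_stack_limits() does further down with DMSG().
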
181 size_t n; in init_canaries() local
183 for (n = 0; n < ARRAY_SIZE(name); n++) { \ in init_canaries()
184 uint32_t *start_canary = &GET_START_CANARY(name, n); \ in init_canaries()
185 uint32_t *end_canary = &GET_END_CANARY(name, n); \ in init_canaries()
199 #define CANARY_DIED(stack, loc, n, addr) \ argument
202 n, (void *)addr); \
210 size_t n = 0; in thread_check_canaries() local
212 for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) { in thread_check_canaries()
213 canary = &GET_START_CANARY(stack_tmp, n); in thread_check_canaries()
215 CANARY_DIED(stack_tmp, start, n, canary); in thread_check_canaries()
216 canary = &GET_END_CANARY(stack_tmp, n); in thread_check_canaries()
218 CANARY_DIED(stack_tmp, end, n, canary); in thread_check_canaries()
221 for (n = 0; n < ARRAY_SIZE(stack_abt); n++) { in thread_check_canaries()
222 canary = &GET_START_CANARY(stack_abt, n); in thread_check_canaries()
224 CANARY_DIED(stack_abt, start, n, canary); in thread_check_canaries()
225 canary = &GET_END_CANARY(stack_abt, n); in thread_check_canaries()
227 CANARY_DIED(stack_abt, end, n, canary); in thread_check_canaries()
231 for (n = 0; n < ARRAY_SIZE(stack_thread); n++) { in thread_check_canaries()
232 canary = &GET_START_CANARY(stack_thread, n); in thread_check_canaries()
234 CANARY_DIED(stack_thread, start, n, canary); in thread_check_canaries()
235 canary = &GET_END_CANARY(stack_thread, n); in thread_check_canaries()
237 CANARY_DIED(stack_thread, end, n, canary); in thread_check_canaries()
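Taken together, the fragments from init_canaries(), CANARY_DIED() and thread_check_canaries() describe a stamp-then-verify scheme: known values are written at both ends of every stack early on, and a later sweep reports and panics on the first canary that changed. A minimal self-contained sketch of that scheme, with invented canary values, a single stack array, and fprintf()/abort() standing in for the kernel's logging and panic:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative constants; the real values and stack layout differ. */
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define NB_STACKS		2
#define STACK_WORDS		256

static uint32_t stack_tmp[NB_STACKS][STACK_WORDS];

/* In this sketch the first and last word of each stack hold the canaries. */
#define GET_START_CANARY(stack, n)	((stack)[n][0])
#define GET_END_CANARY(stack, n)	((stack)[n][STACK_WORDS - 1])

#define CANARY_DIED(stack, loc, n, addr) \
	do { \
		fprintf(stderr, "Dead canary at %s of '%s[%zu]' (%p)\n", \
			#loc, #stack, (size_t)(n), (void *)(addr)); \
		abort(); \
	} while (0)

static void init_canaries(void)
{
	size_t n;

	for (n = 0; n < NB_STACKS; n++) {
		uint32_t *start_canary = &GET_START_CANARY(stack_tmp, n);
		uint32_t *end_canary = &GET_END_CANARY(stack_tmp, n);

		*start_canary = START_CANARY_VALUE;
		*end_canary = END_CANARY_VALUE;
	}
}

static void thread_check_canaries(void)
{
	uint32_t *canary = NULL;
	size_t n;

	for (n = 0; n < NB_STACKS; n++) {
		canary = &GET_START_CANARY(stack_tmp, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n, canary);
		canary = &GET_END_CANARY(stack_tmp, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n, canary);
	}
}

int main(void)
{
	init_canaries();
	thread_check_canaries();		/* passes */
	stack_tmp[0][STACK_WORDS - 1] = 0;	/* simulate an overflow */
	thread_check_canaries();		/* reports the dead end canary */
	return 0;
}
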
341 size_t n = 0; in print_stack_limits() local
345 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { in print_stack_limits()
346 start = GET_STACK_TOP_SOFT(stack_tmp, n); in print_stack_limits()
347 end = GET_STACK_BOTTOM(stack_tmp, n); in print_stack_limits()
348 DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); in print_stack_limits()
350 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { in print_stack_limits()
351 start = GET_STACK_TOP_SOFT(stack_abt, n); in print_stack_limits()
352 end = GET_STACK_BOTTOM(stack_abt, n); in print_stack_limits()
353 DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); in print_stack_limits()
355 for (n = 0; n < CFG_NUM_THREADS; n++) { in print_stack_limits()
356 end = threads[n].stack_va_end; in print_stack_limits()
358 DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); in print_stack_limits()
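For the thread stacks, print_stack_limits() starts from each thread's recorded stack_va_end instead of a static array; the line that derives start is not part of this listing, so the sketch below assumes it subtracts a fixed per-thread stack size (STACK_THREAD_SIZE here is a placeholder):

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define CFG_NUM_THREADS		4
#define STACK_THREAD_SIZE	8192	/* assumed fixed thread stack size */

/* Reduced thread context: only the field used by the print loop. */
struct thread_ctx {
	vaddr_t stack_va_end;
};

static struct thread_ctx threads[CFG_NUM_THREADS];

static void print_thread_stack_limits(void)
{
	vaddr_t start = 0;
	vaddr_t end = 0;
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		end = threads[n].stack_va_end;
		start = end - STACK_THREAD_SIZE;	/* assumption */
		printf("thr [%zu] %#lx..%#lx\n", n,
		       (unsigned long)start, (unsigned long)end);
	}
}

int main(void)
{
	size_t n;

	/* Fake stack placements just to exercise the printout. */
	for (n = 0; n < CFG_NUM_THREADS; n++)
		threads[n].stack_va_end = 0x40000000 + (n + 1) * STACK_THREAD_SIZE;
	print_thread_stack_limits();
	return 0;
}
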
542 size_t n; in __thread_alloc_and_run() local
550 for (n = 0; n < CFG_NUM_THREADS; n++) { in __thread_alloc_and_run()
551 if (threads[n].state == THREAD_STATE_FREE) { in __thread_alloc_and_run()
552 threads[n].state = THREAD_STATE_ACTIVE; in __thread_alloc_and_run()
563 l->curr_thread = n; in __thread_alloc_and_run()
565 threads[n].flags = 0; in __thread_alloc_and_run()
566 init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc); in __thread_alloc_and_run()
571 thread_resume(&threads[n].regs); in __thread_alloc_and_run()
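__thread_alloc_and_run() walks a fixed pool of thread contexts looking for a FREE slot, claims it, binds it to the calling core via curr_thread, clears its flags, seeds its registers from the call arguments and resumes into it. A simplified sketch of the allocation and bookkeeping part, with locking, register setup (init_regs()) and the context switch (thread_resume()) left out:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CFG_NUM_THREADS		4
#define THREAD_ID_INVALID	(-1)

enum thread_state {
	THREAD_STATE_FREE,
	THREAD_STATE_ACTIVE,
	THREAD_STATE_SUSPENDED,
};

struct thread_ctx {
	enum thread_state state;
	uint32_t flags;
	/* register save area, stack pointers, ... omitted */
};

struct thread_core_local {
	int curr_thread;	/* thread owned by this core, or THREAD_ID_INVALID */
};

static struct thread_ctx threads[CFG_NUM_THREADS];
static struct thread_core_local core_local = { .curr_thread = THREAD_ID_INVALID };

/* Pick a FREE slot, mark it ACTIVE and bind it to this core.
 * Returns the thread id, or THREAD_ID_INVALID if the pool is exhausted.
 * The real code holds a lock around the state transition and then
 * initializes the register context and resumes into the thread. */
static int thread_alloc(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			threads[n].flags = 0;
			core_local.curr_thread = (int)n;
			return (int)n;
		}
	}
	return THREAD_ID_INVALID;
}

int main(void)
{
	printf("allocated thread %d\n", thread_alloc());
	printf("allocated thread %d\n", thread_alloc());
	return 0;
}
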
675 size_t n = thread_id; in thread_resume_from_rpc() local
683 if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) { in thread_resume_from_rpc()
684 threads[n].state = THREAD_STATE_ACTIVE; in thread_resume_from_rpc()
693 l->curr_thread = n; in thread_resume_from_rpc()
695 if (threads[n].have_user_map) { in thread_resume_from_rpc()
696 core_mmu_set_user_map(&threads[n].user_map); in thread_resume_from_rpc()
697 if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) in thread_resume_from_rpc()
701 if (is_user_mode(&threads[n].regs)) in thread_resume_from_rpc()
708 if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) { in thread_resume_from_rpc()
709 copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3); in thread_resume_from_rpc()
710 threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN; in thread_resume_from_rpc()
715 if (threads[n].have_user_map) in thread_resume_from_rpc()
719 thread_resume(&threads[n].regs); in thread_resume_from_rpc()
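thread_resume_from_rpc() is the return path: it validates the caller-supplied thread id, requires the slot to be SUSPENDED, marks it ACTIVE, rebinds it to the core, optionally restores the thread's user address-space map, and copies a0..a3 back into the saved registers when THREAD_FLAGS_COPY_ARGS_ON_RETURN is set before resuming. A hedged sketch of that flow; the struct layout and the flag value are assumptions, and the user-map handling and the final thread_resume() are only noted in comments:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CFG_NUM_THREADS				4
#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	(1U << 0)	/* assumed value */

enum thread_state {
	THREAD_STATE_FREE,
	THREAD_STATE_ACTIVE,
	THREAD_STATE_SUSPENDED,
};

struct thread_regs { uint32_t a[4]; };	/* a0..a3 only, for the sketch */

struct thread_ctx {
	enum thread_state state;
	uint32_t flags;
	bool have_user_map;
	struct thread_regs regs;
};

static struct thread_ctx threads[CFG_NUM_THREADS];
static int curr_thread = -1;

/* Resume a thread that previously suspended itself to perform an RPC.
 * a0..a3 carry the RPC result back into the thread's saved registers
 * when the thread asked for that with COPY_ARGS_ON_RETURN. */
static bool resume_from_rpc(size_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;

	if (n >= CFG_NUM_THREADS || threads[n].state != THREAD_STATE_SUSPENDED)
		return false;	/* stale or bogus thread id */

	threads[n].state = THREAD_STATE_ACTIVE;
	curr_thread = (int)n;

	/* The real code re-installs the thread's user address-space map
	 * here when have_user_map is set. */

	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		threads[n].regs.a[0] = a0;
		threads[n].regs.a[1] = a1;
		threads[n].regs.a[2] = a2;
		threads[n].regs.a[3] = a3;
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	/* ...and finally switches to the saved register context
	 * (thread_resume(&threads[n].regs) in the listing). */
	return true;
}

int main(void)
{
	threads[2].state = THREAD_STATE_SUSPENDED;
	threads[2].flags = THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	return resume_from_rpc(2, 0, 1, 2, 3) ? 0 : 1;
}
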
1000 size_t n = 0; in init_thread_stacks() local
1005 for (n = 0; n < CFG_NUM_THREADS; n++) { in init_thread_stacks()
1031 if (!thread_init_stack(n, sp)) in init_thread_stacks()
1038 size_t n; in init_thread_stacks() local
1041 for (n = 0; n < CFG_NUM_THREADS; n++) { in init_thread_stacks()
1042 if (!thread_init_stack(n, GET_STACK_BOTTOM(stack_thread, n))) in init_thread_stacks()
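init_thread_stacks() appears twice because the file carries two build-time variants (each declares its own local n): one that sets up thread stacks through a separately computed sp, and one that simply hands each thread the bottom of its slot in the static stack_thread array. A sketch of the simpler variant, with a stand-in thread_init_stack() that just records the stack end:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define CFG_NUM_THREADS		4
#define STACK_THREAD_WORDS	2048
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))

static uint32_t stack_thread[CFG_NUM_THREADS][STACK_THREAD_WORDS];
static vaddr_t stack_va_end[CFG_NUM_THREADS];

/* Bottom of a stack slot: end of the array minus the trailing canary half. */
#define GET_STACK_BOTTOM(stack, n) \
	((vaddr_t)&(stack)[n] + sizeof((stack)[n]) - STACK_CANARY_SIZE / 2)

/* Stand-in for thread_init_stack(): record where thread n's stack ends. */
static bool thread_init_stack(size_t n, vaddr_t sp)
{
	if (n >= CFG_NUM_THREADS)
		return false;
	stack_va_end[n] = sp;
	return true;
}

static void init_thread_stacks(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++)
		if (!thread_init_stack(n, GET_STACK_BOTTOM(stack_thread, n)))
			fprintf(stderr, "stack init failed for thread %zu\n", n);
}

int main(void)
{
	init_thread_stacks();
	printf("thr 0 stack ends at %#lx\n", (unsigned long)stack_va_end[0]);
	return 0;
}
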
1075 size_t n = 0; in thread_init_threads() local
1083 for (n = 0; n < CFG_NUM_THREADS; n++) { in thread_init_threads()
1084 TAILQ_INIT(&threads[n].tsd.sess_stack); in thread_init_threads()
1085 SLIST_INIT(&threads[n].tsd.pgt_cache); in thread_init_threads()
1091 size_t n = 0; in thread_init_thread_core_local() local
1094 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { in thread_init_thread_core_local()
1095 tcl[n].curr_thread = THREAD_ID_INVALID; in thread_init_thread_core_local()
1096 tcl[n].flags = THREAD_CLF_TMP; in thread_init_thread_core_local()
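The last two loops initialize the global tables once at boot: every thread context gets empty TA-session and page-table-cache lists, and every core starts with no current thread (THREAD_ID_INVALID) while running on its temporary stack (THREAD_CLF_TMP). A minimal sketch using <sys/queue.h> list heads; the element types and the THREAD_CLF_TMP value are assumptions:

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#define CFG_NUM_THREADS		4
#define CFG_TEE_CORE_NB_CORE	2
#define THREAD_ID_INVALID	(-1)
#define THREAD_CLF_TMP		(1U << 0)	/* assumed flag value */

/* Placeholder element types for the per-thread lists. */
struct ts_session { TAILQ_ENTRY(ts_session) link; };
struct pgt { SLIST_ENTRY(pgt) link; };

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;	/* TA session call stack */
	SLIST_HEAD(, pgt) pgt_cache;		/* cached page tables */
};

struct thread_ctx {
	struct thread_specific_data tsd;
};

struct thread_core_local {
	int curr_thread;
	uint32_t flags;
};

static struct thread_ctx threads[CFG_NUM_THREADS];
static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

static void init_threads(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}
}

static void init_thread_core_local(void)
{
	size_t n;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		/* No thread bound yet; the core starts on its tmp stack. */
		thread_core_local[n].curr_thread = THREAD_ID_INVALID;
		thread_core_local[n].flags = THREAD_CLF_TMP;
	}
}

int main(void)
{
	init_threads();
	init_thread_core_local();
	return 0;
}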