Lines matching refs:thread (reference search for the symbol "thread"; all hits below are in arch/s390/kernel/ptrace.c, and the leading number on each line is the line number within that file)

44 struct thread_struct *thread = &task->thread; in update_cr_regs() local
58 if (task->thread.per_flags & PER_FLAG_NO_TE) in update_cr_regs()
62 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { in update_cr_regs()
63 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) in update_cr_regs()
72 if (task->thread.gs_cb) in update_cr_regs()
83 new.control = thread->per_user.control; in update_cr_regs()
84 new.start = thread->per_user.start; in update_cr_regs()
85 new.end = thread->per_user.end; in update_cr_regs()
138 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); in ptrace_disable()
139 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); in ptrace_disable()
142 task->thread.per_flags = 0; in ptrace_disable()
155 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per()
159 0 : child->thread.per_user.start; in __peek_user_per()
163 -1UL : child->thread.per_user.end; in __peek_user_per()
170 return child->thread.per_user.start; in __peek_user_per()
173 return child->thread.per_user.end; in __peek_user_per()
177 child->thread.per_event.cause << (BITS_PER_LONG - 16); in __peek_user_per()
180 return child->thread.per_event.address; in __peek_user_per()
184 child->thread.per_event.paid << (BITS_PER_LONG - 8); in __peek_user_per()
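The __peek_user_per() hits translate PTRACE_PEEKUSR byte offsets inside the user-visible per_struct into reads of per_user and per_event; the shifts at lines 177 and 184 left-justify the 16-bit event cause and the 8-bit access id within a full word. A minimal tracer-side sketch, assuming the struct user layout from the kernel's asm/user.h (glibc's sys/user.h uses slightly different type names on s390); peek_per is a made-up helper name:

    #include <stddef.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/user.h>   /* struct user; on s390 it embeds regs.per_info */

    /* Hypothetical helper: PTRACE_PEEKUSER (the kernel's PTRACE_PEEKUSR)
     * takes a byte offset into struct user, so a PER field is read at
     * per_info's offset plus the field's offset within per_struct. */
    static long peek_per(pid_t pid, size_t field_off)
    {
            long addr = (long)(offsetof(struct user, regs.per_info) + field_off);
            return ptrace(PTRACE_PEEKUSER, pid, (void *)addr, NULL);
    }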
224 tmp = ((unsigned long) child->thread.acrs[15]) << 32; in __peek_user()
226 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); in __peek_user()
245 tmp = child->thread.fpu.fpc; in __peek_user()
256 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user()
259 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user()
313 child->thread.per_user.control = in __poke_user_per()
317 child->thread.per_user.start = data; in __poke_user_per()
320 child->thread.per_user.end = data; in __poke_user_per()
374 child->thread.acrs[15] = (unsigned int) (data >> 32); in __poke_user()
376 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; in __poke_user()
398 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); in __poke_user()
408 child->thread.fpu.vxrs + 2*offset) = data; in __poke_user()
411 child->thread.fpu.fprs + offset) = data; in __poke_user()
484 put_user(child->thread.last_break, in arch_ptrace()
490 child->thread.per_flags &= ~PER_FLAG_NO_TE; in arch_ptrace()
495 child->thread.per_flags |= PER_FLAG_NO_TE; in arch_ptrace()
496 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
499 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) in arch_ptrace()
503 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
506 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
507 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
510 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
511 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
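The arch_ptrace() hits at lines 490-511 implement the s390-specific transactional-execution (TE) requests: PTRACE_ENABLE_TE clears PER_FLAG_NO_TE, PTRACE_DISABLE_TE sets it and drops the random-abort mode, and PTRACE_TE_ABORT_RAND selects a random-abort mode from its data argument. A tracer-side sketch; the request values are copied from the s390 uapi header as an assumption and should be verified against the kernel in use:

    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* s390-only requests from the kernel's <asm/ptrace.h>; the values
     * below are an assumed fallback copied from the uapi header, to be
     * verified against the running kernel. */
    #ifndef PTRACE_ENABLE_TE
    # define PTRACE_ENABLE_TE     0x5009
    # define PTRACE_DISABLE_TE    0x5010
    # define PTRACE_TE_ABORT_RAND 0x5011
    #endif

    /* Enable TE for a stopped, attached tracee, then request random
     * transaction aborts.  data = 0 turns random aborts off (line 503);
     * 1 and 2 appear to select the two modes that set or clear
     * PER_FLAG_TE_ABORT_RAND_TEND (lines 506-511 above).  Both calls
     * fail with EIO on machines without TE (line 499). */
    static long te_setup(pid_t pid)
    {
            if (ptrace(PTRACE_ENABLE_TE, pid, NULL, NULL) == -1)
                    return -1;
            return ptrace(PTRACE_TE_ABORT_RAND, pid, NULL, (void *)1UL);
    }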
548 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per_compat()
552 0 : child->thread.per_user.start; in __peek_user_per_compat()
556 PSW32_ADDR_INSN : child->thread.per_user.end; in __peek_user_per_compat()
563 return (__u32) child->thread.per_user.start; in __peek_user_per_compat()
566 return (__u32) child->thread.per_user.end; in __peek_user_per_compat()
569 return (__u32) child->thread.per_event.cause << 16; in __peek_user_per_compat()
572 return (__u32) child->thread.per_event.address; in __peek_user_per_compat()
575 return (__u32) child->thread.per_event.paid << 24; in __peek_user_per_compat()
611 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); in __peek_user_compat()
630 tmp = child->thread.fpu.fpc; in __peek_user_compat()
640 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user_compat()
643 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user_compat()
680 child->thread.per_user.control = in __poke_user_per_compat()
684 child->thread.per_user.start = data; in __poke_user_per_compat()
687 child->thread.per_user.end = data; in __poke_user_per_compat()
740 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; in __poke_user_compat()
761 child->thread.fpu.fpc = data; in __poke_user_compat()
771 child->thread.fpu.vxrs + 2*offset) = tmp; in __poke_user_compat()
774 child->thread.fpu.fprs + offset) = tmp; in __poke_user_compat()
840 put_user(child->thread.last_break, in compat_arch_ptrace()
858 save_access_regs(target->thread.acrs); in s390_regs_get()
873 save_access_regs(target->thread.acrs); in s390_regs_set()
896 restore_access_regs(target->thread.acrs); in s390_regs_set()
910 fp_regs.fpc = target->thread.fpu.fpc; in s390_fpregs_get()
911 fpregs_store(&fp_regs, &target->thread.fpu); in s390_fpregs_get()
928 convert_vx_to_fp(fprs, target->thread.fpu.vxrs); in s390_fpregs_set()
930 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs)); in s390_fpregs_set()
934 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; in s390_fpregs_set()
941 target->thread.fpu.fpc = ufpc[0]; in s390_fpregs_set()
951 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); in s390_fpregs_set()
953 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); in s390_fpregs_set()
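From s390_regs_get() onward the hits are regset callbacks, reached from userspace through PTRACE_GETREGSET/PTRACE_SETREGSET with an ELF note type and an iovec. A minimal read-side sketch for the general registers; struct s390_gregs is a hand-written mirror of the kernel's s390_regs (uapi <asm/ptrace.h>), redeclared here only to keep the sketch self-contained:

    #include <elf.h>        /* NT_PRSTATUS */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Assumed mirror of the kernel's s390_regs; prefer the real header. */
    struct s390_gregs {
            unsigned long psw_mask, psw_addr;
            unsigned long gprs[16];
            unsigned int  acrs[16];
            unsigned long orig_gpr2;
    };

    /* The kernel fills at most iov_len bytes and shrinks iov_len to
     * the amount actually transferred. */
    static long get_gregs(pid_t pid, struct s390_gregs *regs)
    {
            struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };
            return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
    }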
962 return membuf_store(&to, target->thread.last_break); in s390_last_break_get()
982 size = sizeof(target->thread.trap_tdb.data); in s390_tdb_get()
983 return membuf_write(&to, target->thread.trap_tdb.data, size); in s390_tdb_get()
1006 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_get()
1024 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_set()
1029 *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i]; in s390_vxrs_low_set()
1042 return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW, in s390_vxrs_high_get()
1059 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); in s390_vxrs_high_set()
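For the vector regsets, the doubleword indexing at lines 1006-1029 (the `+ 1` past the start of each 128-bit vxrs entry) copies only the half of V0-V15 that does not overlap the floating-point registers; the FPR halves travel in NT_PRFPREG instead, and V16-V31 go through NT_S390_VXRS_HIGH whole. A read-side sketch:

    #include <elf.h>        /* NT_S390_VXRS_LOW, NT_S390_VXRS_HIGH */
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* One 64-bit doubleword per vector register V0-V15; the other half
     * of each of these registers is the corresponding FPR. */
    static long get_vxrs_low(pid_t pid, uint64_t low[16])
    {
            struct iovec iov = { .iov_base = low, .iov_len = 16 * sizeof(uint64_t) };
            return ptrace(PTRACE_GETREGSET, pid, (void *)NT_S390_VXRS_LOW, &iov);
    }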
1067 return membuf_store(&to, target->thread.system_call); in s390_system_call_get()
1075 unsigned int *data = &target->thread.system_call; in s390_system_call_set()
1084 struct gs_cb *data = target->thread.gs_cb; in s390_gs_cb_get()
1105 if (!target->thread.gs_cb) { in s390_gs_cb_set()
1110 if (!target->thread.gs_cb) in s390_gs_cb_set()
1115 gs_cb = *target->thread.gs_cb; in s390_gs_cb_set()
1123 if (!target->thread.gs_cb) in s390_gs_cb_set()
1124 target->thread.gs_cb = data; in s390_gs_cb_set()
1125 *target->thread.gs_cb = gs_cb; in s390_gs_cb_set()
1128 restore_gs_cb(target->thread.gs_cb); in s390_gs_cb_set()
1138 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_get()
1152 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_set()
1160 target->thread.gs_bc_cb = data; in s390_gs_bc_set()
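s390_gs_cb_set() (lines 1105-1128) allocates thread.gs_cb on first use and reloads it via restore_gs_cb() when the target is the current task. Reaching it from a tracer is another regset transfer; struct gs_cb_sketch below is an assumed mirror of the kernel's struct gs_cb from the uapi guarded-storage header, repeated only to keep the sketch self-contained:

    #include <elf.h>        /* NT_S390_GS_CB (present in newer headers) */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Assumed mirror of the kernel's struct gs_cb (uapi
     * <asm/guarded_storage.h>): a reserved doubleword, then the GS
     * designation, GS section mask and GS event-parameter-list address. */
    struct gs_cb_sketch {
            unsigned long long reserved, gsd, gssm, gs_epl_a;
    };

    static long get_gs_cb(pid_t pid, struct gs_cb_sketch *cb)
    {
            struct iovec iov = { .iov_base = cb, .iov_len = sizeof(*cb) };
            return ptrace(PTRACE_GETREGSET, pid, (void *)NT_S390_GS_CB, &iov);
    }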
1196 struct runtime_instr_cb *data = target->thread.ri_cb; in s390_runtime_instr_get()
1217 if (!target->thread.ri_cb) { in s390_runtime_instr_set()
1223 if (target->thread.ri_cb) { in s390_runtime_instr_set()
1227 ri_cb = *target->thread.ri_cb; in s390_runtime_instr_set()
1247 if (!target->thread.ri_cb) in s390_runtime_instr_set()
1248 target->thread.ri_cb = data; in s390_runtime_instr_set()
1249 *target->thread.ri_cb = ri_cb; in s390_runtime_instr_set()
1251 load_runtime_instr_cb(target->thread.ri_cb); in s390_runtime_instr_set()
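The runtime-instrumentation setter follows the same pattern as guarded storage: allocate thread.ri_cb on first PTRACE_SETREGSET, validate the control block, and reload it with load_runtime_instr_cb() if the target is running (lines 1217-1251). A set-side sketch; NT_S390_RI_CB comes from <elf.h>, and the control-block length is passed in rather than assumed, since its layout lives in the s390 headers:

    #include <elf.h>        /* NT_S390_RI_CB */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Install a runtime-instrumentation control block in the tracee.
     * cb/len must match the kernel's struct runtime_instr_cb; the
     * kernel rejects invalid blocks with -EINVAL. */
    static long set_ri_cb(pid_t pid, const void *cb, size_t len)
    {
            struct iovec iov = { .iov_base = (void *)cb, .iov_len = len };
            return ptrace(PTRACE_SETREGSET, pid, (void *)NT_S390_RI_CB, &iov);
    }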
1355 save_access_regs(target->thread.acrs); in s390_compat_regs_get()
1370 save_access_regs(target->thread.acrs); in s390_compat_regs_set()
1393 restore_access_regs(target->thread.acrs); in s390_compat_regs_set()
1448 compat_ulong_t last_break = target->thread.last_break; in s390_compat_last_break_get()