Lines Matching refs:sregs (arch/x86/kvm/x86.c)
10350 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs_common() argument
10357 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs_common()
10358 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs_common()
10359 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs_common()
10360 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs_common()
10361 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs_common()
10362 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs_common()
10364 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs_common()
10365 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs_common()
10368 sregs->idt.limit = dt.size; in __get_sregs_common()
10369 sregs->idt.base = dt.address; in __get_sregs_common()
10371 sregs->gdt.limit = dt.size; in __get_sregs_common()
10372 sregs->gdt.base = dt.address; in __get_sregs_common()
10374 sregs->cr2 = vcpu->arch.cr2; in __get_sregs_common()
10375 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs_common()
10378 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs_common()
10379 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs_common()
10380 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs_common()
10381 sregs->efer = vcpu->arch.efer; in __get_sregs_common()
10382 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs_common()
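
For reference, the structure that __get_sregs_common() fills is the x86 KVM UAPI struct kvm_sregs; the sketch below reproduces its declaration (field order as in the <asm/kvm.h> UAPI header) so each line above can be matched to a field. Note that interrupt_bitmap is not touched by the _common helper; it is handled by the __get_sregs()/__set_sregs() wrappers further down.

/* Declaration as found in the x86 KVM UAPI header (reproduced here for
 * reference; userspace gets it via <linux/kvm.h>, which includes <asm/kvm.h>). */
struct kvm_sregs {
        /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
        struct kvm_segment cs, ds, es, fs, gs, ss;
        struct kvm_segment tr, ldt;
        struct kvm_dtable gdt, idt;
        __u64 cr0, cr2, cr3, cr4, cr8;
        __u64 efer;
        __u64 apic_base;
        __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
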
10385 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs() argument
10387 __get_sregs_common(vcpu, sregs); in __get_sregs()
10394 (unsigned long *)sregs->interrupt_bitmap); in __get_sregs()
10414 struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_get_sregs() argument
10417 __get_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_get_sregs()
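
Taken together, __get_sregs_common(), __get_sregs() and kvm_arch_vcpu_ioctl_get_sregs() form the read side of the KVM_GET_SREGS ioctl: segments, descriptor tables, control registers, EFER and the APIC base are copied into the caller's struct kvm_sregs, with __get_sregs() additionally recording any pending interrupt in interrupt_bitmap. A minimal userspace sketch of the consumer side (VM and vCPU creation, plus most error handling, are assumed to have happened elsewhere):

/* Sketch: read a vCPU's special registers via KVM_GET_SREGS.
 * Assumes vcpu_fd was obtained from KVM_CREATE_VCPU on a VM fd. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int dump_sregs(int vcpu_fd)
{
        struct kvm_sregs sregs;

        if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0) {
                perror("KVM_GET_SREGS");
                return -1;
        }

        /* Fields filled in by __get_sregs_common() on the kernel side. */
        printf("CR0=%llx CR3=%llx CR4=%llx EFER=%llx\n",
               sregs.cr0, sregs.cr3, sregs.cr4, sregs.efer);
        printf("CS sel=%x base=%llx limit=%x l=%u\n",
               sregs.cs.selector, sregs.cs.base, sregs.cs.limit, sregs.cs.l);
        printf("GDT base=%llx limit=%x\n", sregs.gdt.base, sregs.gdt.limit);
        return 0;
}
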
10507 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_is_valid_sregs() argument
10509 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { in kvm_is_valid_sregs()
10515 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) in kvm_is_valid_sregs()
10517 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) in kvm_is_valid_sregs()
10524 if (sregs->efer & EFER_LMA || sregs->cs.l) in kvm_is_valid_sregs()
10528 return kvm_is_valid_cr4(vcpu, sregs->cr4); in kvm_is_valid_sregs()
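
The kvm_is_valid_sregs() lines above are the gate in front of the set path: long mode with paging requires PAE and EFER.LMA (plus a legal CR3), while a guest outside long mode must not claim EFER.LMA or a 64-bit (cs.l) code segment. The standalone sketch below restates only the checks visible in these lines; the bit masks are redefined locally because the kernel-internal headers are not available to userspace, and the helper itself is illustrative, not a kernel function.

/* Illustrative re-statement of the EFER/CR0/CR4 consistency check visible
 * in kvm_is_valid_sregs(); the CR3 and CR4 validation the kernel also
 * performs is omitted here. */
#include <stdbool.h>
#include <linux/kvm.h>

#define X86_CR0_PG   (1ULL << 31)
#define X86_CR4_PAE  (1ULL << 5)
#define EFER_LME     (1ULL << 8)
#define EFER_LMA     (1ULL << 10)

static bool sregs_long_mode_consistent(const struct kvm_sregs *sregs)
{
        if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
                /* Long mode with paging: PAE and LMA must both be set. */
                if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
                        return false;
        } else {
                /* Not in long mode: LMA and a 64-bit CS are illegal. */
                if ((sregs->efer & EFER_LMA) || sregs->cs.l)
                        return false;
        }
        return true;
}
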
10531 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, in __set_sregs_common() argument
10538 if (!kvm_is_valid_sregs(vcpu, sregs)) in __set_sregs_common()
10541 apic_base_msr.data = sregs->apic_base; in __set_sregs_common()
10549 dt.size = sregs->idt.limit; in __set_sregs_common()
10550 dt.address = sregs->idt.base; in __set_sregs_common()
10552 dt.size = sregs->gdt.limit; in __set_sregs_common()
10553 dt.address = sregs->gdt.base; in __set_sregs_common()
10556 vcpu->arch.cr2 = sregs->cr2; in __set_sregs_common()
10557 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs_common()
10558 vcpu->arch.cr3 = sregs->cr3; in __set_sregs_common()
10561 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs_common()
10563 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs_common()
10564 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); in __set_sregs_common()
10566 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs_common()
10567 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); in __set_sregs_common()
10568 vcpu->arch.cr0 = sregs->cr0; in __set_sregs_common()
10570 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs_common()
10571 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); in __set_sregs_common()
10582 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs_common()
10583 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs_common()
10584 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs_common()
10585 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs_common()
10586 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs_common()
10587 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs_common()
10589 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs_common()
10590 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs_common()
10596 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs_common()
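
__set_sregs_common() is the write-side mirror: after validation it installs the descriptor tables, CR2/CR3/CR8, EFER, CR0 and CR4 (noting in *mmu_reset_needed whenever a paging-relevant value changed), then loads the segment registers. The usual userspace pattern built on top of this path is read-modify-write via KVM_GET_SREGS/KVM_SET_SREGS; the sketch below flips CR0.PE and loads flat 32-bit segments, with the selector and type values chosen only for illustration.

/* Sketch: switch a freshly created vCPU to flat 32-bit protected mode
 * by rewriting its sregs.  Segment encodings are illustrative. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void setup_protected_mode(int vcpu_fd)
{
        struct kvm_sregs sregs;
        struct kvm_segment seg = {
                .base = 0, .limit = 0xffffffff, .selector = 0x8,
                .present = 1, .type = 11,       /* execute/read, accessed */
                .dpl = 0, .db = 1, .s = 1, .l = 0, .g = 1,
        };

        ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);  /* error handling omitted */

        sregs.cr0 |= 0x1;                       /* CR0.PE */
        sregs.cs = seg;

        seg.type = 3;                           /* read/write, accessed */
        seg.selector = 0x10;
        sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg;

        /* Rejected by kvm_is_valid_sregs() if the new state is inconsistent. */
        ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}
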
10603 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __set_sregs() argument
10607 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); in __set_sregs()
10617 (const unsigned long *)sregs->interrupt_bitmap, max_bits); in __set_sregs()
10660 struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_set_sregs() argument
10665 ret = __set_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_set_sregs()
10828 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
10842 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
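
store_regs() and sync_regs() are the kvm_run-backed variants of the same two paths: with KVM_CAP_SYNC_REGS available, the kernel copies sregs into run->s.regs.sregs after KVM_RUN and applies userspace modifications before the next entry, avoiding separate ioctl round-trips. A sketch of that usage, assuming `run` is the mmap()ed struct kvm_run of the vCPU and the capability has been checked:

/* Sketch: consume sregs through the shared kvm_run area instead of
 * KVM_GET_SREGS/KVM_SET_SREGS.  Assumes KVM_CAP_SYNC_REGS is supported. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int run_with_synced_sregs(int vcpu_fd, struct kvm_run *run)
{
        /* Ask the kernel to fill run->s.regs.sregs on exit (store_regs()). */
        run->kvm_valid_regs |= KVM_SYNC_X86_SREGS;

        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;

        /* run->s.regs.sregs is now current; modify it and mark it dirty so
         * sync_regs() applies it before the next KVM_RUN. */
        run->s.regs.sregs.cr4 |= (1ULL << 9);   /* e.g. set CR4.OSFXSR */
        run->kvm_dirty_regs |= KVM_SYNC_X86_SREGS;
        return 0;
}
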