Lines Matching refs:kvm
263 static int sca_switch_to_extended(struct kvm *kvm);
296 struct kvm *kvm; in kvm_clock_sync() local
301 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
302 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
305 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
306 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
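
The kvm_clock_sync() hits above show the epoch-propagation pattern: for every VM on vm_list, shift each vCPU's SIE epoch, then mirror the per-vCPU value at the VM level. A minimal C sketch of that inner loop, assuming stub types in place of the kernel structures:

/* Minimal sketch of the kvm_clock_sync() inner loop; the types below
 * are illustrative stubs, not the kernel's definitions. */
struct sie_block { long long epoch, epdx; };
struct kvm_vcpu { struct sie_block sie; };
struct kvm_vm {
	long long epoch, epdx;
	struct kvm_vcpu *vcpu[8];
	int nr_vcpus;
};

static void clock_sync_vm(struct kvm_vm *vm, long long delta)
{
	for (int i = 0; i < vm->nr_vcpus; i++) {
		vm->vcpu[i]->sie.epoch += delta;	/* shift each vCPU's epoch */
		vm->epoch = vm->vcpu[i]->sie.epoch;	/* VM mirrors the vCPU view */
		vm->epdx  = vm->vcpu[i]->sie.epdx;
	}
}
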
533 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
574 if (hpage && !kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_check_extension()
615 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
620 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
636 mark_page_dirty(kvm, cur_gfn + i); in kvm_arch_sync_dirty_log()
651 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
659 if (kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_get_dirty_log()
662 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
668 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); in kvm_vm_ioctl_get_dirty_log()
679 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
683 static void icpt_operexc_on_all_vcpus(struct kvm *kvm) in icpt_operexc_on_all_vcpus() argument
688 kvm_for_each_vcpu(i, vcpu, kvm) { in icpt_operexc_on_all_vcpus()
693 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
702 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); in kvm_vm_ioctl_enable_cap()
703 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
707 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); in kvm_vm_ioctl_enable_cap()
708 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
712 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
713 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
716 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
717 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
719 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
720 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
723 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
724 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
727 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
728 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
731 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
732 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
735 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
736 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
741 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
742 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", in kvm_vm_ioctl_enable_cap()
747 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
748 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
751 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
752 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
755 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
756 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", in kvm_vm_ioctl_enable_cap()
760 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
761 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
764 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
765 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
768 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
769 VM_EVENT(kvm, 3, "ENABLE: AIS %s", in kvm_vm_ioctl_enable_cap()
774 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
775 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
778 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
779 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
782 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
783 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", in kvm_vm_ioctl_enable_cap()
787 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
788 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
790 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
794 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
795 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
796 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
802 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
803 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
805 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
806 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s", in kvm_vm_ioctl_enable_cap()
810 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); in kvm_vm_ioctl_enable_cap()
811 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
815 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0"); in kvm_vm_ioctl_enable_cap()
816 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
817 icpt_operexc_on_all_vcpus(kvm); in kvm_vm_ioctl_enable_cap()
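
A recurring shape in the kvm_vm_ioctl_enable_cap() hits above: take kvm->lock, refuse with -EBUSY once vCPUs exist, otherwise set the facility bit in both fac_mask and fac_list. A hedged sketch of one such arm, with pthread and stub types replacing the kernel primitives; set_fac() mimics s390's MSB-first facility numbering:

/* Sketch of one KVM_ENABLE_CAP arm (vector registers, facility 129);
 * stub types, user-space locking. */
#include <errno.h>
#include <pthread.h>

struct vm {
	pthread_mutex_t lock;
	int created_vcpus;
	unsigned long long fac_mask[4], fac_list[4];
};

static void set_fac(unsigned long long *facs, int nr)
{
	facs[nr / 64] |= 1ULL << (63 - (nr % 64));	/* MSB-first, as on s390 */
}

static int enable_vector_regs(struct vm *vm)
{
	int r = 0;

	pthread_mutex_lock(&vm->lock);
	if (vm->created_vcpus)
		r = -EBUSY;			/* too late: vCPUs already exist */
	else {
		set_fac(vm->fac_mask, 129);	/* facility 129: vector support */
		set_fac(vm->fac_list, 129);
	}
	pthread_mutex_unlock(&vm->lock);
	return r;
}
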
827 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
834 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
835 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
836 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
846 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
856 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); in kvm_s390_set_mem_control()
857 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
858 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
860 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
863 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
865 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
868 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
875 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
878 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); in kvm_s390_set_mem_control()
879 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
880 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
881 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
882 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
883 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
889 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
895 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
896 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
907 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
908 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
915 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
916 new->private = kvm; in kvm_s390_set_mem_control()
917 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
921 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
922 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit); in kvm_s390_set_mem_control()
923 VM_EVENT(kvm, 3, "New guest asce: 0x%pK", in kvm_s390_set_mem_control()
924 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
936 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) in kvm_s390_vcpu_crypto_reset_all() argument
941 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
943 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_crypto_reset_all()
949 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
952 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
954 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
957 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
958 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
962 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
963 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
964 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
965 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
968 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
969 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
973 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
974 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
975 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
976 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
979 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
980 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
983 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
984 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
985 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
986 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
989 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
990 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
993 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
994 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
995 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
996 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1000 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1003 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1007 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1010 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1013 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1017 kvm_s390_vcpu_crypto_reset_all(kvm); in kvm_s390_vm_set_crypto()
1018 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
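
Every arm of kvm_s390_vm_set_crypto() above funnels into kvm_s390_vcpu_crypto_reset_all() before kvm->lock is dropped, so no vCPU runs SIE while its crypto control block changes. A sketch of that quiesce/reconfigure/resume shape; all helpers here are stand-ins:

/* Sketch of the block-all / reconfigure / unblock-all pattern behind
 * kvm_s390_vcpu_crypto_reset_all(); stubbed helpers, not kernel API. */
struct vm;				/* opaque for this sketch */
void block_all_vcpus(struct vm *vm);	/* assumed: halts SIE entry */
void unblock_all_vcpus(struct vm *vm);
void reset_vcpu_crypto(struct vm *vm, int i);
int  nr_vcpus(struct vm *vm);

static void crypto_reset_all(struct vm *vm)
{
	block_all_vcpus(vm);		/* no vCPU runs while we edit */
	for (int i = 0; i < nr_vcpus(vm); i++)
		reset_vcpu_crypto(vm, i);
	unblock_all_vcpus(vm);
}
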
1022 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) in kvm_s390_sync_request_broadcast() argument
1027 kvm_for_each_vcpu(cx, vcpu, kvm) in kvm_s390_sync_request_broadcast()
1035 static int kvm_s390_vm_start_migration(struct kvm *kvm) in kvm_s390_vm_start_migration() argument
1043 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1045 slots = kvm_memslots(kvm); in kvm_s390_vm_start_migration()
1049 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1050 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1067 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1068 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1069 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION); in kvm_s390_vm_start_migration()
1077 static int kvm_s390_vm_stop_migration(struct kvm *kvm) in kvm_s390_vm_stop_migration() argument
1080 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1082 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1083 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1084 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); in kvm_s390_vm_stop_migration()
1088 static int kvm_s390_vm_set_migration(struct kvm *kvm, in kvm_s390_vm_set_migration() argument
1093 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1096 res = kvm_s390_vm_start_migration(kvm); in kvm_s390_vm_set_migration()
1099 res = kvm_s390_vm_stop_migration(kvm); in kvm_s390_vm_set_migration()
1104 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1109 static int kvm_s390_vm_get_migration(struct kvm *kvm, in kvm_s390_vm_get_migration() argument
1112 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
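
The migration attribute above is a small state machine serialized on slots_lock: start counts dirtyable pages into cmma_dirty_pages and broadcasts KVM_REQ_START_MIGRATION, stop clears the mode. A sketch of the dispatch, with a pthread mutex standing in for the kernel lock and trivial stub handlers:

/* Sketch of kvm_s390_vm_set_migration()'s dispatch; stubbed handlers. */
#include <pthread.h>

struct vm { pthread_mutex_t slots_lock; int migration_mode; };

static int start_migration(struct vm *vm) { vm->migration_mode = 1; return 0; }
static int stop_migration(struct vm *vm)  { vm->migration_mode = 0; return 0; }

static int set_migration(struct vm *vm, int start)
{
	int res;

	pthread_mutex_lock(&vm->slots_lock);
	res = start ? start_migration(vm) : stop_migration(vm);
	pthread_mutex_unlock(&vm->slots_lock);
	return res;
}
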
1122 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_ext() argument
1129 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) in kvm_s390_set_tod_ext()
1131  		kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_ext()
1133 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_set_tod_ext()
1139 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
1149 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); in kvm_s390_set_tod_high()
1154 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
1162  	kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_low()
1163 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); in kvm_s390_set_tod_low()
1167 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
1176 ret = kvm_s390_set_tod_ext(kvm, attr); in kvm_s390_set_tod()
1179 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
1182 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
1191 static void kvm_s390_get_tod_clock(struct kvm *kvm, in kvm_s390_get_tod_clock() argument
1200 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1202 if (test_kvm_facility(kvm, 139)) { in kvm_s390_get_tod_clock()
1203 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1211 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_ext() argument
1216  	kvm_s390_get_tod_clock(kvm, &gtod); in kvm_s390_get_tod_ext()
1220 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1225 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
1232 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1237 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
1241 gtod = kvm_s390_get_tod_clock_fast(kvm); in kvm_s390_get_tod_low()
1244 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1249 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
1258 ret = kvm_s390_get_tod_ext(kvm, attr); in kvm_s390_get_tod()
1261 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
1264 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
1273 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
1279 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1280 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1291 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1296 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1298 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1300 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1302 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1304 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_set_processor()
1305 kvm->arch.model.ibc, in kvm_s390_set_processor()
1306 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1307 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_set_processor()
1308 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1309 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1310 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1315 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1319 static int kvm_s390_set_processor_feat(struct kvm *kvm, in kvm_s390_set_processor_feat() argument
1331 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1332 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1333 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1336 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1338 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1339 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_set_processor_feat()
1346 static int kvm_s390_set_processor_subfunc(struct kvm *kvm, in kvm_s390_set_processor_subfunc() argument
1349 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1350 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1351 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1355 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1357 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1360 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1362 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1363 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1364 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1365 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1366 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1367 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1368 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1369 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1370 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1371 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1372 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1373 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1374 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1375 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1376 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1377 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1378 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1379 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1382 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1383 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1384 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1385 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1386 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1387 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1388 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1389 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1390 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1391 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1392 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1394 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1395 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1396 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1397 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1398 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1399 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1400 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1401 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1402 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1403 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1404 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1405 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1406 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1407 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1408 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1409 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1410 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1411 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1412 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1413 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1414 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1415 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1416 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1417 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1418 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1423 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
1429 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
1432 ret = kvm_s390_set_processor_feat(kvm, attr); in kvm_s390_set_cpu_model()
1435 ret = kvm_s390_set_processor_subfunc(kvm, attr); in kvm_s390_set_cpu_model()
1441 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
1451 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1452 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1453 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1455 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_get_processor()
1456 kvm->arch.model.ibc, in kvm_s390_get_processor()
1457 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1458 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_processor()
1459 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1460 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1461 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1469 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
1481 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1485 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx", in kvm_s390_get_machine()
1486 kvm->arch.model.ibc, in kvm_s390_get_machine()
1487 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1488 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1492 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1503 static int kvm_s390_get_processor_feat(struct kvm *kvm, in kvm_s390_get_processor_feat() argument
1508 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1512 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_processor_feat()
1519 static int kvm_s390_get_machine_feat(struct kvm *kvm, in kvm_s390_get_machine_feat() argument
1529 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_machine_feat()
1536 static int kvm_s390_get_processor_subfunc(struct kvm *kvm, in kvm_s390_get_processor_subfunc() argument
1539 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1543 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1545 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1546 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1548 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1549 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1551 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1552 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1554 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1555 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1557 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1558 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1560 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1563 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1566 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1568 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1569 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1570 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1571 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1572 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1575 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1577 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1578 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1579 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1580 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1581 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1582 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1583 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1584 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1585 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1586 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1587 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1588 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1589 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1590 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1591 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1592 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1593 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1594 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1595 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1596 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1597 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1598 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1599 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1604 static int kvm_s390_get_machine_subfunc(struct kvm *kvm, in kvm_s390_get_machine_subfunc() argument
1611 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1616 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1619 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1622 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1625 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1628 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1631 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1634 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1637 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1640 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1643 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1646 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1649 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1652 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1655 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1658 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1663 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1672 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
1678 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
1681 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
1684 ret = kvm_s390_get_processor_feat(kvm, attr); in kvm_s390_get_cpu_model()
1687 ret = kvm_s390_get_machine_feat(kvm, attr); in kvm_s390_get_cpu_model()
1690 ret = kvm_s390_get_processor_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1693 ret = kvm_s390_get_machine_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1699 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
1705 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
1708 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
1711 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
1714 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
1717 ret = kvm_s390_vm_set_migration(kvm, attr); in kvm_s390_vm_set_attr()
1727 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
1733 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
1736 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
1739 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
1742 ret = kvm_s390_vm_get_migration(kvm, attr); in kvm_s390_vm_get_attr()
1752 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
1825 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
1847 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
1849 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
1859 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
1873 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
1905 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
1908 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
1930 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
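
Both skey paths above bracket their gfn_to_hva() loops in an SRCU read-side section so the memslot array cannot be torn down mid-walk. A sketch of the bracket; the lock operations are declared as stubs rather than any real SRCU API:

/* Sketch of the SRCU read-side bracket around a guest-frame walk;
 * srcu_read_lock()/unlock() are stub declarations here. */
struct srcu;
int  srcu_read_lock(struct srcu *s);
void srcu_read_unlock(struct srcu *s, int idx);
unsigned long gfn_to_hva_stub(unsigned long gfn);

static void walk_skeys(struct srcu *s, unsigned long start_gfn, int count)
{
	int idx = srcu_read_lock(s);	/* memslots stay valid from here */
	for (int i = 0; i < count; i++)
		(void)gfn_to_hva_stub(start_gfn + i);
	srcu_read_unlock(s, idx);	/* allow memslot reclaim again */
}
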
1981 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_peek_cmma() argument
1988 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_peek_cmma()
1995 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2029 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_get_cmma() argument
2033 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_s390_get_cmma()
2040 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2049 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_get_cmma()
2054 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2055 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2072 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2088 static int kvm_s390_get_cmma_bits(struct kvm *kvm, in kvm_s390_get_cmma_bits() argument
2095 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2102 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2106 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2111 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2120 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2121 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2123 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2125 ret = kvm_s390_get_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2126 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2127 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2129 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2130 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2146 static int kvm_s390_set_cmma_bits(struct kvm *kvm, in kvm_s390_set_cmma_bits() argument
2155 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2177 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2178 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2180 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2189 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2191 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2192 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2194 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2195 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2196 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2197 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
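
kvm_s390_set_cmma_bits() above splits its locking deliberately: the per-page PGSTE updates run under mmap_read_lock(), and only the one-time uses_cmm flip upgrades to mmap_write_lock(). A sketch of that pattern with a pthread rwlock as a stand-in:

/* Sketch of the read-mostly update plus one-time write upgrade in
 * kvm_s390_set_cmma_bits(); pthread rwlock replaces mmap_lock. */
#include <pthread.h>

struct mm { pthread_rwlock_t lock; int uses_cmm; };

static void set_cmma_bits(struct mm *mm, int npages)
{
	pthread_rwlock_rdlock(&mm->lock);
	for (int i = 0; i < npages; i++)
		;			/* per-page PGSTE update goes here */
	pthread_rwlock_unlock(&mm->lock);

	if (!mm->uses_cmm) {		/* cheap check; settled under wrlock */
		pthread_rwlock_wrlock(&mm->lock);
		mm->uses_cmm = 1;
		pthread_rwlock_unlock(&mm->lock);
	}
}
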
2204 static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp) in kvm_s390_cpus_from_pv() argument
2219 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_from_pv()
2231 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_to_pv() argument
2238 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_to_pv()
2246 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); in kvm_s390_cpus_to_pv()
2250 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) in kvm_s390_handle_pv() argument
2259 if (kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2266 r = sca_switch_to_extended(kvm); in kvm_s390_handle_pv()
2276 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2280 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2282 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy); in kvm_s390_handle_pv()
2285 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2290 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2293 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2301 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2304 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2312 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2332 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length, in kvm_s390_handle_pv()
2342 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2349 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak, in kvm_s390_handle_pv()
2355 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2358 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2360 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2366 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2369 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2371 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", in kvm_s390_handle_pv()
2377 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2380 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2382 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", in kvm_s390_handle_pv()
2395 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
2407 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
2414 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
2417 r = kvm_set_irq_routing(kvm, &routing, 0, 0); in kvm_arch_vm_ioctl()
2425 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
2432 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
2439 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
2449 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
2459 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
2468 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2469 r = kvm_s390_get_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
2470 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2484 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2485 r = kvm_s390_set_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
2486 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2493 kvm_s390_set_user_cpu_state_ctrl(kvm); in kvm_arch_vm_ioctl()
2507 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
2508 r = kvm_s390_handle_pv(kvm, &args); in kvm_arch_vm_ioctl()
2509 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
2543 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
2545 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2548 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
2551 if (!test_kvm_facility(kvm, 76)) in kvm_s390_set_crycb_format()
2555 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2557 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
2575 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, in kvm_arch_crypto_set_masks() argument
2578 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
2580 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_set_masks()
2582 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
2585 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
2588 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
2591 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
2599 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", in kvm_arch_crypto_set_masks()
2608 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_set_masks()
2609 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_set_masks()
2625 void kvm_arch_crypto_clear_masks(struct kvm *kvm) in kvm_arch_crypto_clear_masks() argument
2627 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_clear_masks()
2629 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
2630 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
2631 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
2632 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
2634 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); in kvm_arch_crypto_clear_masks()
2636 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_clear_masks()
2637 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_clear_masks()
2650 static void kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
2652 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2653 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
2654 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
2656 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
2660 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2661 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2662 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2663 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2664 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2665 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
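
kvm_s390_crypto_init() above enables both key-wrapping facilities and seeds the wrapping-key masks with fresh randomness. A user-space sketch under the assumption that getentropy() stands in for get_random_bytes() and the mask sizes match the crycb layout:

/* Sketch of the wrapping-key setup in kvm_s390_crypto_init();
 * getentropy() replaces the kernel's get_random_bytes(). */
#include <unistd.h>

struct crycb { unsigned char aes_wkm[32], dea_wkm[24]; };	/* assumed sizes */
struct vm_crypto { struct crycb *crycb; int aes_kw, dea_kw; };

static void crypto_init(struct vm_crypto *c)
{
	c->aes_kw = 1;			/* AES key wrapping on */
	c->dea_kw = 1;			/* DEA key wrapping on */
	(void)getentropy(c->crycb->aes_wkm, sizeof(c->crycb->aes_wkm));
	(void)getentropy(c->crycb->dea_wkm, sizeof(c->crycb->dea_wkm));
}
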
2668 static void sca_dispose(struct kvm *kvm) in sca_dispose() argument
2670 if (kvm->arch.use_esca) in sca_dispose()
2671 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2673 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2674 kvm->arch.sca = NULL; in sca_dispose()
2677 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
2703 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2705 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2706 if (!kvm->arch.sca) in kvm_arch_init_vm()
2712 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2713 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
2718 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2719 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2723 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2725 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2728 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
2729 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2732 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
2735 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
2738 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
2741 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2742 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2744 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2745 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2747 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2748 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2752 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
2754 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2755 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2757 kvm_s390_crypto_init(kvm); in kvm_arch_init_vm()
2759 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2760 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2762 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2763 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2764 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2766 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2767 VM_EVENT(kvm, 3, "vm created with type %lu", type); in kvm_arch_init_vm()
2770 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2771 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2774 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2776 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2778 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2779 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2781 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2782 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2785 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2786 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2787 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2788 kvm_s390_vsie_init(kvm); in kvm_arch_init_vm()
2790 kvm_s390_gisa_init(kvm); in kvm_arch_init_vm()
2791 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
2795 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2796 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
2797 sca_dispose(kvm); in kvm_arch_init_vm()
2810 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2813 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2816 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2824 static void kvm_free_vcpus(struct kvm *kvm) in kvm_free_vcpus() argument
2829 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
2832 mutex_lock(&kvm->lock); in kvm_free_vcpus()
2833 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
2834 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
2836 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
2837 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
2840 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
2844 kvm_free_vcpus(kvm); in kvm_arch_destroy_vm()
2845 sca_dispose(kvm); in kvm_arch_destroy_vm()
2846 kvm_s390_gisa_destroy(kvm); in kvm_arch_destroy_vm()
2853 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_destroy_vm()
2854 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc); in kvm_arch_destroy_vm()
2855 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2856 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2857 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
2858 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2859 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
2860 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
2861 kvm_s390_vsie_destroy(kvm); in kvm_arch_destroy_vm()
2862 KVM_EVENT(3, "vm 0x%pK destroyed", kvm); in kvm_arch_destroy_vm()
2871 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2880 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2881 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2882 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2887 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2892 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2898 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2905 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2906 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2907 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2915 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2922 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2943 static int sca_switch_to_extended(struct kvm *kvm) in sca_switch_to_extended() argument
2945 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2951 if (kvm->arch.use_esca) in sca_switch_to_extended()
2961 kvm_s390_vcpu_block_all(kvm); in sca_switch_to_extended()
2962 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2966 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { in sca_switch_to_extended()
2971 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2972 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2974 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2975 kvm_s390_vcpu_unblock_all(kvm); in sca_switch_to_extended()
2979 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
2980 old_sca, kvm->arch.sca); in sca_switch_to_extended()
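
sca_switch_to_extended() above layers its protections in a strict order: block every vCPU, take the sca_lock write side, repoint the vCPUs at the new block, swap the pointer, then release in reverse. A sketch of that ordering; the helpers are stand-ins:

/* Sketch of the quiesce + write-locked pointer swap in
 * sca_switch_to_extended(); helpers and types are stand-ins. */
#include <pthread.h>

struct vm { pthread_rwlock_t sca_lock; void *sca; int use_esca; };

void block_all_vcpus(struct vm *vm);
void unblock_all_vcpus(struct vm *vm);
void repoint_vcpus(struct vm *vm, void *new_sca);

static void switch_sca(struct vm *vm, void *new_sca)
{
	block_all_vcpus(vm);			/* no SIE entries during swap */
	pthread_rwlock_wrlock(&vm->sca_lock);	/* exclude vm->sca readers */
	repoint_vcpus(vm, new_sca);
	vm->sca = new_sca;
	vm->use_esca = 1;
	pthread_rwlock_unlock(&vm->sca_lock);
	unblock_all_vcpus(vm);
}
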
2984 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) in sca_can_add_vcpu() argument
2998 mutex_lock(&kvm->lock); in sca_can_add_vcpu()
2999 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
3000 mutex_unlock(&kvm->lock); in sca_can_add_vcpu()
3115 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3117 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3118 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3120 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3121 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3122 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3125 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3131 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) in kvm_has_pckmo_subfunc() argument
3133 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3139 static bool kvm_has_pckmo_ecc(struct kvm *kvm) in kvm_has_pckmo_ecc() argument
3142 return kvm_has_pckmo_subfunc(kvm, 32) || in kvm_has_pckmo_ecc()
3143 kvm_has_pckmo_subfunc(kvm, 33) || in kvm_has_pckmo_ecc()
3144 kvm_has_pckmo_subfunc(kvm, 34) || in kvm_has_pckmo_ecc()
3145 kvm_has_pckmo_subfunc(kvm, 40) || in kvm_has_pckmo_ecc()
3146 kvm_has_pckmo_subfunc(kvm, 41); in kvm_has_pckmo_ecc()
3156 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3159 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3164 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3168 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3171 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3175 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3195 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3198 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3211 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3213 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3221 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3223 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3225 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3228 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3230 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3241 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3245 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3247 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3263 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3275 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3276 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3281 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3286 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) in kvm_arch_vcpu_precreate() argument
3288 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) in kvm_arch_vcpu_precreate()
3312 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin; in kvm_arch_vcpu_create()
3327 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3329 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3331 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3333 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3343 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3349 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3359 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3368 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
3427 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
3437 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
3575 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
3807 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
3904 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3905 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
3918 void kvm_s390_set_tod_clock(struct kvm *kvm, in kvm_s390_set_tod_clock() argument
3925 mutex_lock(&kvm->lock); in kvm_s390_set_tod_clock()
3930 kvm->arch.epoch = gtod->tod - clk.tod; in kvm_s390_set_tod_clock()
3931 kvm->arch.epdx = 0; in kvm_s390_set_tod_clock()
3932 if (test_kvm_facility(kvm, 139)) { in kvm_s390_set_tod_clock()
3933 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in kvm_s390_set_tod_clock()
3934 if (kvm->arch.epoch > gtod->tod) in kvm_s390_set_tod_clock()
3935 kvm->arch.epdx -= 1; in kvm_s390_set_tod_clock()
3938 kvm_s390_vcpu_block_all(kvm); in kvm_s390_set_tod_clock()
3939 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_set_tod_clock()
3940 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
3941 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in kvm_s390_set_tod_clock()
3944 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_set_tod_clock()
3946 mutex_unlock(&kvm->lock); in kvm_s390_set_tod_clock()
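
The epoch math in kvm_s390_set_tod_clock() above is worth spelling out: the guest epoch is guest TOD minus host TOD modulo 2^64, and if that subtraction wrapped (the result exceeds the guest TOD), one is borrowed from the epoch index. A self-contained sketch:

/* Sketch of the epoch/epoch-index computation in
 * kvm_s390_set_tod_clock(); struct tod is an illustrative stand-in. */
#include <stdint.h>

struct tod { uint64_t tod; uint8_t epoch_idx; };

static void compute_epoch(const struct tod *guest, const struct tod *host,
			  uint64_t *epoch, int8_t *epdx)
{
	*epoch = guest->tod - host->tod;	/* wraps mod 2^64 */
	*epdx = (int8_t)(guest->epoch_idx - host->epoch_idx);
	if (*epoch > guest->tod)		/* 64-bit subtraction borrowed */
		*epdx -= 1;			/* carry into the epoch index */
}
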
3978 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4032 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4057 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4072 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4155 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4182 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4189 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4226 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4231 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4267 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4278 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4287 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
4434 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
4553 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
4558 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
4580 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4581 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
4587 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4593 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
4606 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
4622 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4636 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4637 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
4643 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4655 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
4657 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
4669 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4683 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
4684 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
4685 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
4686 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
4788 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_memsida_op()
4804 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_memsida_op()
4849 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4851 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4917 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4934 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5012 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5022 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
5038 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5042 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_prepare_memory_region()
5047 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
5057 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5061 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5067 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()