Lines matching refs: arch
(cross-reference listing; each entry is: source line number, matching line, containing function — apparently from arch/s390/kvm/kvm-s390.c)

303 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
305 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
306 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
308 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
309 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
310 if (vcpu->arch.vsie_block) in kvm_clock_sync()
311 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
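
The kvm_clock_sync() refs above capture the TOD-synchronization pattern: one host clock delta is applied to every vCPU's SIE epoch, mirrored into the VM-wide kvm->arch.epoch/epdx copy, added to the running CPU-timer base, and propagated to a nested vSIE control block when one exists. A minimal compilable sketch of that pattern, using hypothetical stand-in types rather than the kernel's:

#include <stdint.h>
#include <stdbool.h>

struct scb  { uint64_t epoch; };
struct vcpu {
	struct scb *sie_block;
	struct scb *vsie_block;		/* non-NULL only while running a nested guest */
	bool        cputm_enabled;
	uint64_t    cputm_start;
};

static void clock_sync_one(struct vcpu *v, uint64_t *vm_epoch, int64_t delta)
{
	v->sie_block->epoch += (uint64_t)delta;
	*vm_epoch = v->sie_block->epoch;	/* keep the VM-wide copy in sync */
	if (v->cputm_enabled)
		v->cputm_start += (uint64_t)delta;	/* timer base moves with the clock */
	if (v->vsie_block)
		v->vsie_block->epoch += (uint64_t)delta;
}
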
620 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
703 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
708 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
716 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
717 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
719 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
720 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
723 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
724 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
727 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
728 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
731 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
732 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
735 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
736 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
751 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
752 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
764 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
765 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
778 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
779 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
790 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
802 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
803 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
811 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
816 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
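
The set_kvm_facility() pairs above always touch both the facility mask (what the host model permits) and the facility list (what the guest initially sees). STFLE facility bits on s390 are numbered MSB-first: bit 0 is the top bit of byte 0. A self-contained sketch of that bit layout (local names, not the kernel's helper):

#include <stdint.h>
#include <stdio.h>

static void set_facility_bit(uint8_t *fac, unsigned int nr)
{
	fac[nr >> 3] |= (uint8_t)(0x80U >> (nr & 7));	/* MSB-first numbering */
}

int main(void)
{
	uint8_t fac_mask[32] = { 0 }, fac_list[32] = { 0 };

	set_facility_bit(fac_mask, 129);	/* e.g. facility 129, as in the refs above */
	set_facility_bit(fac_list, 129);
	printf("byte 16 = 0x%02x\n", fac_list[16]);	/* bit 129 lands in byte 16 */
	return 0;
}
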
835 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
836 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
863 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
865 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
875 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
881 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
895 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
896 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
915 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
917 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
924 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
962 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
963 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
964 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
973 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
974 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
975 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
983 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
984 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
985 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
993 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
994 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
995 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1003 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1010 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1043 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1049 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1050 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1067 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1068 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1080 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1082 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1083 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1112 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
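
Together these migration refs sketch a simple protocol: starting migration seeds an atomic counter with the number of pages whose CMMA state still has to be transferred, each page delivered to userspace decrements it (see the kvm_s390_get_cmma() ref further down), and the remainder is reported back. A hedged stand-in with hypothetical types:

#include <stdatomic.h>
#include <stdbool.h>

struct vm_state {
	bool         migration_mode;
	atomic_llong cmma_dirty_pages;
};

static void start_migration(struct vm_state *vm, long long ram_pages)
{
	atomic_store(&vm->cmma_dirty_pages, ram_pages);
	vm->migration_mode = true;
}

static long long deliver_one_page(struct vm_state *vm)
{
	atomic_fetch_sub(&vm->cmma_dirty_pages, 1);	/* one page's CMMA state sent */
	return atomic_load(&vm->cmma_dirty_pages);	/* "remaining" for userspace */
}
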
1200 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1203 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1291 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1296 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1298 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1300 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1302 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1305 kvm->arch.model.ibc, in kvm_s390_set_processor()
1306 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1308 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1309 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1310 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1336 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1355 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1363 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1364 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1365 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1366 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1368 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1369 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1371 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1372 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1374 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1375 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1377 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1378 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1383 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1384 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1386 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1387 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1389 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1390 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1392 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1395 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1396 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1398 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1399 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1401 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1402 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1404 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1405 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1407 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1408 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1410 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1411 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1412 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1413 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1415 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1416 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1417 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1418 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
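
The long run of refs above (and the matching get_processor_subfunc block below) follows a single debug-logging idiom: each query result in kvm->arch.model.subfuncs is a fixed-size byte blob, printed as two or four 64-bit words by casting its address to unsigned long *. A stand-in showing the same aliasing for the 32-byte PLO mask (names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct plo_query { uint8_t bytes[32]; };	/* 256-bit PLO subfunction mask */

static void log_plo(const struct plo_query *q)
{
	uint64_t w[4];

	memcpy(w, q->bytes, sizeof(w));	/* view the blob as four 64-bit words */
	printf("PLO    subfunc 0x%16.16llx.%16.16llx.%16.16llx.%16.16llx\n",
	       (unsigned long long)w[0], (unsigned long long)w[1],
	       (unsigned long long)w[2], (unsigned long long)w[3]);
}
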
1451 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1452 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1453 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1456 kvm->arch.model.ibc, in kvm_s390_get_processor()
1457 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1459 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1460 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1461 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1481 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1486 kvm->arch.model.ibc, in kvm_s390_get_machine()
1487 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1508 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1539 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1545 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1546 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1549 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1552 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1555 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1558 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1568 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1570 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1571 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1577 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1579 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1580 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1582 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1583 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1585 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1586 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1588 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1589 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1591 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1592 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1593 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1594 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1596 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1597 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1598 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1599 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
2054 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2095 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2102 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2111 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2129 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2130 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2155 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2285 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2304 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2414 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
2545 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2548 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
2555 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2557 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
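
The crycbd refs above pack a pointer and a format tag into one 32-bit designation: the CRYCB address is truncated to 32 bits (the kernel allocates the real block in low, DMA-addressable memory, so the truncation loses nothing), the low bits are cleared, and the format is ORed in. A compilable illustration with made-up constants standing in for CRYCB_FORMAT_MASK/CRYCB_FORMAT1/CRYCB_FORMAT2:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FORMAT_MASK 0x3u	/* illustrative values, not the architected ones */
#define FORMAT1     0x1u
#define FORMAT2     0x2u

static uint32_t make_crycbd(void *crycb, uint32_t format)
{
	uint32_t d = (uint32_t)(uintptr_t)crycb;

	d &= ~FORMAT_MASK;	/* clear the tag bits ... */
	d |= format;		/* ... then select the format */
	return d;
}

int main(void)
{
	void *blk = aligned_alloc(256, 256);	/* CRYCB-like aligned block */
	uint32_t d = make_crycbd(blk, FORMAT2);

	printf("crycbd=0x%08x format=%u\n", d, d & FORMAT_MASK);
	free(blk);
	return 0;
}
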
2578 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
2582 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
2629 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
2630 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
2631 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
2632 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
2652 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2654 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
2660 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2661 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2662 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2663 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2664 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2665 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
2670 if (kvm->arch.use_esca) in sca_dispose()
2671 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2673 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2674 kvm->arch.sca = NULL; in sca_dispose()
2703 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2705 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2706 if (!kvm->arch.sca) in kvm_arch_init_vm()
2712 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2713 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
2718 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2719 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2723 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2725 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2728 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
2729 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2732 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
2735 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
2738 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
2741 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2742 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2744 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2745 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2747 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2748 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2752 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
2754 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2755 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2759 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2760 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2762 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2763 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2764 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2766 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2770 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2771 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2774 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2776 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2778 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2779 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2781 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2782 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2785 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2786 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2787 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2795 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2796 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
2814 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
2816 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2821 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
2855 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2856 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2858 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2868 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
2869 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
2871 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2880 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2881 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2882 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2887 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2892 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2898 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2901 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2902 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2905 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2906 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2907 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2909 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2910 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2911 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; in sca_add_vcpu()
2912 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
2915 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2917 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2918 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2919 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2922 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
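
sca_add_vcpu() above stores the 64-bit SCA origin in the SIE block as two 32-bit halves (scaoh/scaol); in the extended-SCA case the ~0x3fU mask clears the low six bits of the low half, which carry no address information since the block is at least 64-byte aligned. A small sketch of the split:

#include <stdint.h>
#include <stdio.h>

static void split_sca_origin(uint64_t sca, int use_esca,
			     uint32_t *scaoh, uint32_t *scaol)
{
	*scaoh = (uint32_t)(sca >> 32);	/* high half of the origin */
	*scaol = (uint32_t)sca;		/* low half */
	if (use_esca)
		*scaol &= ~0x3fU;	/* alignment bits are not address bits */
}

int main(void)
{
	uint32_t oh, ol;

	split_sca_origin(0x12345678abcd40ULL, 1, &oh, &ol);
	printf("scaoh=0x%08x scaol=0x%08x\n", oh, ol);
	return 0;
}
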
2945 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2951 if (kvm->arch.use_esca) in sca_switch_to_extended()
2962 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2967 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
2968 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
2969 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
2971 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2972 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2974 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2980 old_sca, kvm->arch.sca); in sca_switch_to_extended()
2999 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
3008 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
3009 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3010 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
3011 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3017 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
3018 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3019 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
3020 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
3021 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3027 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
3028 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
3035 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3037 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3058 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3059 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3060 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3061 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3062 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3072 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3073 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3077 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3083 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3085 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3086 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3087 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
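
The kvm_s390_get_cpu_timer() refs above are a lockless seqcount read: take the sequence, read the timer, subtract the elapsed TOD time if accounting is running, and retry if the sequence moved. Masking the low bit in the retry check (the "seq & ~1" in the ref at 3087) forces a retry whenever the first read happened while a writer held the odd, in-progress count. A self-contained C11 sketch of the same pattern (the kernel's seqcount API additionally handles the formal data-race rules that the plain loads here gloss over):

#include <stdatomic.h>
#include <stdint.h>

struct cputm_state {
	atomic_uint seq;		/* odd while a writer is mid-update */
	uint64_t    cputm;		/* timer value as of cputm_start */
	uint64_t    cputm_start;	/* TOD when accounting started; 0 if stopped */
};

uint64_t now_tod(void);			/* stand-in for get_tod_clock_fast() */

static uint64_t read_cpu_timer(struct cputm_state *s)
{
	unsigned int seq;
	uint64_t value;

	do {
		seq = atomic_load_explicit(&s->seq, memory_order_acquire);
		value = s->cputm;
		if (s->cputm_start)	/* running: subtract elapsed time */
			value -= now_tod() - s->cputm_start;
		/* retry if the count changed, or was odd when we started */
	} while (atomic_load_explicit(&s->seq, memory_order_acquire) != (seq & ~1u));
	return value;
}
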
3095 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
3097 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3105 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3108 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
3109 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
3117 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3118 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3122 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3125 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3126 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3128 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
3133 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3156 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3159 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3160 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3161 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3162 vcpu->arch.sie_block->ecd &= ~ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3164 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3165 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3168 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3169 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3172 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3175 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3176 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3181 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
3182 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3187 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT); in kvm_s390_vcpu_setup_cmma()
3188 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
3195 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3197 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3199 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; in kvm_s390_vcpu_setup_model()
3207 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3220 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3222 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3224 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3226 vcpu->arch.sie_block->ecb |= ECB_SPECI; in kvm_s390_vcpu_setup()
3228 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3229 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3231 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3232 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3234 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3236 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3238 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3240 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3242 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3243 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3246 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3248 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3249 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3250 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3252 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3254 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) in kvm_s390_vcpu_setup()
3256 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; in kvm_s390_vcpu_setup()
3261 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3263 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3268 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_s390_vcpu_setup()
3269 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_s390_vcpu_setup()
3271 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3303 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3304 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
3307 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3308 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3310 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3311 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3312 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin; in kvm_arch_vcpu_create()
3313 if (vcpu->arch.sie_block->gd && sclp.has_gisaf) in kvm_arch_vcpu_create()
3314 vcpu->arch.sie_block->gd |= GISA_FORMAT1; in kvm_arch_vcpu_create()
3315 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3317 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3350 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3351 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3360 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
3362 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
3368 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
3374 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
3379 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
3385 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
3390 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
3396 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
3402 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
3413 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
3473 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
3477 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
3485 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
3489 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
3493 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
3497 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
3501 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
3505 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
3523 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
3527 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
3536 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
3540 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
3542 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
3546 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
3550 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
3554 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
3558 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
3570 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
3571 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
3589 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3590 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3593 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3594 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
3595 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3596 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3617 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
3618 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3619 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
3620 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3662 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
3674 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
3770 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3853 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
3864 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
3885 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
3895 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
3904 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3906 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
3930 kvm->arch.epoch = gtod->tod - clk.tod; in kvm_s390_set_tod_clock()
3931 kvm->arch.epdx = 0; in kvm_s390_set_tod_clock()
3933 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in kvm_s390_set_tod_clock()
3934 if (kvm->arch.epoch > gtod->tod) in kvm_s390_set_tod_clock()
3935 kvm->arch.epdx -= 1; in kvm_s390_set_tod_clock()
3940 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
3941 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in kvm_s390_set_tod_clock()
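
The epoch computation above is a 128-bit subtraction carried out in two 64-bit words: kvm->arch.epoch = gtod->tod - clk.tod for the low word, the epoch-index difference for the high word, and a borrow (the "epdx -= 1") when the low-word subtraction wrapped. A worked, compilable example of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

static void compute_epoch(uint64_t g_ei, uint64_t g_tod,
			  uint64_t h_ei, uint64_t h_tod,
			  uint64_t *epdx, uint64_t *epoch)
{
	*epoch = g_tod - h_tod;
	*epdx  = g_ei - h_ei;
	if (*epoch > g_tod)	/* low word wrapped: borrow from the high word */
		*epdx -= 1;
}

int main(void)
{
	uint64_t epdx, epoch;

	compute_epoch(1, 5, 0, 10, &epdx, &epoch);	/* forces a borrow */
	/* (1,5) - (0,10) = 2^64 - 5, i.e. epdx=0, epoch=0xfffffffffffffffb */
	printf("epdx=%llu epoch=0x%llx\n",
	       (unsigned long long)epdx, (unsigned long long)epoch);
	return 0;
}
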
3961 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
3985 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3986 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3994 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
3995 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
4016 struct kvm_arch_async_pf arch; in kvm_arch_setup_async_pf() local
4018 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
4020 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
4021 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
4027 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
4029 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
4034 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4037 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
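
Taken together, the kvm_arch_setup_async_pf() refs encode the gate for injecting an async page fault: the guest must have configured a valid pfault token, the current PSW mask filtered by pfault_select must equal pfault_compare, the service-signal subclass must be enabled in CR0, and the address space must allow pfaults. A condensed, hedged restatement with local stand-in types:

#include <stdint.h>
#include <stdbool.h>

#define PFAULT_TOKEN_INVALID (~0ULL)	/* stand-in for the kernel constant */

struct pf_cfg {
	uint64_t token;			/* guest-configured pfault token address */
	uint64_t select, compare;	/* PSW-mask filter set by the guest */
	uint64_t psw_mask;
	bool     cr0_service_signal;	/* service-signal subclass enabled in CR0 */
	bool     gmap_pfault_enabled;
};

static bool may_setup_async_pf(const struct pf_cfg *c)
{
	if (c->token == PFAULT_TOKEN_INVALID)
		return false;
	if ((c->psw_mask & c->select) != c->compare)
		return false;
	if (!c->cr0_service_signal)
		return false;
	return c->gmap_pfault_enabled;
}
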
4051 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4052 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4072 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4074 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4075 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4101 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
4110 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
4124 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4125 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4130 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
4131 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
4135 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
4142 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
4148 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
4149 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
4150 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
4176 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
4205 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
4217 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
4218 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
4219 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
4243 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
4244 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
4246 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
4247 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
4248 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
4251 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
4252 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
4253 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
4254 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
4258 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
4259 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
4260 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); in sync_regs_fmt2()
4269 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
4271 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
4280 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4282 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
4283 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
4284 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
4288 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
4289 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
4295 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
4296 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
4298 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4315 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
4321 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
4323 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
4327 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
4328 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
4351 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
4352 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
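
The last two sync_regs() refs are a classic read-modify-write merge: only the condition-code bits of the guest PSW mask are taken from userspace; everything else is preserved. A minimal illustration (the PSW_MASK_CC value below is the architected CC field, bits 18-19 of the 64-bit mask, included for illustration):

#include <stdint.h>

#define PSW_MASK_CC 0x0000300000000000ULL	/* condition-code bits of the PSW */

static uint64_t merge_cc(uint64_t gpsw_mask, uint64_t user_mask)
{
	gpsw_mask &= ~PSW_MASK_CC;		/* drop the old CC ... */
	gpsw_mask |= user_mask & PSW_MASK_CC;	/* ... adopt userspace's CC */
	return gpsw_mask;
}
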
4363 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
4364 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
4365 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
4366 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
4367 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
4371 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
4373 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
4374 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
4375 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
4377 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
4387 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
4388 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
4390 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
4392 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
4393 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
4394 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
4395 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
4397 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
4402 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
4403 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
4513 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
4519 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
4523 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
4529 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
4580 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4587 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4616 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
4622 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4636 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4643 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4669 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4683 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
4684 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
4707 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_guest_sida_op()
4712 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4718 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4777 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
4922 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
4939 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
4945 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
5013 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
5038 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5057 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5061 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5067 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()