/xen/xen/tools/kconfig/

Makefile.host
    69   host-csingle := $(addprefix $(obj)/,$(host-csingle))
    70   host-cmulti := $(addprefix $(obj)/,$(host-cmulti))
    71   host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
    72   host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
    73   host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
    74   host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
    75   host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
    76   host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
    77   host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
    180  targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
    [all …]

Makefile.kconfig
    79   include $(src)/Makefile.host
    94   clean-deps = $(foreach f,$(host-cobjs) $(host-cxxobjs),$(dir $f).$(notdir $f).d)
    100  rm -rf $(host-csingle) $(host-cmulti) $(host-cxxmulti) $(host-cobjs) $(host-cxxobjs)

/xen/tools/hotplug/Linux/

external-device-migrate
    35  -host <host> : the destination host
    56  local step host domname typ recover filename func stype
    61  -host) host=$2; shift; shift;;
    72  "$host" = "" -o \
    90  eval $func $host $domname $step $*
    93  eval $func $host $domname $step $*

/xen/tools/libxl/

libxl_colo_qdisk.c
    47   const char *host = disk->colo_host;  in colo_qdisk_setup() local
    55   !host || !export_name || (disk->colo_port <= 0) ||  in colo_qdisk_setup()
    78   ret = libxl__qmp_nbd_server_start(gc, domid, host, port);  in colo_qdisk_setup()
    83   crs->host = host;  in colo_qdisk_setup()
    86   if (strcmp(crs->host, host) || strcmp(crs->port, port)) {  in colo_qdisk_setup()
    160  const char *host = disk->colo_host;  in colo_qdisk_save_preresume() local
    175  host, port, export_name, node);  in colo_qdisk_save_preresume()

libxl_colo.h
    117  const char *host;  member

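Read together, the colo_qdisk_setup() hits above suggest a simple contract: the secondary host address, the NBD port and the export name must all be present before libxl starts the NBD server (the check at line 55). The following is a hypothetical, standalone C rendering of that validation only; the struct, field names and error reporting are invented for illustration and are not libxl code.

    #include <stdio.h>

    /* Hypothetical stand-in for the relevant COLO disk-replication settings. */
    struct colo_disk_params {
        const char *colo_host;    /* secondary host address */
        int colo_port;            /* port of the NBD server on the secondary */
        const char *colo_export;  /* NBD export name */
    };

    /* Return 0 when the parameters are usable, -1 otherwise. */
    static int colo_disk_params_valid(const struct colo_disk_params *disk)
    {
        if (!disk->colo_host || !disk->colo_export || disk->colo_port <= 0) {
            fprintf(stderr,
                    "COLO disk replication needs colo-host, colo-port and colo-export\n");
            return -1;
        }

        return 0;
    }

    int main(void)
    {
        /* Example values only. */
        struct colo_disk_params disk = { "192.168.1.2", 8899, "qdisk0" };

        return colo_disk_params_valid(&disk) ? 1 : 0;
    }
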
/xen/xen/lib/x86/

policy.c
    5   int x86_cpu_policies_are_compatible(const struct cpu_policy *host,  in x86_cpu_policies_are_compatible() argument
    18  if ( guest->cpuid->basic.max_leaf > host->cpuid->basic.max_leaf )  in x86_cpu_policies_are_compatible()
    21  if ( guest->cpuid->extd.max_leaf > host->cpuid->extd.max_leaf )  in x86_cpu_policies_are_compatible()
    26  if ( ~host->msr->platform_info.raw & guest->msr->platform_info.raw )  in x86_cpu_policies_are_compatible()

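The policy.c hits show the essence of x86_cpu_policies_are_compatible(): a guest policy must not claim more CPUID leaves than the host, and must not set any MSR platform-info bit the host does not offer. Below is a minimal standalone sketch of that subset check, assuming a deliberately simplified policy structure; the real struct cpu_policy is the one declared in cpu-policy.h further down this listing.

    #include <stdint.h>
    #include <stdio.h>

    /* Deliberately simplified stand-in for struct cpu_policy. */
    struct toy_policy {
        uint32_t basic_max_leaf;     /* highest basic CPUID leaf */
        uint32_t extd_max_leaf;      /* highest extended CPUID leaf */
        uint64_t platform_info_raw;  /* platform-info MSR bits offered */
    };

    /* Return 0 if the guest policy is a subset of the host policy, -1 otherwise. */
    static int toy_policies_are_compatible(const struct toy_policy *host,
                                           const struct toy_policy *guest)
    {
        if (guest->basic_max_leaf > host->basic_max_leaf)
            return -1;

        if (guest->extd_max_leaf > host->extd_max_leaf)
            return -1;

        /* Any bit set for the guest but clear on the host is incompatible. */
        if (~host->platform_info_raw & guest->platform_info_raw)
            return -1;

        return 0;
    }

    int main(void)
    {
        struct toy_policy host  = { 0x1f, 0x80000021, 0x3 };
        struct toy_policy guest = { 0x0d, 0x80000008, 0x1 };

        printf("compatible: %s\n",
               toy_policies_are_compatible(&host, &guest) ? "no" : "yes");
        return 0;
    }
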
/xen/docs/features/

livepatch.pandoc
    34  allows the host administrator to break their system rather easily
    50  results in an insecure host:
    52  results in an insecure host, this shall not be considered a security
    56  4) Loading an incorrect live patch that results in an insecure host or
    57  host crash:
    59  alternative) is loaded and it results in an insecure host or host
    81  1) Is guest->host privilege escalation possible?
    85  There is a caveat -- an incorrect live patch can introduce a guest->host

/xen/docs/hypervisor-guide/

code-coverage.rst
    46  [root@host ~]# xencov read > coverage.dat
    54  [root@host ~]# xencov reset
    67  * Obtain the raw coverage data from the test host, and pull it back to the
    77  xen.git/xen$ ssh root@host xencov read > coverage.dat
    88  * Obtain the raw coverage data from the test host, and pull it back to the
    97  xen.git/xen$ ssh root@host xencov read > xen.profraw

/xen/tools/xl/

xl_migrate.c
    539  char *host;  in main_migrate() local
    577  host = argv[optind + 1];  in main_migrate()
    582  rune = host;  in main_migrate()
    596  ssh_command, host,  in main_migrate()
    613  char *host = NULL, *rune = NULL;  in main_remus() local
    658  host = argv[optind + 1];  in main_remus()
    705  rune = host;  in main_remus()
    709  ssh_command, host,  in main_remus()
    714  ssh_command, host,  in main_remus()

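As the xl.1 manpage excerpt further down notes, xl migrate by default runs "ssh <host> xl migrate-receive", and the hits in main_migrate() show the resulting remote command being kept in a variable called rune. The helper below is only an illustrative standalone sketch of composing such a rune; the "treat an argument containing a space as a ready-made rune" heuristic is an assumption for the example, not the actual xl logic.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Compose the remote command ("rune") used to receive a migrated domain.
     * Purely illustrative; xl's real option handling is richer than this.
     */
    static char *build_rune(const char *ssh_command, const char *host)
    {
        /* Assumption for the example: an argument with spaces is already a rune. */
        if (strchr(host, ' '))
            return strdup(host);

        size_t len = strlen(ssh_command) + 1 + strlen(host) +
                     sizeof(" xl migrate-receive");
        char *rune = malloc(len);

        if (rune)
            snprintf(rune, len, "%s %s xl migrate-receive", ssh_command, host);

        return rune;
    }

    int main(void)
    {
        char *rune = build_rune("ssh", "dst.example.org");

        printf("%s\n", rune ? rune : "(allocation failed)");
        free(rune);
        return 0;
    }
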
/xen/docs/admin-guide/

microcode-loading.rst
    36   [root@host ~]# xl dmesg | grep microcode
    45   [root@host ~]# head /proc/cpuinfo
    124  [root@host ~]# xen-ucode
    127  [root@host ~]#
    133  [root@host ~]# xen-ucode /lib/firmware/intel-ucode/06-3c-03
    134  [root@host ~]#
    143  [root@host ~]# xl dmesg | grep microcode

/xen/tools/misc/

xencons
    60  def connect(host,port):  argument
    62  sock.connect((host,port))

xenpvnetboot
    150  host = self.location[7:].split('/', 1)[0].replace(':', ' ')
    153  host = self.location[7:].replace(':', ' ')
    157  cmd = '/usr/bin/tftp %s -c get %s %s' % (host, os.path.join(basedir, filename), local_name)

/xen/xen/include/xen/lib/x86/

cpu-policy.h
    36  int x86_cpu_policies_are_compatible(const struct cpu_policy *host,

/xen/tools/libxc/

xc_cpuid_x86.c
    294  xen_cpuid_leaf_t *host = NULL, *max = NULL, *cur = NULL;  in xc_cpuid_xend_policy() local
    314  if ( (host = calloc(nr_leaves, sizeof(*host))) == NULL ||  in xc_cpuid_xend_policy()
    350  &nr_host, host, &nr_msrs, NULL);  in xc_cpuid_xend_policy()
    363  const xen_cpuid_leaf_t *host_leaf = find_leaf(host, nr_host, xend);  in xc_cpuid_xend_policy()
    423  free(host);  in xc_cpuid_xend_policy()

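The hit at line 363 uses a find_leaf() helper to look up one leaf in the flat array of host CPUID leaves fetched from the hypervisor. The sketch below shows the kind of lookup that implies, using an invented, simplified leaf structure rather than the real xen_cpuid_leaf_t; it is an illustration, not the libxc implementation.

    #include <stddef.h>
    #include <stdint.h>

    /* Invented, simplified stand-in for a CPUID leaf record. */
    struct toy_cpuid_leaf {
        uint32_t leaf, subleaf;
        uint32_t a, b, c, d;
    };

    /* Linear scan for a (leaf, subleaf) pair; returns NULL if absent. */
    static const struct toy_cpuid_leaf *toy_find_leaf(
        const struct toy_cpuid_leaf *leaves, size_t nr,
        uint32_t leaf, uint32_t subleaf)
    {
        for (size_t i = 0; i < nr; i++)
            if (leaves[i].leaf == leaf && leaves[i].subleaf == subleaf)
                return &leaves[i];

        return NULL;
    }

    int main(void)
    {
        const struct toy_cpuid_leaf host[] = {
            { .leaf = 0x0, .subleaf = 0, .a = 0x16 },
            { .leaf = 0x7, .subleaf = 0, .b = 0x029c6fbf },
        };

        return toy_find_leaf(host, sizeof(host) / sizeof(host[0]), 0x7, 0) ? 0 : 1;
    }
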
/xen/docs/man/

xentrace.8.pod
    12  output in the following binary format (host endian):
    62  Trace only on CPUs 3 up to maximum numbers of CPUs the host has
    64  If using I<all> it will use all of the CPUs the host has.

xl-numa-placement.7.pod
    22   running memory-intensive workloads on a shared host. In fact, the cost
    34   of the host where the memory for the domain is being allocated (mostly,
    78   the host resources (e.g., the pCPUs).
    132  both manual or automatic placement of them across the host's NUMA nodes.
    188  nodes of an host is an incarnation of the Bin Packing Problem. In fact,
    190  and the host nodes are the bins. As such problem is known to be NP-hard,
    228  there ensures a good balance of the overall host load. Finally, if more
    254  pCPU on the host, but the memory from the domain will come from the
    293  if it is requested on a host with more than 16 NUMA nodes.

xl.cfg.5.pod.in
    72    single host must be unique.
    172   host, except for the CPUs belonging to the host NUMA node 1.
    178   run on CPUs 3,4,6,7,8 of the host (excluding CPU 5).
    228   with a weight of 256 on a contended host.
    924   =item B<host>
    947   #1. If B<strategy> is set to "host", for example:
    949   rdm = "strategy=host,policy=strict" or rdm = "strategy=host,policy=relaxed"
    1999  cpuid="host,tm=0,sse3=0"
    2135  as the host. If it is later migrated to another host that provide
    2588  host/client side.
    [all …]

xl-disk-configuration.5.pod
    258  Specifies that B<target> is not a normal host path, but rather
    362  =item B<colo-host>
    368  Secondary host's address
    382  Secondary port. We will run a nbd server on secondary host,
    397  We will run a nbd server on secondary host, exportname is

xl.1.pod.in
    461  =item B<migrate> [I<OPTIONS>] I<domain-id> I<host>
    473  <host> instead of ssh <host> xl migrate-receive [-d -e].
    477  On the new <host>, do not wait in the background for the death of the
    496  such that it will be identical on the destination host, unless that
    502  =item B<remus> [I<OPTIONS>] I<domain-id> I<host>
    527  =item B<colo-host> : Secondary host's ip address.
    530  secondary host, and the nbd server will listen on this port.
    571  If empty, run <host> instead of ssh <host> xl migrate-receive -r [-e].
    903  host : scarlett
    983  List host NUMA topology information
    [all …]

/xen/docs/misc/

xen-error-handling.txt
    24  Crashes the host system with an informative file/line error message
    61  Like BUG() and ASSERT() this will crash and reboot the host
    71  bootstrap. The failure is unexpected since a host should always have

distro_mapping.txt
    12  build host) and for some scripts at run-time. If the Red Hat

/xen/tools/firmware/vgabios/

TODO
    24  - have plex86 host side display interface

/xen/automation/build/suse/

sles11sp4.dockerfile
    15  # distros nowadays disable vsyscall. To run this container, the host needs to

/xen/automation/build/

README.md
    71  If your docker host has Linux kernel > 4.11, and you want to use containers
    78  to the host kernel command line. That enables a legacy interface that is used

/xen/docs/designs/

non-cooperative-migration.md
    8    an era when it was assumed that the host administrator had control of at
    12   which is purely host driven, requiring no co-operation from the software
    99   re-establish these in the new host environment after migration.
    104  and valid in the new host environment, and the grant table entries and event
    109  up grant entries and event channels, the backend drivers in the new host
    203  early in migration, the toolstack running on the remote host would set up a
    243  unavailable on the target host, but randomization of domid on creation