/xen/tools/libxl/

check-xl-vcpupin-parse.data-example
    14: nodes:all*0*cpumap: all
    15: all,nodes:all*0*cpumap: all
    35: nodes:1*0*cpumap: 8-15
    36: nodes:0*0*cpumap: 0-7
    37: nodes:0*0*cpumap: 0-7
    38: nodes:0*0*cpumap: 0-7
    45: nodes:1-1*0*cpumap: 8-15
    46: nodes:1-1*0*cpumap: 8-15
    47: nodes:0-1*0*cpumap: all
    48: nodes:0-0*0*cpumap: 0-7
    [all …]

libxl.c
  in libxl_get_pci_topology():
    456: uint32_t *nodes;    [local]
    468: nodes = libxl__zalloc(gc, sizeof(*nodes) * *num_devs);
    476: if (xc_pcitopoinfo(ctx->xch, *num_devs, devs, nodes) != 0) {
    487: ret[i].node = ((nodes[i] == XEN_INVALID_NODE_ID) ||
    488:               (nodes[i] == XEN_INVALID_DEV)) ?
    489:               LIBXL_PCITOPOLOGY_INVALID_ENTRY : nodes[i];
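The libxl.c hits are the heart of libxl_get_pci_topology(): allocate one uint32_t per PCI device, let libxc fill in each device's NUMA node via xc_pcitopoinfo(), then map Xen's sentinel values to LIBXL_PCITOPOLOGY_INVALID_ENTRY. A minimal caller-side sketch, assuming the public signature libxl_get_pci_topology(ctx, &num) and the generated libxl_pcitopology type (seg/bus/devfn/node fields):

    #include <stdio.h>
    #include <libxl.h>

    /* Sketch: print each PCI device's NUMA node, skipping entries Xen
     * could not resolve.  Assumes the returned array has *num entries
     * and is released with libxl_pcitopology_list_free(). */
    static void print_pci_nodes(libxl_ctx *ctx)
    {
        int num = 0, i;
        libxl_pcitopology *devs = libxl_get_pci_topology(ctx, &num);

        if (!devs)
            return;
        for (i = 0; i < num; i++) {
            if (devs[i].node == LIBXL_PCITOPOLOGY_INVALID_ENTRY)
                continue; /* unknown device, or node not reported */
            printf("%04x:%02x:%02x.%u -> node %u\n",
                   devs[i].seg, devs[i].bus,
                   devs[i].devfn >> 3, devs[i].devfn & 7,
                   devs[i].node);
        }
        libxl_pcitopology_list_free(devs, num);
    }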
/xen/xen/arch/x86/

numa.c
  in populate_memnodemap():
    77: spdx = paddr_to_pdx(nodes[i].start);
    78: epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
  in extract_lsb_from_nodes():
    128: spdx = paddr_to_pdx(nodes[i].start);
    129: epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
  in compute_hash_shift():
    150: shift = extract_lsb_from_nodes(nodes, numnodes);
  in numa_emulation():
    207: struct node nodes[MAX_NUMNODES];    [local]
    221: memset(&nodes,0,sizeof(nodes));
    227: nodes[i].end = nodes[i].start + sz;
    230: nodes[i].start, nodes[i].end,
    231: (nodes[i].end - nodes[i].start) >> 20);
    [all …]
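compute_hash_shift() builds the pdx-to-node lookup table: extract_lsb_from_nodes() scans every node's start/end pdx and picks a shift such that all node boundaries are multiples of 1 << shift, so a lookup is a single indexed read of memnodemap[pdx >> shift]. A simplified illustration of the idea, not the in-tree code (which also tracks memory holes and sizes the table):

    #include <stdint.h>

    struct node { uint64_t start, end; };   /* pdx range, end exclusive */

    /* OR all boundaries together; the lowest set bit of the result is
     * the coarsest granularity that still separates every node. */
    static unsigned int node_shift(const struct node *nodes, int numnodes)
    {
        uint64_t bits = 0;
        int i;

        for (i = 0; i < numnodes; i++)
            bits |= nodes[i].start | nodes[i].end;

        return bits ? (unsigned int)__builtin_ctzll(bits) : 0;
    }

    /* Lookup then becomes: nid = memnodemap[pdx >> shift]; */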
srat.c
    29: static struct node nodes[MAX_NUMNODES] __initdata;    [variable]
  in cutoff_node():
    140: struct node *nd = &nodes[i];
  in acpi_numa_memory_affinity_init():
    333: struct node *nd = &nodes[node];
  in nodes_cover_memory():
    380: if (start < nodes[j].end
    381:     && end > nodes[j].start) {
    382:     if (start >= nodes[j].start) {
    383:         start = nodes[j].end;
    386:     if (end <= nodes[j].end) {
    387:         end = nodes[j].start;
  in acpi_scan_nodes():
    488: u64 size = nodes[i].end - nodes[i].start;
    [all …]
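The nodes_cover_memory() hits show the coverage check: for every RAM range in the e820 map, the still-uncovered window [start, end) is trimmed by each overlapping SRAT node until, ideally, nothing is left. A self-contained sketch of that trimming step, assuming end-exclusive ranges:

    #include <stdint.h>
    #include <stdbool.h>

    struct range { uint64_t start, end; };  /* end exclusive */

    /* Shrink the uncovered window [*start, *end) by one node range,
     * mirroring the logic quoted above.  Returns true while part of
     * the window remains uncovered. */
    static bool trim_by_node(uint64_t *start, uint64_t *end,
                             const struct range *nd)
    {
        if (*start < nd->end && *end > nd->start) {
            if (*start >= nd->start)    /* node covers the front */
                *start = nd->end;
            if (*end <= nd->end)        /* node covers the back */
                *end = nd->start;
        }
        return *start < *end;
    }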
/xen/docs/man/

xlcpupool.cfg.5.pod
    86: =item B<nodes="NODES">
    88: Specifies the cpus of the NUMA-nodes given in C<NODES> (an integer or
    90: specified nodes are allocated in the new cpupool.
    107: "nodes:" modifier can be used. E.g., "0,node:1,nodes:2-3,^10-13" means
    108: that pcpus 0, plus all the cpus of NUMA nodes 1,2,3 with the exception
    113: If neither B<nodes> nor B<cpus> are specified only the first free cpu

xl-numa-placement.7.pod
    33: I<node-affinity>. The node-affinity of a domain is the set of NUMA nodes
    74: itself always tries to run the domain's vCPUs on one of the nodes in
    164: to the nodes to which the pCPUs in the soft affinity mask belong;
    170: will be equal to the nodes to which the pCPUs present both in hard and
    193: The first thing to do is find the nodes or the sets of nodes (from now
    205: candidates involving fewer nodes are considered better. In case
    206: two (or more) candidates span the same number of nodes,
    225: Giving preference to candidates with fewer nodes ensures better
    227: different nodes. Favoring candidates with fewer vCPUs already runnable
    291: it won't scale well to systems with arbitrary number of nodes.
    [all …]
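The placement heuristic quoted from xl-numa-placement.7.pod (lines 205-227) ranks candidates: spanning fewer nodes is better, and ties are broken in favour of candidates with fewer vCPUs already runnable on them. A hedged sketch of such a comparator, using a hypothetical candidate struct (the real logic lives in libxl and also weighs free memory; this only illustrates the documented ordering):

    #include <stdint.h>

    /* Hypothetical placement candidate, for illustration only. */
    struct candidate {
        int nr_nodes;        /* NUMA nodes spanned by the candidate */
        int nr_vcpus;        /* vCPUs already runnable on those nodes */
        uint64_t free_mem;   /* free memory across those nodes */
    };

    /* qsort()-style comparator: fewer nodes first, then fewer runnable
     * vCPUs, then more free memory. */
    static int candidate_cmp(const void *a, const void *b)
    {
        const struct candidate *ca = a, *cb = b;

        if (ca->nr_nodes != cb->nr_nodes)
            return ca->nr_nodes - cb->nr_nodes;
        if (ca->nr_vcpus != cb->nr_vcpus)
            return ca->nr_vcpus - cb->nr_vcpus;
        /* larger free_mem sorts first */
        return (ca->free_mem < cb->free_mem) - (ca->free_mem > cb->free_mem);
    }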
/xen/tools/ocaml/xenstored/

trie.ml
    55: let mem_node nodes key =
    56:   List.exists (fun n -> n.Node.key = key) nodes
    58: let find_node nodes key =
    59:   List.find (fun n -> n.Node.key = key) nodes
    61: let replace_node nodes key node =
    67:   aux nodes
    69: let remove_node nodes key =
    75:   aux nodes

trie.mli
    40: every nodes of [t] containing no values and having no chil. *)
    44: As nodes of the trie [t] do not necessary contains a value, the second argument of
    48: (** [iter_path f t p] iterates [f] over nodes associated with the path [p] in the trie [t].
    52: (** [fold f t x] fold [f] over every nodes of [t], with [x] as initial value. *)

oxenstored.conf.in
    33: # involve a set of nodes that is writable by at most one other domain,
    39: # A transaction which involves a set of nodes which can be modified by

symbol.mli
    19: (** Xenstore nodes names are often the same, ie. "local", "domain", "device", ... so it is worth to
/xen/stubdom/grub.patches/

61btrfs.diff
    1636: + btrfs_item_key_to_cpu(&path->nodes[0],
    2397: + leaf = &path->nodes[0];
    2804: + buf = &path->nodes[level];
    2921: + fi = btrfs_item_ptr(&path->nodes[0],
    2934: + nodes[0],
    2945: + path->nodes[0].dev.part,
    2946: + path->nodes[0].dev.length,
    2947: + path->nodes[0].dev_bytenr >>
    2957: + path->nodes[0].data + from,
    3144: + di = btrfs_item_ptr(&path->nodes[0],
    [all …]
/xen/tools/hotplug/Linux/systemd/

xen-init-dom0.service.in
    2: Description=xen-init-dom0, initialise Dom0 configuration (xenstore nodes, JSON configuration stub)
/xen/tools/xenstore/

TODO
    6: - Dynamic/supply nodes
/xen/xen/arch/x86/oprofile/

op_model_athlon.c
  in init_ibs_nmi():
    454: int nodes;    [local]
    459: nodes = 0;
    486: nodes++;
    492: if (!nodes) {
/xen/tools/libxc/

xc_misc.c
  in xc_pcitopoinfo():
    294: uint32_t *nodes)    [argument]
    301: DECLARE_HYPERCALL_BOUNCE(nodes, num_devs* sizeof(*nodes),
    306: if ( (ret = xc_hypercall_bounce_pre(xch, nodes)) )
    316: set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.nodes, nodes,
    327: xc_hypercall_bounce_post(xch, nodes);
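xc_pcitopoinfo() bounces the caller's nodes array into hypercall-safe memory, points sysctl.u.pcitopoinfo.nodes at it, and copies the results back out. A minimal caller sketch, assuming the signature visible above, xc_pcitopoinfo(xch, num_devs, devs, nodes), and the public physdev_pci_device_t layout (seg/bus/devfn):

    #include <stdio.h>
    #include <stdint.h>
    #include <xenctrl.h>

    /* Ask Xen which NUMA node PCI device 0000:00:02.0 hangs off.
     * Error handling trimmed; libxl additionally checks the result
     * against XEN_INVALID_NODE_ID / XEN_INVALID_DEV (see libxl.c). */
    static void query_one_device(xc_interface *xch)
    {
        physdev_pci_device_t dev = {
            .seg = 0, .bus = 0, .devfn = (2 << 3) | 0, /* slot 2, fn 0 */
        };
        uint32_t node;

        if (xc_pcitopoinfo(xch, 1, &dev, &node) != 0) {
            perror("xc_pcitopoinfo");
            return;
        }
        printf("device is on node %u\n", node);
    }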
/xen/docs/misc/

block-scripts.txt
    45: When the script is run, the following nodes shall already have been
    58: When the script is run, the following nodes shall already have been
    109: other nodes. The reason we haven't done this yet is that the main

qemu-backends.txt
    15: user root) the backend nodes must be written before qemu is dropping

efi.pandoc
    16: relevant device tree nodes.

9pfs.pandoc
    65: information. The toolstack creates front and back nodes with state
    95: Backend configuration nodes, written by the toolstack, read by the
/xen/xen/include/asm-x86/

numa.h
    24: extern int compute_hash_shift(struct node *nodes, int numnodes,
/xen/tools/xl/

xl_cpupool.c
  in main_cpupoolcreate():
    47: XLU_ConfigList *nodes;    [local]
    157: if (!xlu_cfg_get_list(config, "nodes", &nodes, 0, 0)) {
    166: while ((buf = xlu_cfg_get_listitem(nodes, n_nodes)) != NULL) {
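main_cpupoolcreate() reads the nodes= key of a cpupool config file with libxlutil: xlu_cfg_get_list() fetches the list (the trailing arguments are the optional entry-count output and a dont_warn flag), and xlu_cfg_get_listitem() is then walked by index until it returns NULL. A minimal sketch of the same pattern, assuming an already-parsed XLU_Config:

    #include <stdio.h>
    #include <libxlutil.h>

    /* Walk the "nodes" list of a parsed cpupool config, mirroring the
     * xl_cpupool.c hits above.  Sketch only: no error reporting. */
    static void walk_nodes_list(XLU_Config *config)
    {
        XLU_ConfigList *nodes;
        const char *buf;
        int n_nodes = 0;

        if (xlu_cfg_get_list(config, "nodes", &nodes, NULL, 0))
            return;  /* no nodes= key in the file */

        while ((buf = xlu_cfg_get_listitem(nodes, n_nodes)) != NULL) {
            printf("node entry %d: %s\n", n_nodes, buf);
            n_nodes++;
        }
    }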
/xen/docs/designs/

non-cooperative-migration.md
    52: The toolstack will place two nodes in the frontend area to explicitly locate
    58: and similarly two nodes in the backend area to locate the frontend area:
    70: the value of both state nodes to 1 (XenbusStateInitialising[2]). This
    90: both xenstore nodes and node paths), and because guest’s own domid and the
    118: nodes.
    164: nodes relating to emulators but no record type is defined for nodes
/xen/tools/xenstat/xentop/

TODO
    25: from any node of all other nodes in a cluster)
/xen/xen/common/

sysctl.c
  in do_sysctl():
    420: guest_handle_is_null(ti->nodes) )
    448: if ( copy_to_guest_offset(ti->nodes, i, &node, 1) )
/xen/docs/misc/arm/device-tree/

booting.txt
    165: both sub-nodes (described shortly) have reg properties.
    167: Under the "xen,domain" compatible node, one or more sub-nodes are present