
Searched refs:range (Results 1 – 25 of 79) sorted by relevance


/xen/xen/xsm/flask/ss/
context.h
38 memset(&c->range, 0, sizeof(c->range)); in mls_context_init()
48 dst->range.level[0].sens = src->range.level[0].sens; in mls_context_cpy()
49 rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat); in mls_context_cpy()
53 dst->range.level[1].sens = src->range.level[1].sens; in mls_context_cpy()
54 rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat); in mls_context_cpy()
71 dst->range.level[0].sens = src->range.level[0].sens; in mls_context_cpy_low()
76 dst->range.level[1].sens = src->range.level[0].sens; in mls_context_cpy_low()
89 return ((c1->range.level[0].sens == c2->range.level[0].sens) && in mls_context_cmp()
90 ebitmap_cmp(&c1->range.level[0].cat,&c2->range.level[0].cat) && in mls_context_cmp()
91 (c1->range.level[1].sens == c2->range.level[1].sens) && in mls_context_cmp()
[all …]
mls.c
43 int index_sens = context->range.level[l].sens; in mls_compute_context_len()
49 e = &context->range.level[l].cat; in mls_compute_context_len()
114 e = &context->range.level[l].cat; in mls_sid_to_context()
218 if ( !mls_range_isvalid(p, &c->range) ) in mls_context_isvalid()
230 if ( !mls_range_contains(usrdatum->range, c->range) ) in mls_context_isvalid()
366 context->range.level[1].sens = context->range.level[0].sens; in mls_context_to_sid()
368 &context->range.level[0].cat); in mls_context_to_sid()
389 context->range.level[l].sens = range->level[l].sens; in mls_range_set()
391 &range->level[l].cat); in mls_range_set()
438 ebitmap_destroy(&c->range.level[l].cat); in mls_convert_context()
[all …]
/xen/xen/arch/x86/x86_64/
mmconf-fam10h.c
44 struct range { in get_fam10h_pci_mmconf_base() struct
46 } range[8]; in get_fam10h_pci_mmconf_base() local
100 if (range[j - 1].start < start) in get_fam10h_pci_mmconf_base()
102 range[j] = range[j - 1]; in get_fam10h_pci_mmconf_base()
104 range[j].start = start; in get_fam10h_pci_mmconf_base()
105 range[j].end = end; in get_fam10h_pci_mmconf_base()
116 if (range[hi_mmio_num - 1].end < start) in get_fam10h_pci_mmconf_base()
118 if (range[0].start > start + SIZE) in get_fam10h_pci_mmconf_base()
122 start = (range[0].start & MASK) - UNIT; in get_fam10h_pci_mmconf_base()
130 start = (range[i - 1].end + UNIT) & MASK; in get_fam10h_pci_mmconf_base()
[all …]
machine_kexec.c
14 int machine_kexec_get_xen(xen_kexec_range_t *range) in machine_kexec_get_xen() argument
16 range->start = virt_to_maddr(_start); in machine_kexec_get_xen()
17 range->size = virt_to_maddr(_end) - (unsigned long)range->start; in machine_kexec_get_xen()
/xen/xen/common/
rangeset.c
16 struct range { struct
48 struct range *x = NULL, *y; in find_range()
80 struct rangeset *r, struct range *x, struct range *y) in insert_range()
99 struct range *x; in alloc_range()
118 struct range *x, *y; in rangeset_add_range()
186 struct range *x, *y, *t; in rangeset_remove_range()
254 struct range *x; in rangeset_contains_range()
273 struct range *x; in rangeset_overlaps_range()
293 struct range *x; in rangeset_report_ranges()
461 struct range *x; in rangeset_destroy()
[all …]
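
The rangeset.c hits above belong to Xen's generic sorted-range container (add, remove, containment and overlap queries over inclusive [start, end] ranges). A minimal usage sketch, assuming the prototypes declared in xen/include/xen/rangeset.h (exact signatures vary between Xen versions):

    #include <xen/errno.h>
    #include <xen/rangeset.h>

    /* Track a set of page ranges for a domain. */
    static int example_track_ranges(struct domain *d)
    {
        struct rangeset *r = rangeset_new(d, "example", 0);
        int rc;

        if ( r == NULL )
            return -ENOMEM;

        /* Ranges are inclusive: this covers 0x100..0x1ff. */
        rc = rangeset_add_range(r, 0x100, 0x1ff);
        if ( !rc && !rangeset_contains_range(r, 0x180, 0x18f) )
            rc = -EINVAL;   /* a contained subrange must be reported as such */

        rangeset_destroy(r);
        return rc;
    }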
kexec.c
625 range->start = range->size = 0; in kexec_get_reserve()
631 int nr = range->nr; in kexec_get_cpu()
653 range->start = range->size = 0; in kexec_get_cpu()
661 range->size = VMCOREINFO_BYTES; in kexec_get_vmcoreinfo()
669 switch ( range->range ) in kexec_get_range_internal()
672 ret = kexec_get_reserve(range); in kexec_get_range_internal()
675 ret = kexec_get_cpu(range); in kexec_get_range_internal()
681 ret = machine_kexec_get(range); in kexec_get_range_internal()
690 xen_kexec_range_t range; in kexec_get_range() local
707 xen_kexec_range_t range; in kexec_get_range_compat() local
[all …]
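
The kexec.c hits fill in a xen_kexec_range_t for each query, with kexec_get_range_internal() dispatching on range->range to the reserve, per-CPU and machine handlers shown above. A hedged reconstruction of the structure's shape, inferred from the field accesses in these hits; the public kexec header is authoritative and the field types here are assumptions:

    /* Shape implied by range->range, ->nr, ->start and ->size above. */
    typedef struct xen_kexec_range {
        int range;              /* which KEXEC_RANGE_* region is being queried */
        int nr;                 /* sub-index, e.g. a CPU number (see kexec_get_cpu) */
        unsigned long size;     /* out: size of the region in bytes */
        unsigned long start;    /* out: machine address of the region */
    } xen_kexec_range_t;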
unlzma.c
68 uint32_t range; member
109 rc->range = 0xFFFFFFFF; in rc_init()
129 rc->range <<= 8; in rc_do_normalize()
134 if (rc->range < (1 << RC_TOP_BITS)) in rc_normalize()
145 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); in rc_is_bit_0_helper()
157 rc->range = rc->bound; in rc_update_bit_0()
162 rc->range -= rc->bound; in rc_update_bit_1()
185 rc->range >>= 1; in rc_direct_bit()
186 if (rc->code >= rc->range) { in rc_direct_bit()
187 rc->code -= rc->range; in rc_direct_bit()
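
The unlzma.c hits are the inner loop of a binary range decoder: a probability-scaled bound splits rc->range, the decoded bit updates range/code and the probability, and normalization refills the low byte whenever range falls below 1 << RC_TOP_BITS. A standalone sketch of that step, with the RC_* constants and the probability-update shift set to the usual LZMA values (assumed here, not read from the hits):

    #include <stdint.h>

    #define RC_TOP_BITS          24
    #define RC_MODEL_TOTAL_BITS  11
    #define RC_MOVE_BITS         5    /* assumed: standard LZMA adaptation shift */

    struct rc {
        uint32_t range, code;
        const uint8_t *in;            /* compressed input stream */
    };

    /* Decode one bit against the adaptive probability *p (0 .. 1 << 11). */
    static int rc_decode_bit(struct rc *rc, uint16_t *p)
    {
        uint32_t bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
        int bit;

        if (rc->code < bound) {                /* rc_is_bit_0 + rc_update_bit_0 */
            rc->range = bound;
            *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
            bit = 0;
        } else {                               /* rc_update_bit_1 */
            rc->range -= bound;
            rc->code -= bound;
            *p -= *p >> RC_MOVE_BITS;
            bit = 1;
        }

        if (rc->range < (1u << RC_TOP_BITS)) { /* rc_normalize / rc_do_normalize */
            rc->range <<= 8;
            rc->code = (rc->code << 8) | *rc->in++;
        }
        return bit;
    }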
/xen/xen/arch/x86/hvm/
mtrr.c
553 list_del(&range->list); in hvm_destroy_cacheattr_region_list()
554 xfree(range); in hvm_destroy_cacheattr_region_list()
575 rc = range->type; in hvm_get_mem_pinned_cacheattr()
615 if ( range->start == gfn_start && range->end == gfn_end ) in hvm_set_mem_pinned_cacheattr()
659 if ( range->start == gfn_start && range->end == gfn_end ) in hvm_set_mem_pinned_cacheattr()
661 range->type = type; in hvm_set_mem_pinned_cacheattr()
665 if ( range->start <= gfn_end && gfn_start <= range->end ) in hvm_set_mem_pinned_cacheattr()
676 if ( range == NULL ) in hvm_set_mem_pinned_cacheattr()
679 range->start = gfn_start; in hvm_set_mem_pinned_cacheattr()
680 range->end = gfn_end; in hvm_set_mem_pinned_cacheattr()
[all …]
/xen/xen/include/asm-x86/
amd.h
123 #define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) argument
124 #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) argument
125 #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) argument
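
These three extractors unpack a CPU family plus a model/stepping range from one 32-bit value. A tiny standalone demo: the extractor macros are copied from the hits, while the packing helper and the sample value are hypothetical and exist only for this illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define AMD_MODEL_RANGE_FAMILY(range)  (((range) >> 24) & 0xff)
    #define AMD_MODEL_RANGE_START(range)   (((range) >> 12) & 0xfff)
    #define AMD_MODEL_RANGE_END(range)     ((range) & 0xfff)

    /* Hypothetical packing helper mirroring the layout the extractors imply:
     * family in bits 31..24, range start in 23..12, range end in 11..0. */
    #define DEMO_PACK_RANGE(fam, start, end) \
        (((uint32_t)(fam) << 24) | ((uint32_t)(start) << 12) | (uint32_t)(end))

    int main(void)
    {
        uint32_t r = DEMO_PACK_RANGE(0x10, 0x020, 0x0ff);

        printf("family 0x%02x, range 0x%03x..0x%03x\n",
               (unsigned)AMD_MODEL_RANGE_FAMILY(r),
               (unsigned)AMD_MODEL_RANGE_START(r),
               (unsigned)AMD_MODEL_RANGE_END(r));
        return 0;
    }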
/xen/xen/drivers/passthrough/amd/
iommu_acpi.c
47 struct acpi_ivhd_device_range range; member
454 dev_length = sizeof(*range); in parse_ivhd_device_range()
469 first_bdf = range->start.header.id; in parse_ivhd_device_range()
477 last_bdf = range->end.header.id; in parse_ivhd_device_range()
536 dev_length = sizeof(*range); in parse_ivhd_device_alias_range()
551 first_bdf = range->alias.header.id; in parse_ivhd_device_alias_range()
559 last_bdf = range->end.header.id; in parse_ivhd_device_alias_range()
567 alias_id = range->alias.used_id; in parse_ivhd_device_alias_range()
615 dev_length = sizeof(*range); in parse_ivhd_device_extended_range()
638 last_bdf = range->end.header.id; in parse_ivhd_device_extended_range()
[all …]
/xen/xen/arch/arm/arm64/
insn.c
160 long range) in branch_imm_common() argument
166 return range; in branch_imm_common()
171 if (offset < -range || offset >= range) { in branch_imm_common()
173 return range; in branch_imm_common()
/xen/xen/arch/x86/
machine_kexec.c
187 int machine_kexec_get(xen_kexec_range_t *range) in machine_kexec_get() argument
189 if (range->range != KEXEC_RANGE_MA_XEN) in machine_kexec_get()
191 return machine_kexec_get_xen(range); in machine_kexec_get()
/xen/xen/include/xen/
livepatch.h
101 long range = ARCH_LIVEPATCH_RANGE; in livepatch_verify_distance() local
107 if ( offset < -range || offset >= range ) in livepatch_verify_distance()
kexec.h
56 int machine_kexec_get(xen_kexec_range_t *range);
57 int machine_kexec_get_xen(xen_kexec_range_t *range);
/xen/docs/man/
xen-pci-device-reservations.7.pod
17 records reservations made within the device ID range in order to avoid
24 =item 1. A vendor may request a range of device IDs by submitting a patch to
27 =item 2. Vendor allocations should be in the range 0xc000-0xfffe to reduce the
30 =item 3. The vendor is responsible for allocations within the range and should
38 range | vendor/product
/xen/tools/libxc/
xc_kexec.c
39 int xc_kexec_get_range(xc_interface *xch, int range, int nr, in xc_kexec_get_range() argument
52 get_range->range = range; in xc_kexec_get_range()
/xen/tools/libxl/
check-xl-vcpupin-parse
203 range=$((nr_cpus - cpua))
204 cpub=$(($RANDOM % range))
252 range=$((nr_nodes - nodea))
253 nodeb=$(($RANDOM % range))
check-xl-vcpupin-parse.data-example
29 # A few attempts of pinning to a random range of cpus
44 # A few attempts of pinning to a random range of nodes
/xen/tools/xenmon/
xenmon.py
188 for i in range(0, NDOMAINS):
198 for i in range(0, NDOMAINS):
224 for x in range(0, NDOMAINS):
304 for i in range(0, NSAMPLES):
310 for i in range(0, NDOMAINS):
371 for dom in range(0, NDOMAINS):
578 for dom in range(0, NDOMAINS):
593 for i in range(0, NSAMPLES):
599 for i in range(0, NDOMAINS):
632 for dom in range(0, NDOMAINS):
[all …]
/xen/xen/arch/x86/mm/
mem_sharing.c
1396 unsigned long start = range->opaque ?: range->first_gfn; in range_share()
1398 while ( range->last_gfn >= start ) in range_share()
1430 range->opaque = start; in range_share()
2013 if ( mso.u.range._pad[0] || mso.u.range._pad[1] || in mem_sharing_memop()
2014 mso.u.range._pad[2] ) in mem_sharing_memop()
2023 if ( mso.u.range.opaque && in mem_sharing_memop()
2024 (mso.u.range.opaque < mso.u.range.first_gfn || in mem_sharing_memop()
2025 mso.u.range.opaque > mso.u.range.last_gfn) ) in mem_sharing_memop()
2070 max_cgfn < mso.u.range.last_gfn ) in mem_sharing_memop()
2077 rc = range_share(d, cd, &mso.u.range); in mem_sharing_memop()
[all …]
/xen/xen/tools/kconfig/tests/rand_nested_choice/
__init__.py
13 for i in range(20):
/xen/xen/common/xz/
dec_lzma2.c
96 uint32_t range; member
440 rc->range = (uint32_t)-1; in rc_reset()
480 if (rc->range < RC_TOP_VALUE) { in rc_normalize()
481 rc->range <<= RC_SHIFT_BITS; in rc_normalize()
503 bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; in rc_bit()
505 rc->range = bound; in rc_bit()
509 rc->range -= bound; in rc_bit()
559 rc->range >>= 1; in rc_direct()
560 rc->code -= rc->range; in rc_direct()
562 rc->code += rc->range & mask; in rc_direct()
/xen/xen/arch/x86/cpu/mtrr/
generic.c
346 int block=-1, range; in set_fixed_ranges() local
349 for (range=0; range < fixed_range_blocks[block].ranges; range++) in set_fixed_ranges()
350 set_fixed_range(fixed_range_blocks[block].base_msr + range, in set_fixed_ranges()
/xen/docs/features/
qemu-deprivilege.pandoc
38 ## Setting up a group and userid range
48 `xen-qemuuser-range-base` with the first UID. For example, under
51 adduser --system --uid 131072 --group --no-create-home xen-qemuuser-range-base
61 for the range base (as is done above) will result in all UIDs being
/xen/xen/arch/
Kconfig
4 range 1 4095
