/linux/arch/powerpc/mm/nohash/ |
A D | kaslr_booke.c |
    24 struct regions { struct
    43 struct regions __initdata regions; argument
    119 if (regions.reserved_mem < 0) in overlaps_reserved_region()
    147 if (base >= regions.pa_end) in overlaps_reserved_region()
    166 regions.dtb_end)) in overlaps_region()
    170 regions.initrd_end)) in overlaps_region()
    174 regions.crash_end)) in overlaps_region()
    326 if (regions.reserved_mem >= 0) in kaslr_choose_location()
    331 regions.pa_start = memstart_addr; in kaslr_choose_location()
    333 regions.dtb_start = __pa(dt_ptr); in kaslr_choose_location()
    [all …]
|
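The kaslr_booke.c hits above boil down to interval checks: a candidate kernel placement is rejected if it intersects the device tree, initrd, or crash-kernel region. A minimal user-space sketch of that test follows; the struct, helper names, and addresses are invented for illustration and are not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;	/* inclusive */
	uint64_t end;	/* exclusive */
};

/* Two half-open ranges overlap iff each one starts before the other ends. */
static bool ranges_overlap(struct range a, struct range b)
{
	return a.start < b.end && b.start < a.end;
}

static bool overlaps_region(struct range candidate,
			    const struct range *reserved, int nr)
{
	for (int i = 0; i < nr; i++)
		if (ranges_overlap(candidate, reserved[i]))
			return true;
	return false;
}

int main(void)
{
	/* hypothetical reserved areas: dtb, initrd, crash kernel */
	struct range reserved[] = {
		{ 0x01000000, 0x01010000 },
		{ 0x02000000, 0x03000000 },
		{ 0x10000000, 0x14000000 },
	};
	struct range candidate = { 0x02800000, 0x04800000 };

	printf("candidate %s\n",
	       overlaps_region(candidate, reserved, 3) ? "overlaps" : "is free");
	return 0;
}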
/linux/drivers/mtd/chips/ |
A D | jedec_probe.c |
    306 .regions = {
    318 .regions = {
    333 .regions = {
    348 .regions = {
    363 .regions = {
    378 .regions = {
    394 .regions = {
    411 .regions = {
    428 .regions = {
    443 .regions = {
    [all …]
|
A D | cfi_util.c |
    366 struct mtd_erase_region_info *regions = mtd->eraseregions; in cfi_varsize_frob() local
    380 while (i < mtd->numeraseregions && ofs >= regions[i].offset) in cfi_varsize_frob()
    390 if (ofs & (regions[i].erasesize-1)) in cfi_varsize_frob()
    400 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset) in cfi_varsize_frob()
    408 if ((ofs + len) & (regions[i].erasesize-1)) in cfi_varsize_frob()
    417 int size = regions[i].erasesize; in cfi_varsize_frob()
    428 if (ofs == regions[i].offset + size * regions[i].numblocks) in cfi_varsize_frob()
|
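The cfi_varsize_frob() hits walk an array of erase regions, each with its own erase block size, to locate the region an offset falls in and to check erase-block alignment. Below is a self-contained sketch of that lookup; the struct only loosely mirrors mtd_erase_region_info and the helper name is made up.

#include <stdint.h>
#include <stdio.h>

struct erase_region {
	uint64_t offset;	/* start of this region on the chip */
	uint32_t erasesize;	/* erase block size inside the region */
	uint32_t numblocks;	/* number of blocks of that size */
};

/* Return the index of the region containing ofs, or -1 if out of range. */
static int find_erase_region(const struct erase_region *regions, int nr,
			     uint64_t ofs)
{
	int i = 0;

	while (i < nr && ofs >= regions[i].offset)
		i++;
	i--;		/* we walked one region past the match */

	if (i < 0)
		return -1;
	if (ofs >= regions[i].offset +
		   (uint64_t)regions[i].erasesize * regions[i].numblocks)
		return -1;
	return i;
}

int main(void)
{
	/* hypothetical boot-sector layout: 8 x 8 KiB, then 63 x 64 KiB */
	struct erase_region regions[] = {
		{ 0x00000, 0x02000,  8 },
		{ 0x10000, 0x10000, 63 },
	};
	uint64_t ofs = 0x30000;
	int i = find_erase_region(regions, 2, ofs);

	if (i < 0 || (ofs & (regions[i].erasesize - 1)))
		printf("offset not erase-block aligned\n");
	else
		printf("offset is block %llu of region %d\n",
		       (unsigned long long)((ofs - regions[i].offset) /
					    regions[i].erasesize), i);
	return 0;
}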
/linux/mm/damon/ |
A D | vaddr-test.h |
    75 struct damon_addr_range regions[3] = {0,}; in damon_test_three_regions_in_vmas() local
    88 __damon_va_three_regions(&vmas[0], regions); in damon_test_three_regions_in_vmas()
    91 KUNIT_EXPECT_EQ(test, 25ul, regions[0].end); in damon_test_three_regions_in_vmas()
    93 KUNIT_EXPECT_EQ(test, 220ul, regions[1].end); in damon_test_three_regions_in_vmas()
    95 KUNIT_EXPECT_EQ(test, 330ul, regions[2].end); in damon_test_three_regions_in_vmas()
    134 unsigned long *regions, int nr_regions, in damon_do_test_apply_three_regions() argument
    144 r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); in damon_do_test_apply_three_regions()
    177 damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), in damon_test_apply_three_regions1()
    199 damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), in damon_test_apply_three_regions2()
    223 damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), in damon_test_apply_three_regions3()
    [all …]
|
A D | vaddr.c |
    125 struct damon_addr_range regions[3]) in __damon_va_three_regions()
    165 regions[0].start = ALIGN(start, DAMON_MIN_REGION); in __damon_va_three_regions()
    166 regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION); in __damon_va_three_regions()
    167 regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION); in __damon_va_three_regions()
    181 struct damon_addr_range regions[3]) in damon_va_three_regions()
    191 rc = __damon_va_three_regions(mm->mmap, regions); in damon_va_three_regions()
    244 struct damon_addr_range regions[3]; in __damon_va_init_regions() local
    248 if (damon_va_three_regions(t, regions)) { in __damon_va_init_regions()
    254 sz += regions[i].end - regions[i].start; in __damon_va_init_regions()
    262 r = damon_new_region(regions[i].start, regions[i].end); in __damon_va_init_regions()
    [all …]
|
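__damon_va_three_regions() in the hits above builds three monitoring regions by keeping everything except the two biggest unmapped gaps, with each boundary rounded to DAMON's minimum region granularity. A rough user-space sketch of that shape, with an invented MIN_REGION, downward rounding, and made-up mappings (the kernel code differs in detail):

#include <stdint.h>
#include <stdio.h>

#define MIN_REGION 4096UL
#define ALIGN_DOWN_TO(x, a) ((x) / (a) * (a))

struct range { uint64_t start, end; };	/* half-open [start, end) */

/*
 * mapped[] must be sorted and non-overlapping, with at least three
 * entries.  Writes three regions that together cover the mapped span
 * minus the two biggest gaps between mappings.
 */
static void three_regions(const struct range *mapped, int n,
			  struct range out[3])
{
	struct range big1 = { 0, 0 }, big2 = { 0, 0 };

	for (int i = 0; i + 1 < n; i++) {
		struct range gap = { mapped[i].end, mapped[i + 1].start };

		if (gap.end - gap.start > big1.end - big1.start) {
			big2 = big1;
			big1 = gap;
		} else if (gap.end - gap.start > big2.end - big2.start) {
			big2 = gap;
		}
	}
	if (big1.start > big2.start) {	/* keep the two gaps in address order */
		struct range t = big1; big1 = big2; big2 = t;
	}

	out[0].start = ALIGN_DOWN_TO(mapped[0].start, MIN_REGION);
	out[0].end   = ALIGN_DOWN_TO(big1.start, MIN_REGION);
	out[1].start = ALIGN_DOWN_TO(big1.end, MIN_REGION);
	out[1].end   = ALIGN_DOWN_TO(big2.start, MIN_REGION);
	out[2].start = ALIGN_DOWN_TO(big2.end, MIN_REGION);
	out[2].end   = ALIGN_DOWN_TO(mapped[n - 1].end, MIN_REGION);
}

int main(void)
{
	struct range mapped[] = {	/* hypothetical VMAs */
		{ 0x00010000, 0x00200000 },	/* text + data */
		{ 0x00300000, 0x00400000 },	/* heap */
		{ 0x7f000000, 0x7f800000 },	/* shared libs */
		{ 0xff000000, 0xff010000 },	/* stack */
	};
	struct range r[3];

	three_regions(mapped, 4, r);
	for (int i = 0; i < 3; i++)
		printf("region %d: 0x%llx - 0x%llx\n", i,
		       (unsigned long long)r[i].start,
		       (unsigned long long)r[i].end);
	return 0;
}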
/linux/drivers/gpu/drm/nouveau/nvkm/nvfw/ |
A D | acr.c |
    89 hdr->regions.no_regions); in flcn_acr_desc_dump()
    94 hdr->regions.region_props[i].start_addr); in flcn_acr_desc_dump()
    96 hdr->regions.region_props[i].end_addr); in flcn_acr_desc_dump()
    98 hdr->regions.region_props[i].region_id); in flcn_acr_desc_dump()
    100 hdr->regions.region_props[i].read_mask); in flcn_acr_desc_dump()
    102 hdr->regions.region_props[i].write_mask); in flcn_acr_desc_dump()
    132 hdr->regions.no_regions); in flcn_acr_desc_v1_dump()
    137 hdr->regions.region_props[i].start_addr); in flcn_acr_desc_v1_dump()
    139 hdr->regions.region_props[i].end_addr); in flcn_acr_desc_v1_dump()
    141 hdr->regions.region_props[i].region_id); in flcn_acr_desc_v1_dump()
    [all …]
|
/linux/drivers/vfio/platform/ |
A D | vfio_platform_common.c |
    147 if (!vdev->regions) in vfio_platform_regions_init()
    157 vdev->regions[i].addr = res->start; in vfio_platform_regions_init()
    158 vdev->regions[i].size = resource_size(res); in vfio_platform_regions_init()
    159 vdev->regions[i].flags = 0; in vfio_platform_regions_init()
    166 vdev->regions[i].flags |= in vfio_platform_regions_init()
    174 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_platform_regions_init()
    175 vdev->regions[i].flags |= in vfio_platform_regions_init()
    191 kfree(vdev->regions); in vfio_platform_regions_init()
    200 iounmap(vdev->regions[i].ioaddr); in vfio_platform_regions_cleanup()
    203 kfree(vdev->regions); in vfio_platform_regions_cleanup()
    [all …]
|
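The vfio_platform_regions_init() hits record each resource's start and size and only advertise mmap support when both are page aligned (the size & ~PAGE_MASK test above). A small sketch of that flag decision, with invented flag names and a fixed 4 KiB page size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

#define REGION_FLAG_READ  (1u << 0)
#define REGION_FLAG_WRITE (1u << 1)
#define REGION_FLAG_MMAP  (1u << 2)

struct region {
	uint64_t addr;
	uint64_t size;
	uint32_t flags;
};

static void init_region(struct region *r, uint64_t addr, uint64_t size)
{
	r->addr = addr;
	r->size = size;
	r->flags = REGION_FLAG_READ | REGION_FLAG_WRITE;

	/* mmap only works on whole, page-aligned regions */
	if (!(addr & ~PAGE_MASK) && !(size & ~PAGE_MASK))
		r->flags |= REGION_FLAG_MMAP;
}

int main(void)
{
	struct region r;

	init_region(&r, 0x10010000, 0x1000);
	printf("flags: 0x%x (%smmap)\n", r.flags,
	       (r.flags & REGION_FLAG_MMAP) ? "" : "no ");
	return 0;
}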
/linux/mm/ |
A D | memblock.c |
    189 type->regions[i].size)) in memblock_overlaps_region()
    342 memmove(&type->regions[r], &type->regions[r + 1], in memblock_remove_region()
    350 type->regions[0].base = 0; in memblock_remove_region()
    351 type->regions[0].size = 0; in memblock_remove_region()
    352 type->regions[0].flags = 0; in memblock_remove_region()
    466 old_array = type->regions; in memblock_double_array()
    467 type->regions = new_array; in memblock_double_array()
    1206 r = &type->regions[*idx]; in __next_mem_pfn_range()
    1649 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); in memblock_end_of_DRAM()
    1789 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); in memblock_search_pfn_nid()
    [all …]
|
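memblock keeps each region type in one flat, ordered array, so memblock_remove_region() is a memmove() of the tail plus a count decrement, and an emptied type keeps a single zeroed placeholder entry (lines 350-352 above). A simplified stand-alone sketch; the structs are stand-ins, not the real memblock types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct region {
	uint64_t base;
	uint64_t size;
};

struct region_type {
	unsigned long cnt;
	struct region regions[8];
};

static void remove_region(struct region_type *type, unsigned long r)
{
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - r - 1) * sizeof(type->regions[0]));
	type->cnt--;

	/* when nothing is left, keep one empty placeholder entry */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
	}
}

int main(void)
{
	struct region_type mem = {
		.cnt = 3,
		.regions = {
			{ 0x00000000, 0x20000000 },
			{ 0x40000000, 0x10000000 },
			{ 0x80000000, 0x40000000 },
		},
	};

	remove_region(&mem, 1);
	for (unsigned long i = 0; i < mem.cnt; i++)
		printf("region %lu: base 0x%llx size 0x%llx\n", i,
		       (unsigned long long)mem.regions[i].base,
		       (unsigned long long)mem.regions[i].size);
	return 0;
}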
/linux/drivers/vfio/fsl-mc/ |
A D | vfio_fsl_mc.c |
    32 if (!vdev->regions) in vfio_fsl_mc_open_device()
    36 struct resource *res = &mc_dev->regions[i]; in vfio_fsl_mc_open_device()
    39 vdev->regions[i].addr = res->start; in vfio_fsl_mc_open_device()
    40 vdev->regions[i].size = resource_size(res); in vfio_fsl_mc_open_device()
    41 vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS; in vfio_fsl_mc_open_device()
    47 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_fsl_mc_open_device()
    48 vdev->regions[i].flags |= in vfio_fsl_mc_open_device()
    64 iounmap(vdev->regions[i].ioaddr); in vfio_fsl_mc_regions_cleanup()
    65 kfree(vdev->regions); in vfio_fsl_mc_regions_cleanup()
    255 region = &vdev->regions[index]; in vfio_fsl_mc_read()
    [all …]
|
/linux/Documentation/admin-guide/device-mapper/ |
A D | dm-clone.rst |
    68 dm-clone divides the source and destination devices in fixed sized regions.
    112 of regions being copied, the default being 1 region.
    119 batches of this many regions.
    170 hydration_threshold <#regions> Maximum number of regions being copied from
    176 batches of this many regions.
    185 <region size> <#hydrated regions>/<#total regions> <#hydrating regions>
    194 #hydrated regions Number of regions that have finished hydrating
    195 #total regions Total number of regions to hydrate
    196 #hydrating regions Number of regions currently hydrating
    219 `hydration_threshold <#regions>`
    [all …]
|
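The dm-clone excerpt describes fixed sized regions and a status line of the form <#hydrated regions>/<#total regions>. A tiny sketch of that arithmetic with made-up sizes, assuming sizes expressed in 512-byte sectors and rounding the region count up so a partial tail region is counted:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t device_sectors = 41943040;	/* 20 GiB in 512-byte sectors */
	uint64_t region_sectors = 8;		/* 4 KiB regions */
	uint64_t hydrated = 3145728;		/* regions already copied */

	/* round up: a partial tail region still needs hydrating */
	uint64_t total = (device_sectors + region_sectors - 1) / region_sectors;

	printf("%llu/%llu regions hydrated (%.1f%%)\n",
	       (unsigned long long)hydrated, (unsigned long long)total,
	       100.0 * (double)hydrated / (double)total);
	return 0;
}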
/linux/Documentation/vm/damon/ |
A D | design.rst |
    51 address regions is just wasteful. However, because DAMON can deal with some
    52 level of noise using the adaptive regions adjustment mechanism, tracking every
    58 distinct regions that cover every mapped area of the address space. The two
    59 gaps between the three regions are the two biggest unmapped areas in the given
    69 (small mmap()-ed regions and munmap()-ed regions)
    97 ``regions update interval``, ``minimum number of regions``, and ``maximum
    98 number of regions``.
    139 to set the minimum and the maximum number of regions for the trade-off.
    148 Even somehow the initial monitoring target regions are well constructed to
    157 splits each region into two or three regions if the total number of regions
    [all …]
|
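design.rst mentions that DAMON splits each region into two or three pieces when the total region count has headroom below the maximum. The sketch below mimics only that bookkeeping: the equal-size split points and the two-versus-three rule are simplifications of mine, not DAMON's exact policy.

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, end; };

/*
 * Split region r in place into `pieces` equal parts, appending the new
 * parts at regions[*nr].  Assumes the array has room.
 */
static void split_region(struct region *regions, int *nr, int r, int pieces)
{
	uint64_t len = (regions[r].end - regions[r].start) / pieces;

	for (int i = 1; i < pieces; i++) {
		regions[*nr].start = regions[r].start + (uint64_t)i * len;
		regions[*nr].end = (i == pieces - 1) ?
				   regions[r].end :
				   regions[r].start + (uint64_t)(i + 1) * len;
		(*nr)++;
	}
	regions[r].end = regions[r].start + len;
}

int main(void)
{
	struct region regions[16] = { { 0x1000, 0x10000 }, { 0x20000, 0x80000 } };
	int nr = 2, max_nr = 10;

	/* split into three when there is plenty of headroom, else two */
	int pieces = (nr * 3 <= max_nr) ? 3 : 2;

	for (int i = 0, orig = nr; i < orig; i++)
		split_region(regions, &nr, i, pieces);

	for (int i = 0; i < nr; i++)
		printf("[0x%llx, 0x%llx)\n",
		       (unsigned long long)regions[i].start,
		       (unsigned long long)regions[i].end);
	return 0;
}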
/linux/Documentation/networking/devlink/ |
A D | devlink-region.rst |
    7 ``devlink`` regions enable access to driver defined address regions using
    10 Each device can create and register its own supported address regions. The
    22 address regions that are otherwise inaccessible to the user.
    45 # Show all of the exposed regions with region sizes:
    68 As regions are likely very device or driver specific, no generic regions are
    70 specific regions a driver supports.
|
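devlink regions, per the excerpt, let a driver expose a named, driver-defined address area whose contents users can capture and read. The toy user-space model below illustrates just that idea of a named region plus a point-in-time snapshot; it is not the devlink netlink API or the devlink command-line tool, and the region name is only an example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct region_snapshot {
	uint32_t id;
	uint8_t *data;
};

struct region {
	const char *name;
	size_t size;
	const uint8_t *live;	/* "device" contents, owned by the driver */
};

static int region_snapshot_take(const struct region *r, uint32_t id,
				struct region_snapshot *snap)
{
	snap->data = malloc(r->size);
	if (!snap->data)
		return -1;
	memcpy(snap->data, r->live, r->size);	/* freeze current contents */
	snap->id = id;
	return 0;
}

int main(void)
{
	uint8_t device_mem[64] = { 0xde, 0xad, 0xbe, 0xef };
	struct region cr_space = { "cr-space", sizeof(device_mem), device_mem };
	struct region_snapshot snap;

	if (region_snapshot_take(&cr_space, 1, &snap))
		return 1;

	device_mem[0] = 0x00;	/* later device changes don't affect snapshot 1 */
	printf("%s snapshot %u byte 0: 0x%02x\n",
	       cr_space.name, snap.id, snap.data[0]);
	free(snap.data);
	return 0;
}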
/linux/drivers/gpu/drm/i915/gem/selftests/ |
A D | i915_gem_dmabuf.c |
    91 struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM]; in igt_dmabuf_import_same_driver_lmem()
    144 struct intel_memory_region **regions, in igt_dmabuf_import_same_driver() argument
    158 regions, num_regions); in igt_dmabuf_import_same_driver()
    202 if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) { in igt_dmabuf_import_same_driver()
    245 struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM]; in igt_dmabuf_import_same_driver_smem()
    253 struct intel_memory_region *regions[2]; in igt_dmabuf_import_same_driver_lmem_smem() local
    255 if (!i915->mm.regions[INTEL_REGION_LMEM]) in igt_dmabuf_import_same_driver_lmem_smem()
    258 regions[0] = i915->mm.regions[INTEL_REGION_LMEM]; in igt_dmabuf_import_same_driver_lmem_smem()
    259 regions[1] = i915->mm.regions[INTEL_REGION_SMEM]; in igt_dmabuf_import_same_driver_lmem_smem()
    260 return igt_dmabuf_import_same_driver(i915, regions, 2); in igt_dmabuf_import_same_driver_lmem_smem()
|
/linux/drivers/soc/qcom/ |
A D | smem.c |
    276 struct smem_region regions[]; member
    408 header = smem->regions[0].virt_base; in qcom_smem_alloc_global()
    493 header = smem->regions[0].virt_base; in qcom_smem_get_global()
    501 region = &smem->regions[i]; in qcom_smem_get_global()
    681 header = smem->regions[0].virt_base; in qcom_smem_get_sbl_version()
    692 ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; in qcom_smem_get_ptable()
    919 smem->regions[0].size = rmem->size; in qcom_smem_probe()
    938 smem->regions[i].aux_base, in qcom_smem_probe()
    939 smem->regions[i].size); in qcom_smem_probe()
    940 if (!smem->regions[i].virt_base) { in qcom_smem_probe()
    [all …]
|
/linux/drivers/net/dsa/sja1105/ |
A D | sja1105_devlink.c |
    85 priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *), in sja1105_setup_devlink_regions()
    87 if (!priv->regions) in sja1105_setup_devlink_regions()
    97 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_setup_devlink_regions()
    101 priv->regions[i] = region; in sja1105_setup_devlink_regions()
    113 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_teardown_devlink_regions()
    115 kfree(priv->regions); in sja1105_teardown_devlink_regions()
|
/linux/drivers/virt/acrn/ |
A D | mm.c |
    20 struct vm_memory_region_batch *regions; in modify_region() local
    23 regions = kzalloc(sizeof(*regions), GFP_KERNEL); in modify_region()
    24 if (!regions) in modify_region()
    27 regions->vmid = vm->vmid; in modify_region()
    28 regions->regions_num = 1; in modify_region()
    29 regions->regions_gpa = virt_to_phys(region); in modify_region()
    31 ret = hcall_set_memory_regions(virt_to_phys(regions)); in modify_region()
    36 kfree(regions); in modify_region()
|
/linux/Documentation/admin-guide/mm/damon/ |
A D | usage.rst |
    47 ``regions update interval``, and min/max number of monitoring target regions by
    93 updates the monitoring target regions so that entire memory mappings of target
    97 workloads and therefore want to set optimal initial regions for the 'adaptive
    98 regions adjustment'.
    102 monitoring target regions by themselves.
    104 In such cases, users can explicitly set the initial monitoring target regions
    122 Note that this sets the initial monitoring target regions only. In case of
    124 regions after one ``regions update interval``. Therefore, users should set the
    125 ``regions update interval`` large enough in this case, if they don't want the
    146 Note that the ranges are closed interval. Bytes for the size of regions
    [all …]
|
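usage.rst lets users hand DAMON an explicit list of initial monitoring target regions, given as closed intervals. A small sanity-check sketch for such a list (start <= end, sorted, non-overlapping); the validation rules and addresses are assumptions of mine, not DAMON's own checks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct target_region {
	uint64_t start;
	uint64_t end;	/* inclusive, per the "closed interval" note */
};

static bool regions_valid(const struct target_region *r, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (r[i].start > r[i].end)
			return false;
		if (i && r[i].start <= r[i - 1].end)	/* must not touch or overlap */
			return false;
	}
	return true;
}

int main(void)
{
	struct target_region heap_and_stack[] = {
		{ 0x559f00000000, 0x559f3fffffff },	/* hypothetical heap */
		{ 0x7ffd80000000, 0x7ffd8fffffff },	/* hypothetical stack */
	};

	printf("initial regions %s\n",
	       regions_valid(heap_and_stack, 2) ? "look sane" : "are malformed");
	return 0;
}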
/linux/drivers/net/wireless/ath/ath10k/ |
A D | coredump.c |
    1297 .regions = qca6174_hw10_mem_regions,
    1306 .regions = qca6174_hw10_mem_regions,
    1315 .regions = qca6174_hw10_mem_regions,
    1324 .regions = qca6174_hw21_mem_regions,
    1333 .regions = qca6174_hw30_mem_regions,
    1342 .regions = qca6174_hw30_mem_regions,
    1360 .regions = qca6174_hw30_mem_regions,
    1369 .regions = qca988x_hw20_mem_regions,
    1378 .regions = qca9984_hw10_mem_regions,
    1387 .regions = qca9984_hw10_mem_regions,
    [all …]
|
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ |
A D | gp102.c |
    200 desc->regions.no_regions = 2; in gp102_acr_load_load()
    201 desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; in gp102_acr_load_load()
    202 desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; in gp102_acr_load_load()
    203 desc->regions.region_props[0].region_id = 1; in gp102_acr_load_load()
    204 desc->regions.region_props[0].read_mask = 0xf; in gp102_acr_load_load()
    205 desc->regions.region_props[0].write_mask = 0xc; in gp102_acr_load_load()
    206 desc->regions.region_props[0].client_mask = 0x2; in gp102_acr_load_load()
    207 desc->regions.region_props[0].shadow_mem_start_addr = in gp102_acr_load_load()
|
/linux/arch/microblaze/mm/ |
A D | init.c |
    157 memblock.memory.regions[0].size = memory_size; in mm_cmdline_setup()
    200 if ((u32) memblock.memory.regions[0].size < 0x400000) { in mmu_init()
    205 if ((u32) memblock.memory.regions[0].size < kernel_tlb) { in mmu_init()
    211 memory_start = (u32) memblock.memory.regions[0].base; in mmu_init()
    212 lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; in mmu_init()
|
/linux/drivers/bus/fsl-mc/ |
A D | fsl-mc-bus.c |
    667 struct resource *regions; in fsl_mc_device_get_mmio_regions() local
    686 regions = kmalloc_array(obj_desc->region_count, in fsl_mc_device_get_mmio_regions()
    687 sizeof(regions[0]), GFP_KERNEL); in fsl_mc_device_get_mmio_regions()
    688 if (!regions) in fsl_mc_device_get_mmio_regions()
    715 &regions[i].start); in fsl_mc_device_get_mmio_regions()
    741 regions[i].end = regions[i].start + region_desc.size - 1; in fsl_mc_device_get_mmio_regions()
    744 regions[i].flags |= IORESOURCE_MEM; in fsl_mc_device_get_mmio_regions()
    747 mc_dev->regions = regions; in fsl_mc_device_get_mmio_regions()
    751 kfree(regions); in fsl_mc_device_get_mmio_regions()
    772 kfree(mc_dev->regions); in fsl_mc_device_release()
    [all …]
|
/linux/sound/drivers/opl4/ |
A D | opl4_synth.c |
    485 const struct opl4_region_ptr *regions; in snd_opl4_note_on() local
    493 regions = &snd_yrw801_regions[i]; in snd_opl4_note_on()
    494 for (i = 0; i < regions->count; i++) { in snd_opl4_note_on()
    495 if (note >= regions->regions[i].key_min && in snd_opl4_note_on()
    496 note <= regions->regions[i].key_max) { in snd_opl4_note_on()
    497 sound[voices] = &regions->regions[i].sound; in snd_opl4_note_on()
|
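The snd_opl4_note_on() hits pick every sample region whose [key_min, key_max] span contains the played note. A trivial stand-alone version of that lookup with an invented region table:

#include <stdio.h>

struct key_region {
	int key_min;
	int key_max;
	const char *sound;
};

int main(void)
{
	const struct key_region regions[] = {
		{ 0,  47,  "bass sample"   },
		{ 48, 71,  "middle sample" },
		{ 72, 127, "treble sample" },
	};
	int note = 60;	/* middle C */

	for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (note >= regions[i].key_min && note <= regions[i].key_max)
			printf("note %d uses %s\n", note, regions[i].sound);
	return 0;
}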
/linux/arch/mips/generic/ |
A D | yamon-dt.c |
    44 const struct yamon_mem_region *regions, in gen_fdt_mem_array() argument
    53 for (mr = regions; mr->size && memsize; ++mr) { in gen_fdt_mem_array()
    76 const struct yamon_mem_region *regions) in yamon_dt_append_memory() argument
    136 mem_entries = gen_fdt_mem_array(regions, mem_array, in yamon_dt_append_memory()
    145 mem_entries = gen_fdt_mem_array(regions, mem_array, in yamon_dt_append_memory()
|
/linux/drivers/soc/fsl/dpio/ |
A D | dpio-driver.c |
    205 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start, in dpaa2_dpio_probe()
    206 resource_size(&dpio_dev->regions[1]), in dpaa2_dpio_probe()
    209 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start, in dpaa2_dpio_probe()
    210 resource_size(&dpio_dev->regions[2]), in dpaa2_dpio_probe()
    220 desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start, in dpaa2_dpio_probe()
    221 resource_size(&dpio_dev->regions[1])); in dpaa2_dpio_probe()
|
/linux/drivers/fpga/ |
A D | dfl-afu-region.c |
    21 INIT_LIST_HEAD(&afu->regions); in afu_mmio_region_init()
    25 list_for_each_entry((region), &(afu)->regions, node)
    78 list_add(&region->node, &afu->regions); in afu_mmio_region_add()
    100 list_for_each_entry_safe(region, tmp, &afu->regions, node) in afu_mmio_region_destroy()
|