/xen/xen/xsm/

  dummy.c
     31  set_to_dummy_if_null(ops, set_target);  in xsm_fixup_ops()
     32  set_to_dummy_if_null(ops, domctl);  in xsm_fixup_ops()
     33  set_to_dummy_if_null(ops, sysctl);  in xsm_fixup_ops()
     47  set_to_dummy_if_null(ops, grant_copy);  in xsm_fixup_ops()
     68  set_to_dummy_if_null(ops, profile);  in xsm_fixup_ops()
     70  set_to_dummy_if_null(ops, kexec);  in xsm_fixup_ops()
    106  set_to_dummy_if_null(ops, hypfs_op);  in xsm_fixup_ops()
    138  set_to_dummy_if_null(ops, do_mca);  in xsm_fixup_ops()
    141  set_to_dummy_if_null(ops, apic);  in xsm_fixup_ops()
    150  set_to_dummy_if_null(ops, pmu_op);  in xsm_fixup_ops()
    [all …]

  xsm_core.c
     71  static inline int verify(struct xsm_operations *ops)  in verify() argument
     74  if ( !ops )  in verify()
     76  xsm_fixup_ops(ops);  in verify()
    198  int __init register_xsm(struct xsm_operations *ops)  in register_xsm() argument
    200  if ( verify(ops) )  in register_xsm()
    209  xsm_ops = ops;  in register_xsm()

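Taken together, the dummy.c and xsm_core.c hits show the XSM registration pattern: before a module's hook table is published via register_xsm(), every hook left NULL is redirected to a default implementation, so callers never have to test for NULL. Below is a minimal, self-contained C sketch of that shape; the two-hook struct, the xsm_dummy_* defaults and the error handling are invented for illustration and far smaller than the real table.

#include <stdio.h>

/* Illustrative stand-in for struct xsm_operations: a table of hooks. */
struct xsm_operations {
    int (*domctl)(int cmd);
    int (*sysctl)(int cmd);
};

/* Default implementations used when a module leaves a hook NULL. */
static int xsm_dummy_domctl(int cmd) { (void)cmd; return 0; }
static int xsm_dummy_sysctl(int cmd) { (void)cmd; return 0; }

/* Same spirit as set_to_dummy_if_null(): patch in the default on demand. */
#define set_to_dummy_if_null(ops, fn) do {      \
        if ( !(ops)->fn )                       \
            (ops)->fn = xsm_dummy_##fn;         \
    } while ( 0 )

static void xsm_fixup_ops(struct xsm_operations *ops)
{
    set_to_dummy_if_null(ops, domctl);
    set_to_dummy_if_null(ops, sysctl);
}

static struct xsm_operations *xsm_ops;

/* register_xsm(): verify and fix up the table, then publish it. */
static int register_xsm(struct xsm_operations *ops)
{
    if ( !ops )
        return -1;
    xsm_fixup_ops(ops);
    xsm_ops = ops;
    return 0;
}

int main(void)
{
    static struct xsm_operations mod = { .domctl = NULL, .sysctl = NULL };

    register_xsm(&mod);
    printf("domctl -> %d\n", xsm_ops->domctl(42)); /* answered by the dummy hook */
    return 0;
}
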
/xen/xen/arch/x86/guest/

  hypervisor.c
     46  ops = *fns;  in hypervisor_probe()
     48  return ops.name;  in hypervisor_probe()
     53  if ( ops.setup )  in hypervisor_setup()
     54  ops.setup();  in hypervisor_setup()
     63  if ( ops.ap_setup )  in hypervisor_ap_setup()
     64  return ops.ap_setup();  in hypervisor_ap_setup()
     71  if ( ops.resume )  in hypervisor_resume()
     72  ops.resume();  in hypervisor_resume()
     77  if ( ops.e820_fixup )  in hypervisor_e820_fixup()
     78  ops.e820_fixup(e820);  in hypervisor_e820_fixup()
    [all …]

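hypervisor.c copies the probed guest framework's hook table into a file-scope ops variable by value and then guards every optional hook with an if before calling it. A compact sketch of that arrangement, assuming an invented three-hook struct and a made-up "demo" backend:

#include <stdio.h>

struct hypervisor_ops {
    const char *name;
    void (*setup)(void);       /* optional */
    int  (*ap_setup)(void);    /* optional */
    void (*resume)(void);      /* optional */
};

/* Filled in once at probe time; unset hooks stay NULL. */
static struct hypervisor_ops ops;

static const char *hypervisor_probe(const struct hypervisor_ops *fns)
{
    ops = *fns;                /* copied by value, as at hypervisor.c:46 */
    return ops.name;
}

static void hypervisor_setup(void)
{
    if ( ops.setup )           /* optional hook: call only if provided */
        ops.setup();
}

static int hypervisor_ap_setup(void)
{
    if ( ops.ap_setup )
        return ops.ap_setup();
    return 0;                  /* default when the hook is absent (assumed) */
}

/* Example backend that only cares about boot-time setup. */
static void demo_setup(void) { puts("backend setup"); }

int main(void)
{
    static const struct hypervisor_ops demo = { .name = "demo", .setup = demo_setup };

    printf("probed: %s\n", hypervisor_probe(&demo));
    hypervisor_setup();        /* runs demo_setup() */
    hypervisor_ap_setup();     /* silently skipped: no ap_setup hook */
    return 0;
}
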
/xen/xen/common/sched/

  arinc653.c
    184  const struct scheduler *ops,  in find_unit() argument
    212  find_unit(ops,  in update_schedule_units()
    230  const struct scheduler *ops,  in arinc653_sched_set() argument
    277  update_schedule_units(ops);  in arinc653_sched_set()
    307  const struct scheduler *ops,  in arinc653_sched_get() argument
    355  ops->sched_data = prv;  in a653sched_init()
    372  xfree(SCHED_PRIV(ops));  in a653sched_deinit()
    373  ops->sched_data = NULL;  in a653sched_deinit()
    433  update_schedule_units(ops);  in a653sched_alloc_udata()
    461  update_schedule_units(ops);  in a653sched_free_udata()
    [all …]

  rt.c
    234  return ops->sched_data;  in rt_priv()
    244  return &rt_priv(ops)->runq;  in rt_runq()
    364  rt_dump_unit(ops, svc);  in rt_dump_pcpu()
    383  runq = rt_runq(ops);  in rt_dump()
    385  replq = rt_replq(ops);  in rt_dump()
    676  rt_init(struct scheduler *ops)  in rt_init() argument
    694  ops->sched_data = prv;  in rt_init()
    712  ops->sched_data = NULL;  in rt_deinit()
   1306  replq_insert(ops, svc);  in rt_unit_wake()
   1308  runq_insert(ops, svc);  in rt_unit_wake()
    [all …]

  null.c
    114  return ops->sched_data;  in null_priv()
    133  static int null_init(struct scheduler *ops)  in null_init() argument
    150  ops->sched_data = prv;  in null_init()
    155  static void null_deinit(struct scheduler *ops)  in null_deinit() argument
    157  xfree(ops->sched_data);  in null_deinit()
    158  ops->sched_data = NULL;  in null_deinit()
    171  struct null_private *prv = null_priv(ops);  in null_deinit_pdata()
    223  struct null_private *prv = null_priv(ops);  in null_alloc_domdata()
    243  struct null_private *prv = null_priv(ops);  in null_free_domdata()
    456  struct null_private *prv = null_priv(ops);  in null_unit_insert()
    [all …]

  credit2.c
    600  return ops->sched_data;  in csched2_priv()
   2282  runq_tickle(ops, svc, now);  in csched2_unit_wake()
   2327  runq_tickle(ops, svc, now);  in csched2_context_saved()
   2892  migrate(ops, svc, trqd, now);  in csched2_unit_migrate()
   2899  const struct scheduler *ops,  in csched2_dom_cntl() argument
   3859  dump_pcpu(ops, j);  in csched2_dump()
   4123  csched2_init(struct scheduler *ops)  in csched2_init() argument
   4154  ops->sched_data = prv;  in csched2_init()
   4171  csched2_deinit(struct scheduler *ops)  in csched2_deinit() argument
   4175  prv = csched2_priv(ops);  in csched2_deinit()
    [all …]

  credit.c
    530  struct csched_private *prv = CSCHED_PRIV(ops);  in csched_deinit_pdata()
    926  const struct scheduler *ops = sr->scheduler;  in csched_unit_acct() local
   1048  struct csched_private *prv = CSCHED_PRIV(ops);  in csched_unit_remove()
   1170  const struct scheduler *ops,  in csched_dom_cntl() argument
   1242  csched_sys_cntl(const struct scheduler *ops,  in csched_sys_cntl() argument
   2085  csched_dump(const struct scheduler *ops)  in csched_dump() argument
   2179  csched_init(struct scheduler *ops)  in csched_init() argument
   2203  ops->sched_data = prv;  in csched_init()
   2221  csched_deinit(struct scheduler *ops)  in csched_deinit() argument
   2225  prv = CSCHED_PRIV(ops);  in csched_deinit()
    [all …]

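All five schedulers above follow the same convention: instance-private state is allocated in *_init(), stashed in ops->sched_data, fetched through a small *_priv() accessor, and freed and cleared again in *_deinit(). A self-contained sketch of that life cycle, with struct scheduler reduced to the one field the pattern needs and demo_* names standing in for a real scheduler:

#include <stdlib.h>
#include <stdio.h>

/* Reduced to the single field the pattern needs. */
struct scheduler {
    void *sched_data;
};

/* Per-instance private state, in the role of null_private / rt_private / csched_private. */
struct demo_private {
    unsigned int nr_units;
};

static struct demo_private *demo_priv(const struct scheduler *ops)
{
    return ops->sched_data;      /* cf. rt_priv(), null_priv(), csched2_priv() */
}

static int demo_init(struct scheduler *ops)
{
    struct demo_private *prv = calloc(1, sizeof(*prv));

    if ( !prv )
        return -1;               /* the real schedulers return -ENOMEM */

    ops->sched_data = prv;       /* publish the private state */
    return 0;
}

static void demo_deinit(struct scheduler *ops)
{
    free(ops->sched_data);       /* xfree() in Xen */
    ops->sched_data = NULL;
}

int main(void)
{
    struct scheduler sched = { 0 };

    demo_init(&sched);
    demo_priv(&sched)->nr_units++;
    printf("units: %u\n", demo_priv(&sched)->nr_units);
    demo_deinit(&sched);
    return 0;
}
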
/xen/xen/arch/x86/hvm/

  intercept.c
    119  const struct hvm_io_ops *ops = handler->ops;  in hvm_process_io_intercept() local
    234  const struct hvm_io_ops *ops = handler->ops;  in hvm_find_io_handler() local
    259  ops = handler->ops;  in hvm_io_intercept()
    260  if ( ops->complete != NULL )  in hvm_io_intercept()
    261  ops->complete(handler);  in hvm_io_intercept()
    290  handler->ops = &mmio_ops;  in register_mmio_handler()
    291  handler->mmio.ops = ops;  in register_mmio_handler()
    303  handler->ops = &portio_ops;  in register_portio_handler()
    349  ops = handler->ops;  in hvm_mmio_internal()
    350  if ( ops->complete != NULL )  in hvm_mmio_internal()
    [all …]

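intercept.c dispatches guest I/O through a per-handler ops pointer (set to &mmio_ops or &portio_ops at registration time) and treats the complete() hook as optional. A trimmed-down sketch of that dispatch step; the accept()/read() signatures and the single static handler are simplifications, not the hypervisor's real interface:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct io_handler;

/* One shared ops table per handler family (MMIO vs. port I/O). */
struct hvm_io_ops {
    bool (*accept)(const struct io_handler *h, unsigned long addr);
    int  (*read)(const struct io_handler *h, unsigned long addr, unsigned long *val);
    void (*complete)(const struct io_handler *h);   /* optional */
};

struct io_handler {
    const struct hvm_io_ops *ops;
};

static int io_intercept(const struct io_handler *handler, unsigned long addr,
                        unsigned long *val)
{
    const struct hvm_io_ops *ops = handler->ops;
    int rc;

    if ( !ops->accept(handler, addr) )
        return -1;

    rc = ops->read(handler, addr, val);

    if ( ops->complete != NULL )    /* optional post-access hook, as at line 260 */
        ops->complete(handler);

    return rc;
}

/* A toy MMIO family accepting one magic address. */
static bool mmio_accept(const struct io_handler *h, unsigned long a)
{ (void)h; return a == 0x1000; }
static int mmio_read(const struct io_handler *h, unsigned long a, unsigned long *v)
{ (void)h; (void)a; *v = 0xfeed; return 0; }

static const struct hvm_io_ops mmio_ops = { mmio_accept, mmio_read, NULL };

int main(void)
{
    struct io_handler handler = { .ops = &mmio_ops };   /* cf. register_mmio_handler() */
    unsigned long val = 0;

    if ( io_intercept(&handler, 0x1000, &val) == 0 )
        printf("read 0x%lx\n", val);
    return 0;
}
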
/xen/tools/libfsimage/common/

  fsimage.c
     95  fsi_plugin_ops_t *ops;  in fsi_open_file() local
     99  ops = fsi->f_plugin->fp_ops;  in fsi_open_file()
    100  ffi = ops->fpo_open(fsi, path);  in fsi_open_file()
    108  fsi_plugin_ops_t *ops;  in fsi_close_file() local
    112  ops = ffi->ff_fsi->f_plugin->fp_ops;  in fsi_close_file()
    113  err = ops->fpo_close(ffi);  in fsi_close_file()
    121  fsi_plugin_ops_t *ops;  in fsi_read_file() local
    125  ops = ffi->ff_fsi->f_plugin->fp_ops;  in fsi_read_file()
    126  ret = ops->fpo_read(ffi, buf, nbytes);  in fsi_read_file()
    134  fsi_plugin_ops_t *ops;  in fsi_pread_file() local
    [all …]

  fsimage_grub.c
    226  fsig_plugin_ops_t *ops = fsi->f_plugin->fp_data;  in fsig_mount() local
    241  if (!ops->fpo_mount(ffi, options)) {  in fsig_mount()
    265  fsig_plugin_ops_t *ops = fsi->f_plugin->fp_data;  in fsig_open() local
    272  if (ops->fpo_dir(ffi, path) == 0) {  in fsig_open()
    286  fsig_plugin_ops_t *ops = ffi->ff_fsi->f_plugin->fp_data;  in fsig_pread() local
    299  return (ops->fpo_read(ffi, buf, nbytes));  in fsig_pread()
    332  fsig_init(fsi_plugin_t *plugin, fsig_plugin_ops_t *ops)  in fsig_init() argument
    334  if (ops->fpo_version > FSIMAGE_PLUGIN_VERSION)  in fsig_init()
    337  plugin->fp_data = ops;  in fsig_init()

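libfsimage resolves every file operation through the plugin's ops table, and fsig_init() rejects a plugin whose declared fpo_version is newer than the library understands before stashing the table for later dispatch. A minimal sketch of that handshake; the struct fields, version constant and error convention here are illustrative rather than the library's actual ABI:

#include <stddef.h>
#include <stdio.h>

#define PLUGIN_VERSION 1   /* highest ops layout this host library understands */

typedef struct plugin_ops {
    int   fpo_version;
    int (*fpo_mount)(const char *options);
    long (*fpo_read)(void *buf, size_t nbytes);
} plugin_ops_t;

typedef struct plugin {
    const plugin_ops_t *fp_data;   /* set once the plugin is accepted */
} plugin_t;

/* Version-check then register, mirroring the shape of fsig_init(). */
static int plugin_init(plugin_t *plugin, const plugin_ops_t *ops)
{
    if ( ops->fpo_version > PLUGIN_VERSION )
        return -1;                 /* plugin is too new for this library */

    plugin->fp_data = ops;
    return 0;
}

/* Every later call simply indirects through the stored table. */
static long plugin_read(const plugin_t *plugin, void *buf, size_t nbytes)
{
    return plugin->fp_data->fpo_read(buf, nbytes);
}

static int  demo_mount(const char *o) { (void)o; return 0; }
static long demo_read(void *b, size_t n) { (void)b; return (long)n; }

int main(void)
{
    static const plugin_ops_t demo = { 1, demo_mount, demo_read };
    plugin_t p;

    if ( plugin_init(&p, &demo) == 0 )
        printf("read %ld bytes\n", plugin_read(&p, NULL, 16));
    return 0;
}
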
/xen/xen/include/asm-x86/

  iommu.h
     64  # define iommu_call(ops, fn, args...) ({ \  argument
     65  (void)(ops); \
     69  # define iommu_vcall(ops, fn, args...) ({ \  argument
     70  (void)(ops); \
     82  const struct iommu_ops *ops;  member
    121  const struct iommu_ops *ops = iommu_get_ops(); \
    123  if ( ops->sync_cache ) \
    124  iommu_vcall(ops, sync_cache, addr, size); \

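iommu_call()/iommu_vcall() funnel every hook invocation through one macro, so callers never spell out the indirection themselves; the two definitions visible here (one of which discards ops) suggest the call strategy differs between build configurations, and the sync_cache hit shows the usual guard around an optional hook. A simplified approximation of the idea that just indirects through the ops pointer (the real header's macro bodies differ):

#include <stddef.h>
#include <stdio.h>

struct iommu_ops {
    int  (*map_page)(unsigned long dfn, unsigned long mfn);
    void (*sync_cache)(const void *addr, unsigned int size);   /* optional */
};

/* One choke point for every hook call: the indirection strategy can be
 * changed here without touching any caller. */
#define iommu_call(ops, fn, args...)  ((ops)->fn(args))         /* value-returning */
#define iommu_vcall(ops, fn, args...) ((void)(ops)->fn(args))   /* void */

static int demo_map(unsigned long dfn, unsigned long mfn)
{
    printf("map dfn %lu -> mfn %lu\n", dfn, mfn);
    return 0;
}

static const struct iommu_ops demo_ops = { .map_page = demo_map };

static const struct iommu_ops *iommu_get_ops(void) { return &demo_ops; }

static void sync_if_needed(const void *addr, unsigned int size)
{
    const struct iommu_ops *ops = iommu_get_ops();

    if ( ops->sync_cache )                     /* optional hook, as at line 123 */
        iommu_vcall(ops, sync_cache, addr, size);
}

int main(void)
{
    int rc = iommu_call(iommu_get_ops(), map_page, 1, 2);

    sync_if_needed(NULL, 0);                   /* silently skipped: no sync_cache */
    return rc;
}
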
/xen/xen/arch/x86/x86_emulate/

  x86_emulate.c
   1833  if ( ops->read_cr && get_cpl(ctxt, ops) == 3 ) \
   3948  fail_if(lock_prefix ? !ops->cmpxchg : !ops->write);  in x86_emulate()
   4259  if ( nr_reps == 1 && ops->read_io && ops->write )  in x86_emulate()
   4270  fail_if(!ops->read_io || !ops->write);  in x86_emulate()
   4480  fail_if(!ops->read_segment || !ops->write);  in x86_emulate()
   5721  if ( !ops->read_cr || !ops->read_xcr ||  in x86_emulate()
   5734  if ( !ops->read_cr || !ops->write_xcr ||  in x86_emulate()
   5818  fail_if(!ops->read_segment || !ops->read_msr ||  in x86_emulate()
   5819  !ops->write_segment || !ops->write_msr);  in x86_emulate()
   5892  fail_if(!ops->read_segment || !ops->write);  in x86_emulate()
    [all …]

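x86_emulate() never assumes its caller supplied every callback: each instruction path checks the hooks it is about to use and bails out as unhandleable when one is missing. A stripped-down sketch of that convention around a hypothetical INS-style path (read from an I/O port, write to memory); the fail_if() body and return codes are illustrative, not copied from the emulator:

#include <stdio.h>

#define EMUL_OKAY         0
#define EMUL_UNHANDLEABLE 1

/* Bail out of the current emulation path when a needed hook is missing
 * (illustrative stand-in for the emulator's fail_if()). */
#define fail_if(p) do {                                     \
        if ( p ) { rc = EMUL_UNHANDLEABLE; goto done; }     \
    } while ( 0 )

struct emulate_ops {
    int (*write)(unsigned long addr, const void *val, unsigned int bytes); /* optional */
    int (*read_io)(unsigned int port, unsigned long *val);                 /* optional */
};

/* Hypothetical INS-style path: check the hooks it needs up front. */
static int emulate_ins(const struct emulate_ops *ops, unsigned int port)
{
    int rc = EMUL_OKAY;
    unsigned long data = 0;

    fail_if(!ops->read_io || !ops->write);   /* cf. x86_emulate.c:4270 */

    rc = ops->read_io(port, &data);
    if ( rc == EMUL_OKAY )
        rc = ops->write(0x1000, &data, sizeof(data));

 done:
    return rc;
}

int main(void)
{
    struct emulate_ops partial = { 0 };      /* caller supplies no I/O hooks */

    printf("rc = %d\n", emulate_ins(&partial, 0x60));   /* EMUL_UNHANDLEABLE */
    return 0;
}
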
/xen/xen/arch/arm/tee/

  tee.c
     38  return cur_mediator->ops->handle_call(regs);  in tee_handle_call()
     52  return cur_mediator->ops->domain_init(d);  in tee_domain_init()
     60  return cur_mediator->ops->relinquish_resources(d);  in tee_relinquish_resources()
     78  if ( desc->ops->probe() )  in tee_init()

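tee.c keeps a single active mediator and forwards every TEE operation through its ops table; tee_init() selects it by calling each registered descriptor's probe() hook. A compact sketch of that select-then-forward shape, with the descriptor array, registration step and NULL-mediator handling invented for the example:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cpu_regs { unsigned long r0; };

struct tee_mediator_ops {
    bool (*probe)(void);
    int  (*handle_call)(struct cpu_regs *regs);
};

struct tee_mediator_desc {
    const char *name;
    const struct tee_mediator_ops *ops;
};

static const struct tee_mediator_desc *cur_mediator;

/* Pick the first mediator whose probe() succeeds. */
static void tee_init(const struct tee_mediator_desc *descs, size_t n)
{
    for ( size_t i = 0; i < n; i++ )
        if ( descs[i].ops->probe() )
        {
            cur_mediator = &descs[i];
            return;
        }
}

static int tee_handle_call(struct cpu_regs *regs)
{
    if ( !cur_mediator )
        return -1;                  /* no TEE present */
    return cur_mediator->ops->handle_call(regs);
}

static bool demo_probe(void) { return true; }
static int  demo_call(struct cpu_regs *r) { return (int)r->r0; }

static const struct tee_mediator_ops demo_ops = { demo_probe, demo_call };

int main(void)
{
    const struct tee_mediator_desc descs[] = { { "demo", &demo_ops } };
    struct cpu_regs regs = { .r0 = 7 };

    tee_init(descs, 1);
    printf("call -> %d\n", tee_handle_call(&regs));
    return 0;
}
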
/xen/xen/drivers/passthrough/arm/

  iommu.c
     37  void __init iommu_set_ops(const struct iommu_ops *ops)  in iommu_set_ops() argument
     39  BUG_ON(ops == NULL);  in iommu_set_ops()
     41  if ( iommu_ops && iommu_ops != ops )  in iommu_set_ops()
     47  iommu_ops = ops;  in iommu_set_ops()

/xen/tools/libxc/

  xc_resource.c
     46  static int xc_resource_op_multi(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)  in xc_resource_op_multi() argument
     78  op = ops + i;  in xc_resource_op_multi()
    114  op = ops + i;  in xc_resource_op_multi()
    132  int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)  in xc_resource_op() argument
    135  return xc_resource_op_one(xch, ops);  in xc_resource_op()
    138  return xc_resource_op_multi(xch, nr_ops, ops);  in xc_resource_op()

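Unlike the hypervisor examples, ops here is an array of operation descriptors rather than a table of function pointers: xc_resource_op() takes a single-op fast path when nr_ops is 1 and otherwise walks the array in the multi-op variant. A small sketch of that split; the descriptor fields, the hypercall itself and the return convention are invented:

#include <stdint.h>
#include <stdio.h>

typedef struct resource_op {
    uint32_t id;
    uint32_t result;
} resource_op_t;

static int resource_op_one(resource_op_t *op)
{
    op->result = op->id * 10;        /* stand-in for a single hypercall */
    return 1;                        /* number of ops completed */
}

static int resource_op_multi(uint32_t nr_ops, resource_op_t *ops)
{
    for ( uint32_t i = 0; i < nr_ops; i++ )
    {
        resource_op_t *op = ops + i; /* walk the caller's array, as at line 78 */

        op->result = op->id * 10;
    }
    return (int)nr_ops;
}

/* Public entry point: pick the cheap path for the common single-op case. */
static int resource_op(uint32_t nr_ops, resource_op_t *ops)
{
    if ( nr_ops == 1 )
        return resource_op_one(ops);
    return resource_op_multi(nr_ops, ops);
}

int main(void)
{
    resource_op_t batch[3] = { { 1, 0 }, { 2, 0 }, { 3, 0 } };

    printf("completed %d ops, last result %u\n",
           resource_op(3, batch), (unsigned)batch[2].result);
    return 0;
}
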
/xen/tools/libxc/ (continued)

  xc_sr_restore.c
    187  ctx->restore.ops.set_gfn(ctx, pfns[i], mfns[i]);  in populate_pfns()
    234  ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);  in process_page_data()
    292  rc = ctx->restore.ops.localise_page(ctx, types[i], page_data);  in process_page_data()
    402  if ( !ctx->restore.ops.pfn_is_valid(ctx, pfn) )  in handle_page_data()
    600  rc = ctx->restore.ops.stream_complete(ctx);  in handle_checkpoint()
    673  rc = ctx->restore.ops.static_data_complete(ctx, &missing);  in handle_static_data_end()
    713  rc = ctx->restore.ops.process_record(ctx, rec);  in process_record()
    743  rc = ctx->restore.ops.setup(ctx);  in setup()
    788  if ( ctx->restore.ops.cleanup(ctx) )  in cleanup()
    863  rc = ctx->restore.ops.stream_complete(ctx);  in restore()
    [all …]

  xc_sr_save.c
    129  types[i] = mfns[i] = ctx->save.ops.pfn_to_gfn(ctx,  in write_batch()
    405  return ctx->save.ops.check_vm_state(ctx);  in send_dirty_pages()
    804  rc = ctx->save.ops.setup(ctx);  in setup()
    839  if ( ctx->save.ops.cleanup(ctx) )  in cleanup()
    869  rc = ctx->save.ops.static_data(ctx);  in save()
    877  rc = ctx->save.ops.start_of_stream(ctx);  in save()
    882  rc = ctx->save.ops.start_of_checkpoint(ctx);  in save()
    886  rc = ctx->save.ops.check_vm_state(ctx);  in save()
    908  rc = ctx->save.ops.end_of_checkpoint(ctx);  in save()
   1041  ctx.save.ops = save_ops_x86_hvm;  in xc_domain_save()
    [all …]

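The save/restore code separates guest-type-specific work from the common stream logic by hanging a callback table off the context: the generic loops only ever call ctx->save.ops.* / ctx->restore.ops.*, and the entry point installs the right table (save_ops_x86_hvm in the hit at line 1041). A reduced sketch of that arrangement; everything except the quoted field names is invented:

#include <stdio.h>

struct context;

/* Guest-type specific callbacks, installed once per stream. */
struct save_ops {
    int (*setup)(struct context *ctx);
    int (*start_of_stream)(struct context *ctx);
    int (*end_of_checkpoint)(struct context *ctx);
};

struct context {
    struct {
        struct save_ops ops;      /* ctx->save.ops.* in xc_sr_save.c */
    } save;
};

/* Common stream logic: no guest-type knowledge, only ops calls. */
static int save(struct context *ctx)
{
    int rc = ctx->save.ops.setup(ctx);

    if ( !rc )
        rc = ctx->save.ops.start_of_stream(ctx);
    if ( !rc )
        rc = ctx->save.ops.end_of_checkpoint(ctx);
    return rc;
}

/* A stand-in for a per-guest-type table such as save_ops_x86_hvm. */
static int hvm_setup(struct context *c) { (void)c; puts("hvm setup"); return 0; }
static int hvm_sos(struct context *c)   { (void)c; puts("hvm start of stream"); return 0; }
static int hvm_eoc(struct context *c)   { (void)c; puts("hvm end of checkpoint"); return 0; }

static const struct save_ops save_ops_hvm = { hvm_setup, hvm_sos, hvm_eoc };

int main(void)
{
    struct context ctx;

    ctx.save.ops = save_ops_hvm;   /* selected per guest type, cf. line 1041 */
    return save(&ctx);
}
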
/xen/tools/libxl/

  libxl_checkpoint_device.c
    133  dev->ops = dev->cds->ops[++dev->ops_index];  in device_setup_iterate()
    134  if (!dev->ops) {  in device_setup_iterate()
    160  } while (dev->ops->kind != dev->kind);  in device_setup_iterate()
    164  dev->ops->setup(egc,dev);  in device_setup_iterate()
    196  if (!dev->ops || !dev->matched)  in libxl__checkpoint_devices_teardown()
    200  dev->ops->teardown(egc,dev);  in libxl__checkpoint_devices_teardown()
    252  if (!dev->matched || !dev->ops->api) \
    255  dev->ops->api(egc,dev); \

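The checkpoint machinery keeps an array of ops tables and, for each device, advances ops_index until it reaches either a NULL terminator or a table whose kind matches the device, then invokes its setup hook. A condensed sketch of that matching loop, with libxl's async egc plumbing omitted and the kinds and names made up:

#include <stddef.h>
#include <stdio.h>

enum dev_kind { KIND_NIC, KIND_DISK };

struct checkpoint_device;

struct device_ops {
    enum dev_kind kind;
    void (*setup)(struct checkpoint_device *dev);
};

struct checkpoint_device {
    enum dev_kind kind;
    const struct device_ops *ops;             /* the table that matched */
    int ops_index;                            /* position in the ops array */
    const struct device_ops *const *all_ops;  /* NULL-terminated array */
};

/* Walk the ops array until one claims this device's kind (cf. lines 133-164). */
static int device_setup(struct checkpoint_device *dev)
{
    do {
        dev->ops = dev->all_ops[++dev->ops_index];
        if ( !dev->ops )
            return -1;                        /* nothing handles this kind */
    } while ( dev->ops->kind != dev->kind );

    dev->ops->setup(dev);
    return 0;
}

static void disk_setup(struct checkpoint_device *dev)
{
    (void)dev;
    puts("disk checkpoint setup");
}

static const struct device_ops disk_ops = { KIND_DISK, disk_setup };
static const struct device_ops *const all_ops[] = { &disk_ops, NULL };

int main(void)
{
    struct checkpoint_device dev = {
        .kind = KIND_DISK, .ops_index = -1, .all_ops = all_ops,
    };

    return device_setup(&dev);
}
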
/xen/xen/drivers/passthrough/x86/

  iommu.c
     55  iommu_ops = *iommu_init_ops->ops;  in iommu_hardware_setup()
     58  ASSERT(iommu_ops.init == iommu_init_ops->ops->init);  in iommu_hardware_setup()
    100  iommu_ops = *iommu_init_ops->ops;  in iommu_enable_x2apic()
    124  const struct iommu_ops *ops = iommu_get_ops();  in iommu_setup_hpet_msi() local
    125  return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;  in iommu_setup_hpet_msi()

/xen/xen/drivers/passthrough/

  device_tree.c
    132  const struct iommu_ops *ops = iommu_get_ops();  in iommu_add_dt_device() local
    140  if ( !ops )  in iommu_add_dt_device()
    157  if ( !ops->add_device || !ops->dt_xlate )  in iommu_add_dt_device()
    187  rc = ops->dt_xlate(dev, &iommu_spec);  in iommu_add_dt_device()
    199  rc = ops->add_device(0, dev);  in iommu_add_dt_device()

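iommu_add_dt_device() treats a missing ops table, or a table lacking the add_device/dt_xlate hooks it depends on, as "no usable IOMMU" and backs off before touching the device. A short sketch of that defensive check; the hook signatures and error values are simplified placeholders:

#include <stddef.h>
#include <stdio.h>

struct dt_device;

struct iommu_ops {
    int (*add_device)(unsigned int devfn, struct dt_device *dev);
    int (*dt_xlate)(struct dt_device *dev, unsigned int spec);
};

static const struct iommu_ops *iommu_get_ops(void) { return NULL; /* no driver */ }

static int iommu_add_dt_device(struct dt_device *dev)
{
    const struct iommu_ops *ops = iommu_get_ops();
    int rc;

    if ( !ops )
        return -1;   /* no IOMMU driver registered */

    /* Both hooks are required for device-tree based description. */
    if ( !ops->add_device || !ops->dt_xlate )
        return -2;   /* driver cannot describe devices via device tree */

    rc = ops->dt_xlate(dev, 0);
    if ( !rc )
        rc = ops->add_device(0, dev);
    return rc;
}

int main(void)
{
    printf("rc = %d\n", iommu_add_dt_device(NULL));
    return 0;
}
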
/xen/xen/arch/arm/

  io.c
     43  if ( !handler->ops->read(v, info, &r, handler->priv) )  in handle_read()
     75  ret = handler->ops->write(v, info, get_user_reg(regs, dabt.reg),  in handle_write()
    156  const struct mmio_handler_ops *ops,  in register_mmio_handler() argument
    168  handler->ops = ops;  in register_mmio_handler()

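The Arm MMIO path pairs each registered ops table with an opaque priv pointer and hands that pointer back on every read/write, so one ops implementation can serve several device instances. A minimal sketch of the register-then-dispatch flow; the single static handler, region matching and access widths are all omitted or invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_info { uint64_t gpa; };

struct mmio_handler_ops {
    bool (*read)(const struct mmio_info *info, uint64_t *r, void *priv);
    bool (*write)(const struct mmio_info *info, uint64_t r, void *priv);
};

struct mmio_handler {
    const struct mmio_handler_ops *ops;
    void *priv;                        /* instance cookie handed back to hooks */
};

static struct mmio_handler handler;

static void register_mmio_handler(const struct mmio_handler_ops *ops, void *priv)
{
    handler.ops = ops;                 /* cf. io.c:168 */
    handler.priv = priv;
}

static bool handle_read(const struct mmio_info *info, uint64_t *r)
{
    return handler.ops->read(info, r, handler.priv);
}

/* A fake device whose state lives behind priv. */
struct fake_dev { uint64_t reg; };

static bool fake_read(const struct mmio_info *info, uint64_t *r, void *priv)
{
    (void)info;
    *r = ((struct fake_dev *)priv)->reg;
    return true;
}

static const struct mmio_handler_ops fake_ops = { .read = fake_read };

int main(void)
{
    struct fake_dev dev = { .reg = 0xabc };
    struct mmio_info info = { .gpa = 0x9000000 };
    uint64_t val = 0;

    register_mmio_handler(&fake_ops, &dev);
    if ( handle_read(&info, &val) )
        printf("read 0x%llx\n", (unsigned long long)val);
    return 0;
}
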
/xen/tools/ocaml/libs/xs/

  xst.mli
     16  type ops = {  type
     29  val get_operations : int -> Xsraw.con -> ops
     30  val transaction : Xsraw.con -> (ops -> 'a) -> 'a

/xen/xen/include/asm-arm/tee/

  tee.h
     54  const struct tee_mediator_ops *ops;  member
     72  .ops = _ops, \

/xen/xen/include/asm-arm/

  mmio.h
     55  const struct mmio_handler_ops *ops;  member
     70  const struct mmio_handler_ops *ops,