Lines matching refs: box
133 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) in uncore_msr_read_counter() argument
142 void uncore_mmio_exit_box(struct intel_uncore_box *box) in uncore_mmio_exit_box() argument
144 if (box->io_addr) in uncore_mmio_exit_box()
145 iounmap(box->io_addr); in uncore_mmio_exit_box()
148 u64 uncore_mmio_read_counter(struct intel_uncore_box *box, in uncore_mmio_read_counter() argument
151 if (!box->io_addr) in uncore_mmio_read_counter()
154 if (!uncore_mmio_is_valid_offset(box, event->hw.event_base)) in uncore_mmio_read_counter()
157 return readq(box->io_addr + event->hw.event_base); in uncore_mmio_read_counter()
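The uncore_mmio_read_counter() hits above show the read guarded twice: the counter is only touched if the BAR was actually ioremap()ed (box->io_addr) and the event offset passes uncore_mmio_is_valid_offset(). A minimal kernel-style sketch of that guarded readq() pattern, with hypothetical names (read_mmio_ctr, win_size) standing in for the uncore fields:

    /* Hypothetical helper (64-bit kernel code, not the driver itself):
     * read a 64-bit counter only if the MMIO window exists and the
     * requested offset lies inside it. */
    #include <linux/types.h>
    #include <linux/io.h>

    static u64 read_mmio_ctr(void __iomem *base, unsigned long offset,
                             unsigned long win_size)
    {
        if (!base)              /* BAR never mapped, or already unmapped */
            return 0;
        if (offset >= win_size) /* offset outside the mapped window */
            return 0;
        return readq(base + offset);
    }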
164 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_constraint() argument
178 (!uncore_box_is_fake(box) && reg1->alloc)) in uncore_get_constraint()
181 er = &box->shared_regs[reg1->idx]; in uncore_get_constraint()
193 if (!uncore_box_is_fake(box)) in uncore_get_constraint()
201 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_put_constraint() argument
214 if (uncore_box_is_fake(box) || !reg1->alloc) in uncore_put_constraint()
217 er = &box->shared_regs[reg1->idx]; in uncore_put_constraint()
222 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) in uncore_shared_reg_config() argument
228 er = &box->shared_regs[idx]; in uncore_shared_reg_config()
237 static void uncore_assign_hw_event(struct intel_uncore_box *box, in uncore_assign_hw_event() argument
243 hwc->last_tag = ++box->tags[idx]; in uncore_assign_hw_event()
246 hwc->event_base = uncore_fixed_ctr(box); in uncore_assign_hw_event()
247 hwc->config_base = uncore_fixed_ctl(box); in uncore_assign_hw_event()
251 hwc->config_base = uncore_event_ctl(box, hwc->idx); in uncore_assign_hw_event()
252 hwc->event_base = uncore_perf_ctr(box, hwc->idx); in uncore_assign_hw_event()
255 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) in uncore_perf_event_update() argument
261 shift = 64 - uncore_freerunning_bits(box, event); in uncore_perf_event_update()
263 shift = 64 - uncore_fixed_ctr_bits(box); in uncore_perf_event_update()
265 shift = 64 - uncore_perf_ctr_bits(box); in uncore_perf_event_update()
270 new_count = uncore_read_counter(box, event); in uncore_perf_event_update()
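uncore_perf_event_update() computes shift = 64 - (counter width) and uses it to sign-extend both the previous and the new raw value, so a wrap of the narrower hardware counter still produces the correct positive delta. A small userspace model of that shift trick (delta() and the 48-bit width are illustrative, not taken from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a possibly-wrapped reading of a `bits`-wide counter into a delta. */
    static uint64_t delta(uint64_t prev, uint64_t now, int bits)
    {
        int shift = 64 - bits;
        /* Shift left to discard stale high bits, subtract, then
         * arithmetic-shift back down so a wrap is handled by two's
         * complement rather than an explicit compare. */
        int64_t d = (int64_t)(now << shift) - (int64_t)(prev << shift);
        return (uint64_t)(d >> shift);
    }

    int main(void)
    {
        /* 48-bit counter wrapping from near its max back to a small value:
         * the delta is 0x20, not a huge negative number. */
        printf("%llu\n",
               (unsigned long long)delta(0xFFFFFFFFFFF0ULL, 0x10ULL, 48));
        return 0;
    }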
287 struct intel_uncore_box *box; in uncore_pmu_hrtimer() local
292 box = container_of(hrtimer, struct intel_uncore_box, hrtimer); in uncore_pmu_hrtimer()
293 if (!box->n_active || box->cpu != smp_processor_id()) in uncore_pmu_hrtimer()
305 list_for_each_entry(event, &box->active_list, active_entry) { in uncore_pmu_hrtimer()
306 uncore_perf_event_update(box, event); in uncore_pmu_hrtimer()
309 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) in uncore_pmu_hrtimer()
310 uncore_perf_event_update(box, box->events[bit]); in uncore_pmu_hrtimer()
314 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); in uncore_pmu_hrtimer()
318 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) in uncore_pmu_start_hrtimer() argument
320 hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), in uncore_pmu_start_hrtimer()
324 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) in uncore_pmu_cancel_hrtimer() argument
326 hrtimer_cancel(&box->hrtimer); in uncore_pmu_cancel_hrtimer()
329 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) in uncore_pmu_init_hrtimer() argument
331 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in uncore_pmu_init_hrtimer()
332 box->hrtimer.function = uncore_pmu_hrtimer; in uncore_pmu_init_hrtimer()
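Together, uncore_pmu_hrtimer(), uncore_pmu_start_hrtimer(), uncore_pmu_cancel_hrtimer() and uncore_pmu_init_hrtimer() form the usual self-rearming timer pattern: initialise with a callback, start relative to now, and in the callback update the active counters, forward the timer and return HRTIMER_RESTART so slow-moving counters get folded before they wrap. A hedged sketch of that pattern as a stand-alone module, in the same hrtimer_init()/.function style as the lines above (poll_timer, poll_fn and the 60-second interval are illustrative, not the uncore code itself):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/time64.h>

    static struct hrtimer poll_timer;
    static u64 poll_ns = 60ULL * NSEC_PER_SEC;  /* cf. UNCORE_PMU_HRTIMER_INTERVAL */

    static enum hrtimer_restart poll_fn(struct hrtimer *t)
    {
        /* ...fold the active counters here... */
        hrtimer_forward_now(t, ns_to_ktime(poll_ns));
        return HRTIMER_RESTART;     /* re-arm; HRTIMER_NORESTART would stop it */
    }

    static int __init poll_init(void)
    {
        hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        poll_timer.function = poll_fn;
        hrtimer_start(&poll_timer, ns_to_ktime(poll_ns), HRTIMER_MODE_REL_PINNED);
        return 0;
    }

    static void __exit poll_exit(void)
    {
        hrtimer_cancel(&poll_timer);
    }

    module_init(poll_init);
    module_exit(poll_exit);
    MODULE_LICENSE("GPL");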
339 struct intel_uncore_box *box; in uncore_alloc_box() local
341 size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg); in uncore_alloc_box()
343 box = kzalloc_node(size, GFP_KERNEL, node); in uncore_alloc_box()
344 if (!box) in uncore_alloc_box()
348 raw_spin_lock_init(&box->shared_regs[i].lock); in uncore_alloc_box()
350 uncore_pmu_init_hrtimer(box); in uncore_alloc_box()
351 box->cpu = -1; in uncore_alloc_box()
352 box->dieid = -1; in uncore_alloc_box()
355 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; in uncore_alloc_box()
357 INIT_LIST_HEAD(&box->active_list); in uncore_alloc_box()
359 return box; in uncore_alloc_box()
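uncore_alloc_box() makes a single zeroed, NUMA-local allocation sized as sizeof(*box) plus numshared trailing intel_uncore_extra_reg slots, then initialises each slot's lock and marks the box as unbound (cpu = dieid = -1). A userspace model of that flexible-array allocation pattern (struct box / struct extra_reg here are illustrative, not the kernel's types):

    #include <stdlib.h>
    #include <pthread.h>

    struct extra_reg {
        pthread_mutex_t lock;       /* stands in for the raw spinlock */
        unsigned long long config;
    };

    struct box {
        int cpu;
        int dieid;
        struct extra_reg shared_regs[];  /* flexible array member */
    };

    static struct box *alloc_box(int numshared)
    {
        /* one zeroed allocation: header plus numshared trailing slots */
        struct box *b = calloc(1, sizeof(*b) + numshared * sizeof(struct extra_reg));
        if (!b)
            return NULL;
        for (int i = 0; i < numshared; i++)
            pthread_mutex_init(&b->shared_regs[i].lock, NULL);
        b->cpu = -1;    /* no owner CPU yet */
        b->dieid = -1;  /* not bound to a die yet */
        return b;
    }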
368 static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) in is_box_event() argument
370 return &box->pmu->pmu == event->pmu; in is_box_event()
374 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, in uncore_collect_events() argument
380 max_count = box->pmu->type->num_counters; in uncore_collect_events()
381 if (box->pmu->type->fixed_ctl) in uncore_collect_events()
384 if (box->n_events >= max_count) in uncore_collect_events()
387 n = box->n_events; in uncore_collect_events()
389 if (is_box_event(box, leader)) { in uncore_collect_events()
390 box->event_list[n] = leader; in uncore_collect_events()
398 if (!is_box_event(box, event) || in uncore_collect_events()
405 box->event_list[n] = event; in uncore_collect_events()
412 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_event_constraint() argument
414 struct intel_uncore_type *type = box->pmu->type; in uncore_get_event_constraint()
418 c = type->ops->get_constraint(box, event); in uncore_get_event_constraint()
436 static void uncore_put_event_constraint(struct intel_uncore_box *box, in uncore_put_event_constraint() argument
439 if (box->pmu->type->ops->put_constraint) in uncore_put_event_constraint()
440 box->pmu->type->ops->put_constraint(box, event); in uncore_put_event_constraint()
443 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) in uncore_assign_events() argument
453 c = uncore_get_event_constraint(box, box->event_list[i]); in uncore_assign_events()
454 box->event_constraint[i] = c; in uncore_assign_events()
461 hwc = &box->event_list[i]->hw; in uncore_assign_events()
462 c = box->event_constraint[i]; in uncore_assign_events()
482 ret = perf_assign_events(box->event_constraint, n, in uncore_assign_events()
487 uncore_put_event_constraint(box, box->event_list[i]); in uncore_assign_events()
494 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_start() local
507 list_add_tail(&event->active_entry, &box->active_list); in uncore_pmu_event_start()
509 uncore_read_counter(box, event)); in uncore_pmu_event_start()
510 if (box->n_active++ == 0) in uncore_pmu_event_start()
511 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
519 box->events[idx] = event; in uncore_pmu_event_start()
520 box->n_active++; in uncore_pmu_event_start()
521 __set_bit(idx, box->active_mask); in uncore_pmu_event_start()
523 local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); in uncore_pmu_event_start()
524 uncore_enable_event(box, event); in uncore_pmu_event_start()
526 if (box->n_active == 1) in uncore_pmu_event_start()
527 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
532 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_stop() local
538 if (--box->n_active == 0) in uncore_pmu_event_stop()
539 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
540 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
544 if (__test_and_clear_bit(hwc->idx, box->active_mask)) { in uncore_pmu_event_stop()
545 uncore_disable_event(box, event); in uncore_pmu_event_stop()
546 box->n_active--; in uncore_pmu_event_stop()
547 box->events[hwc->idx] = NULL; in uncore_pmu_event_stop()
551 if (box->n_active == 0) in uncore_pmu_event_stop()
552 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
560 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
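uncore_pmu_event_start() and uncore_pmu_event_stop() keep box->n_active so the polling hrtimer only runs while something is being counted: the first start arms it, the last stop cancels it. A tiny userspace model of that bookkeeping, with timer_arm()/timer_cancel() as stand-ins for the hrtimer calls:

    #include <stdio.h>

    static int n_active;

    static void timer_arm(void)    { puts("timer armed"); }
    static void timer_cancel(void) { puts("timer cancelled"); }

    static void event_start(void)
    {
        if (n_active++ == 0)        /* first active event arms the timer */
            timer_arm();
    }

    static void event_stop(void)
    {
        if (--n_active == 0)        /* last active event cancels it */
            timer_cancel();
    }

    int main(void)
    {
        event_start();  /* arms */
        event_start();  /* timer already running */
        event_stop();   /* one event still active */
        event_stop();   /* cancels */
        return 0;
    }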
567 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_add() local
572 if (!box) in uncore_pmu_event_add()
586 ret = n = uncore_collect_events(box, event, false); in uncore_pmu_event_add()
594 ret = uncore_assign_events(box, assign, n); in uncore_pmu_event_add()
599 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_add()
600 event = box->event_list[i]; in uncore_pmu_event_add()
604 hwc->last_tag == box->tags[assign[i]]) in uncore_pmu_event_add()
618 event = box->event_list[i]; in uncore_pmu_event_add()
622 hwc->last_tag != box->tags[assign[i]]) in uncore_pmu_event_add()
623 uncore_assign_hw_event(box, event, assign[i]); in uncore_pmu_event_add()
624 else if (i < box->n_events) in uncore_pmu_event_add()
632 box->n_events = n; in uncore_pmu_event_add()
639 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_del() local
652 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_del()
653 if (event == box->event_list[i]) { in uncore_pmu_event_del()
654 uncore_put_event_constraint(box, event); in uncore_pmu_event_del()
656 for (++i; i < box->n_events; i++) in uncore_pmu_event_del()
657 box->event_list[i - 1] = box->event_list[i]; in uncore_pmu_event_del()
659 --box->n_events; in uncore_pmu_event_del()
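uncore_pmu_event_del() locates the event in box->event_list[], drops its constraint, and closes the gap by shifting the tail of the array down one slot before decrementing n_events. The same compaction in a self-contained form (remove_event() and the int array are illustrative):

    #include <stdio.h>

    /* Remove the first occurrence of `ev` from a compact array of `n`
     * entries by shifting the tail down; returns the new count. */
    static int remove_event(int *list, int n, int ev)
    {
        for (int i = 0; i < n; i++) {
            if (list[i] != ev)
                continue;
            for (++i; i < n; i++)
                list[i - 1] = list[i];
            return n - 1;
        }
        return n;   /* not found, nothing changed */
    }

    int main(void)
    {
        int list[] = { 10, 20, 30 };
        int n = remove_event(list, 3, 20);
        printf("%d entries: %d %d\n", n, list[0], list[1]);  /* 2 entries: 10 30 */
        return 0;
    }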
670 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_read() local
671 uncore_perf_event_update(box, event); in uncore_pmu_event_read()
720 struct intel_uncore_box *box; in uncore_pmu_event_init() local
742 box = uncore_pmu_to_box(pmu, event->cpu); in uncore_pmu_event_init()
743 if (!box || box->cpu < 0) in uncore_pmu_event_init()
745 event->cpu = box->cpu; in uncore_pmu_event_init()
746 event->pmu_private = box; in uncore_pmu_event_init()
770 if (!check_valid_freerunning_event(box, event)) in uncore_pmu_event_init()
779 event->hw.event_base = uncore_freerunning_counter(box, event); in uncore_pmu_event_init()
784 ret = pmu->type->ops->hw_config(box, event); in uncore_pmu_event_init()
801 struct intel_uncore_box *box; in uncore_pmu_enable() local
805 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_enable()
806 if (!box) in uncore_pmu_enable()
810 uncore_pmu->type->ops->enable_box(box); in uncore_pmu_enable()
816 struct intel_uncore_box *box; in uncore_pmu_disable() local
820 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_disable()
821 if (!box) in uncore_pmu_disable()
825 uncore_pmu->type->ops->disable_box(box); in uncore_pmu_disable()
1126 struct intel_uncore_box *box; in uncore_pci_pmu_register() local
1132 box = uncore_alloc_box(type, NUMA_NO_NODE); in uncore_pci_pmu_register()
1133 if (!box) in uncore_pci_pmu_register()
1141 atomic_inc(&box->refcnt); in uncore_pci_pmu_register()
1142 box->dieid = die; in uncore_pci_pmu_register()
1143 box->pci_dev = pdev; in uncore_pci_pmu_register()
1144 box->pmu = pmu; in uncore_pci_pmu_register()
1145 uncore_box_init(box); in uncore_pci_pmu_register()
1147 pmu->boxes[die] = box; in uncore_pci_pmu_register()
1155 uncore_box_exit(box); in uncore_pci_pmu_register()
1156 kfree(box); in uncore_pci_pmu_register()
1217 struct intel_uncore_box *box = pmu->boxes[die]; in uncore_pci_pmu_unregister() local
1222 uncore_box_exit(box); in uncore_pci_pmu_unregister()
1223 kfree(box); in uncore_pci_pmu_unregister()
1228 struct intel_uncore_box *box; in uncore_pci_remove() local
1235 box = pci_get_drvdata(pdev); in uncore_pci_remove()
1236 if (!box) { in uncore_pci_remove()
1247 pmu = box->pmu; in uncore_pci_remove()
1439 struct intel_uncore_box *box; in uncore_change_type_ctx() local
1444 box = pmu->boxes[die]; in uncore_change_type_ctx()
1445 if (!box) in uncore_change_type_ctx()
1449 WARN_ON_ONCE(box->cpu != -1); in uncore_change_type_ctx()
1450 box->cpu = new_cpu; in uncore_change_type_ctx()
1454 WARN_ON_ONCE(box->cpu != old_cpu); in uncore_change_type_ctx()
1455 box->cpu = -1; in uncore_change_type_ctx()
1459 uncore_pmu_cancel_hrtimer(box); in uncore_change_type_ctx()
1461 box->cpu = new_cpu; in uncore_change_type_ctx()
1476 struct intel_uncore_box *box; in uncore_box_unref() local
1483 box = pmu->boxes[id]; in uncore_box_unref()
1484 if (box && atomic_dec_return(&box->refcnt) == 0) in uncore_box_unref()
1485 uncore_box_exit(box); in uncore_box_unref()
1521 struct intel_uncore_box *box, *tmp; in allocate_boxes() local
1534 box = uncore_alloc_box(type, cpu_to_node(cpu)); in allocate_boxes()
1535 if (!box) in allocate_boxes()
1537 box->pmu = pmu; in allocate_boxes()
1538 box->dieid = die; in allocate_boxes()
1539 list_add(&box->active_list, &allocated); in allocate_boxes()
1543 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1544 list_del_init(&box->active_list); in allocate_boxes()
1545 box->pmu->boxes[die] = box; in allocate_boxes()
1550 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1551 list_del_init(&box->active_list); in allocate_boxes()
1552 kfree(box); in allocate_boxes()
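allocate_boxes() first collects every newly allocated box on a temporary list; only when all allocations succeed are they published into pmu->boxes[die], and on any failure the partial set is walked and freed. A userspace sketch of that commit-or-rollback shape (struct node and alloc_all() are hypothetical):

    #include <stdlib.h>

    struct node {
        struct node *next;
        int payload;
    };

    /* Allocate `count` nodes onto a temporary list; publish them through
     * *out only if every allocation succeeded, otherwise free them all. */
    static int alloc_all(int count, struct node **out)
    {
        struct node *tmp = NULL, *n;

        for (int i = 0; i < count; i++) {
            n = calloc(1, sizeof(*n));
            if (!n)
                goto cleanup;
            n->payload = i;
            n->next = tmp;      /* collect on the temporary list */
            tmp = n;
        }
        *out = tmp;             /* commit: hand the whole set over */
        return 0;

    cleanup:
        while (tmp) {           /* rollback: free the partial set */
            n = tmp->next;
            free(tmp);
            tmp = n;
        }
        return -1;
    }

    int main(void)
    {
        struct node *list = NULL, *n;

        if (alloc_all(4, &list))
            return 1;
        while (list) {          /* consumer eventually frees the committed set */
            n = list->next;
            free(list);
            list = n;
        }
        return 0;
    }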
1562 struct intel_uncore_box *box; in uncore_box_ref() local
1573 box = pmu->boxes[id]; in uncore_box_ref()
1574 if (box && atomic_inc_return(&box->refcnt) == 1) in uncore_box_ref()
1575 uncore_box_init(box); in uncore_box_ref()
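uncore_box_ref() and uncore_box_unref() only call uncore_box_init()/uncore_box_exit() on the 0->1 and 1->0 reference-count transitions, so CPUs that share a box can come and go cheaply. A compact C11 model of that first-ref/last-unref pattern (box_init()/box_exit() are stand-ins for the uncore callbacks):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcnt;

    static void box_init(void) { puts("box init"); }
    static void box_exit(void) { puts("box exit"); }

    static void box_ref(void)
    {
        /* fetch_add returns the old value, so old == 0 means this ref
         * took the count from 0 to 1 and must do the one-time init */
        if (atomic_fetch_add(&refcnt, 1) == 0)
            box_init();
    }

    static void box_unref(void)
    {
        /* old == 1 means this drop took the count back to 0 */
        if (atomic_fetch_sub(&refcnt, 1) == 1)
            box_exit();
    }

    int main(void)
    {
        box_ref();      /* init */
        box_ref();      /* already initialised */
        box_unref();    /* still referenced */
        box_unref();    /* exit */
        return 0;
    }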