author     zy <[email protected]>  2023-12-14 02:22:17 -0500
committer  zy <[email protected]>  2023-12-14 02:22:17 -0500
commit     a7e4c1e4fa85714a9e062afe5529218871bb959f (patch)
tree       967065ff95c1a718a9044d8f4d4bb0b26c1b628d /source/module/monitor_perf.c
parent     5d9fd19ddb02326e3d6d9534b899376afe8d9049 (diff)
ucli perf software irq
Diffstat (limited to 'source/module/monitor_perf.c')
-rw-r--r--  source/module/monitor_perf.c | 50
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/source/module/monitor_perf.c b/source/module/monitor_perf.c
index 549d7ac..76df6f3 100644
--- a/source/module/monitor_perf.c
+++ b/source/module/monitor_perf.c
@@ -3,13 +3,15 @@
static struct perf_event *pe;
void vm_perf_overflow_callback(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs) {
+ struct perf_sample_data *data,
+ struct pt_regs *regs) {
// handle perf event data
// struct perf_callchain_entry *callchain;
// int nr, i;
pr_info("perf event callback\n");
+
+ // perf_event_disable(event);
// if perf_sample_data carries call-chain information
// if (data->callchain) {
@@ -42,10 +44,10 @@ void vm_perf_overflow_callback(struct perf_event *event,
struct perf_event_attr pea = {
.type = PERF_TYPE_SOFTWARE,
.size = sizeof(struct perf_event_attr),
- .config = PERF_COUNT_SW_CPU_CLOCK,
+ .config = PERF_COUNT_SW_DUMMY,
.sample_period = 1,
.sample_type = PERF_SAMPLE_CALLCHAIN,
- // .disabled = 1,
+ .disabled = 1,
};
#include <linux/cpumask.h>
@@ -61,32 +63,38 @@ void setup_perf_event_for_task(struct task_struct *tsk) {
pr_info("Perf event already created\n");
return;
}
- int cpu;
- struct perf_event **events;
- for_each_possible_cpu(cpu) {
- struct perf_event **event = per_cpu_ptr(events, cpu);
- if (cpu_is_offline(cpu)) {
- *event = NULL;
- continue;
- }
- *event = perf_event_create_kernel_counter(&pea, cpu, tsk,
- vm_perf_overflow_callback, NULL);
- if (IS_ERR(*event)) {
- printk(KERN_INFO "create perf event failure\n");
- // return -1;
- }
- }
+ // int cpu;
+ // struct perf_event **events;
+ // for_each_possible_cpu(cpu) {
+ // struct perf_event **event = per_cpu_ptr(events, cpu);
+ // if (cpu_is_offline(cpu)) {
+ // pr_info("cpu %d is offline\n", cpu);
+ // *event = NULL;
+ // continue;
+ // }
+ // *event = perf_event_create_kernel_counter(&pea, cpu, tsk,
+ // vm_perf_overflow_callback, NULL);
+
+ // // perf_event_create_kernel_counter(&pea, cpu, tsk,
+ // // vm_perf_overflow_callback,
+ // // NULL);
+ // if (IS_ERR(*event)) {
+ // printk(KERN_INFO "create perf event failure\n");
+ // // return -1;
+ // }
+ // // perf_event_enable(*event);
+ // }
// pe = perf_event_create_kernel_counter(&pea, tsk->on_cpu, tsk,
// vm_perf_callback, NULL);
- pe = perf_event_create_kernel_counter(&pea, tsk->on_cpu, tsk,
+ pe = perf_event_create_kernel_counter(&pea, -1, tsk,
vm_perf_overflow_callback, NULL);
if (IS_ERR(pe)) {
pr_info("Error in perf_event_create_kernel_counter\n");
return;
}
- // perf_event_enable(pe); // enable perf event
+ perf_event_enable(pe); // enable perf event
}
/**
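
Note: for context, the pattern this change moves toward is a single task-bound kernel counter (cpu == -1, i.e. the event follows the task on whichever CPU it runs) that is created disabled and switched on explicitly. The sketch below is a minimal, self-contained illustration of that pattern, not the module's actual code; demo_event, demo_overflow, demo_setup_perf and demo_teardown_perf are hypothetical names, while perf_event_create_kernel_counter, perf_event_enable and perf_event_release_kernel are the in-kernel perf interfaces this file already uses.

#include <linux/perf_event.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/err.h>
#include <linux/smp.h>

static struct perf_event *demo_event;   /* hypothetical counterpart of 'pe' */

/* Overflow handler: runs in interrupt/NMI context, so keep it short. */
static void demo_overflow(struct perf_event *event,
                          struct perf_sample_data *data,
                          struct pt_regs *regs)
{
	pr_info("perf overflow on cpu %d\n", smp_processor_id());
}

/* Create one software event bound to @tsk on any CPU (cpu == -1),
 * start it disabled, and enable it only once creation has succeeded. */
static int demo_setup_perf(struct task_struct *tsk)
{
	struct perf_event_attr attr = {
		.type          = PERF_TYPE_SOFTWARE,
		.size          = sizeof(struct perf_event_attr),
		.config        = PERF_COUNT_SW_CPU_CLOCK,
		.sample_period = 1,
		.sample_type   = PERF_SAMPLE_CALLCHAIN,
		.disabled      = 1,
	};

	demo_event = perf_event_create_kernel_counter(&attr, -1, tsk,
						      demo_overflow, NULL);
	if (IS_ERR(demo_event))
		return PTR_ERR(demo_event);

	perf_event_enable(demo_event);
	return 0;
}

/* Release the counter on module unload so the event does not leak. */
static void demo_teardown_perf(void)
{
	if (demo_event && !IS_ERR(demo_event))
		perf_event_release_kernel(demo_event);
}

One caveat worth hedging: PERF_COUNT_SW_DUMMY is documented as a placeholder event that counts nothing (it exists mainly so sideband records can be gathered), so with .config switched to PERF_COUNT_SW_DUMMY the overflow callback would not be expected to fire; PERF_COUNT_SW_CPU_CLOCK with sample_period = 1, by contrast, fires extremely frequently. Creating the event with .disabled = 1 and calling perf_event_enable() afterwards, as this commit does, separates creation from activation.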