1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
|
#include "monitor_perf.h"
/* The task-bound counter created by setup_perf_event_for_task();
 * released (and reset to NULL) by cleanup_perf_event(). */
static struct perf_event *pe;
/**
 * @brief Overflow callback invoked for every perf event sample.
 *
 * Currently it only logs that the callback fired; the commented-out code
 * below sketches how the sampled callchain could be walked and forwarded
 * to a buffer (to_buff) in a later revision.
 *
 * @param event the perf event that overflowed
 * @param data  sample payload (may carry a callchain when
 *              PERF_SAMPLE_CALLCHAIN is requested)
 * @param regs  register state at the time of the sample
 */
void vm_perf_overflow_callback(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs) {
// handle perf event data
// struct perf_callchain_entry *callchain;
// int nr, i;
pr_info("perf event callback\n");
// If perf_sample_data carries call-stack (callchain) information
// if (data->callchain) {
// callchain = data->callchain;
// nr = callchain->nr; // number of callchain entries
// // Walk the stack entries and process them
// for (i = 0; i < nr; i++) {
// // callchain->ip[i] holds each stack frame's instruction pointer
// // to_buff could be called here to copy the frame into a buffer
// // to_buff(&callchain->ip[i], sizeof(callchain->ip[i]));
// pr_info("callchain->ip[%d] = %llx\n", i, callchain->ip[i]);
// }
// }
}
// static struct perf_event_attr pea = {
// .type = PERF_TYPE_SOFTWARE, // software event
// .size = sizeof(struct perf_event_attr), // size of attr
// .config = PERF_COUNT_SW_CPU_CLOCK, // no care
// PERF_COUNT_SW_DUMMY PERF_COUNT_SW_CPU_CLOCK .sample_period = 1, // sample
// every 1 event .sample_type =
// PERF_SAMPLE_CALLCHAIN, // sample callchain | means include stacktrace
// // .exclude_kernel = 1, // no kernel stacktrace | may need to change
// after test
// // .exclude_hv = 1, // no hypervisor stacktrace | may need to change
// after test .disabled = 0, // disabled at first
// };
/*
 * Sampling configuration: software CPU-clock event that samples on every
 * tick (sample_period = 1) and records the callchain with each sample.
 * .disabled is left 0, so counters created from this start enabled.
 */
struct perf_event_attr pea = {
.type = PERF_TYPE_SOFTWARE,
.size = sizeof(struct perf_event_attr),
.config = PERF_COUNT_SW_CPU_CLOCK,
.sample_period = 1, // sample every event
.sample_type = PERF_SAMPLE_CALLCHAIN, // include the stack trace in samples
// .disabled = 1,
};
#include <linux/cpumask.h>
#include <linux/smp.h>
/**
* @brief Set the up perf event for task object
*
* @param tsk
*/
void setup_perf_event_for_task(struct task_struct *tsk) {
pr_info("setup_perf_event_for_task: cpu = %d\n", tsk->on_cpu);
if (pe) {
pr_info("Perf event already created\n");
return;
}
int cpu;
struct perf_event **events;
for_each_possible_cpu(cpu) {
struct perf_event **event = per_cpu_ptr(events, cpu);
if (cpu_is_offline(cpu)) {
*event = NULL;
continue;
}
*event = perf_event_create_kernel_counter(&pea, cpu, tsk,
vm_perf_overflow_callback, NULL);
if (IS_ERR(*event)) {
printk(KERN_INFO "create perf event failure\n");
// return -1;
}
}
// pe = perf_event_create_kernel_counter(&pea, tsk->on_cpu, tsk,
// vm_perf_callback, NULL);
pe = perf_event_create_kernel_counter(&pea, tsk->on_cpu, tsk,
vm_perf_overflow_callback, NULL);
if (IS_ERR(pe)) {
pr_info("Error in perf_event_create_kernel_counter\n");
return;
}
// perf_event_enable(pe); // enable perf event
}
/**
* @brief Disable perf event
*
*/
/**
 * @brief Tear down the perf event created by setup_perf_event_for_task().
 *
 * Disables and releases the counter, then clears @pe so a later setup
 * call can create a new one. Safe to call when no event exists.
 */
void cleanup_perf_event(void) {
	if (!pe)
		return;
	perf_event_disable(pe);
	perf_event_release_kernel(pe);
	pe = NULL;
}
|