@@ -58,53 +58,50 @@ struct {
58
58
59
59
/*
 * Decide whether scheduler events for task @t should be recorded,
 * applying the user-configured filters in order: tgid map, pid map,
 * user/kernel-thread-only flags, and task state.
 *
 * Task fields are read via BPF_CORE_READ() rather than direct
 * dereference, so the helper works when called from the raw_tp
 * program below as well as from tp_btf (raw_tp arguments are not
 * directly dereferenceable BTF pointers).
 *
 * Returns true when the event passes every active filter.
 */
static bool allow_record(struct task_struct *t)
{
	u32 tgid = BPF_CORE_READ(t, tgid);
	u32 pid = BPF_CORE_READ(t, pid);

	/* When tgid filtering is on, only record tgids present in the map. */
	if (filter_by_tgid && !bpf_map_lookup_elem(&tgids, &tgid))
		return false;
	/* When pid filtering is on, only record pids present in the map. */
	if (filter_by_pid && !bpf_map_lookup_elem(&pids, &pid))
		return false;
	/* PF_KTHREAD set means @t is a kernel thread. */
	if (user_threads_only && (BPF_CORE_READ(t, flags) & PF_KTHREAD))
		return false;
	else if (kernel_threads_only && !(BPF_CORE_READ(t, flags) & PF_KTHREAD))
		return false;
	/* state == -1 appears to mean "no state filter" — confirm at setup site. */
	if (state != -1 && get_task_state(t) != state)
		return false;
	return true;
}
76
76
77
- SEC ("tp_btf/sched_switch" )
78
- int BPF_PROG (sched_switch , bool preempt , struct task_struct * prev , struct task_struct * next )
77
+ static int handle_sched_switch (void * ctx , bool preempt , struct task_struct * prev , struct task_struct * next )
79
78
{
80
79
struct internal_key * i_keyp , i_key ;
81
80
struct val_t * valp , val ;
82
81
s64 delta ;
83
82
u32 pid ;
84
83
85
84
if (allow_record (prev )) {
86
- pid = prev -> pid ;
85
+ pid = BPF_CORE_READ ( prev , pid ) ;
87
86
/* To distinguish idle threads of different cores */
88
87
if (!pid )
89
88
pid = bpf_get_smp_processor_id ();
90
89
i_key .key .pid = pid ;
91
- i_key .key .tgid = prev -> tgid ;
90
+ i_key .key .tgid = BPF_CORE_READ ( prev , tgid ) ;
92
91
i_key .start_ts = bpf_ktime_get_ns ();
93
92
94
- if (prev -> flags & PF_KTHREAD )
93
+ if (BPF_CORE_READ ( prev , flags ) & PF_KTHREAD )
95
94
i_key .key .user_stack_id = -1 ;
96
95
else
97
- i_key .key .user_stack_id =
98
- bpf_get_stackid (ctx , & stackmap ,
99
- BPF_F_USER_STACK );
96
+ i_key .key .user_stack_id = bpf_get_stackid (ctx , & stackmap , BPF_F_USER_STACK );
100
97
i_key .key .kern_stack_id = bpf_get_stackid (ctx , & stackmap , 0 );
101
98
bpf_map_update_elem (& start , & pid , & i_key , 0 );
102
- bpf_probe_read_kernel_str (& val .comm , sizeof (prev -> comm ), prev -> comm );
99
+ bpf_probe_read_kernel_str (& val .comm , sizeof (prev -> comm ), BPF_CORE_READ ( prev , comm ) );
103
100
val .delta = 0 ;
104
101
bpf_map_update_elem (& info , & i_key .key , & val , BPF_NOEXIST );
105
102
}
106
103
107
- pid = next -> pid ;
104
+ pid = BPF_CORE_READ ( next , pid ) ;
108
105
i_keyp = bpf_map_lookup_elem (& start , & pid );
109
106
if (!i_keyp )
110
107
return 0 ;
@@ -124,4 +121,16 @@ int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_s
124
121
return 0 ;
125
122
}
126
123
124
/*
 * BTF-enabled tracepoint entry point for sched_switch.
 * Thin wrapper: all logic lives in handle_sched_switch() so the
 * tp_btf and raw_tp attachments share one implementation.
 * Note: `ctx` is declared implicitly by the BPF_PROG() macro.
 */
SEC("tp_btf/sched_switch")
int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	return handle_sched_switch(ctx, preempt, prev, next);
}
129
+
130
/*
 * Raw tracepoint entry point for sched_switch, for kernels/setups
 * where tp_btf is unavailable; delegates to the same shared handler
 * as the tp_btf program. `ctx` is declared implicitly by BPF_PROG().
 */
SEC("raw_tp/sched_switch")
int BPF_PROG(sched_switch_raw, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	return handle_sched_switch(ctx, preempt, prev, next);
}
135
+
127
136
char LICENSE [] SEC ("license" ) = "GPL" ;
0 commit comments