Diffstat (limited to 'SOURCE/module/kernel')
-rwxr-xr-x  SOURCE/module/kernel/load.c       17
-rw-r--r--  SOURCE/module/kernel/mutex.c      23
-rwxr-xr-x  SOURCE/module/kernel/sys_delay.c  22
3 files changed, 57 insertions, 5 deletions
diff --git a/SOURCE/module/kernel/load.c b/SOURCE/module/kernel/load.c
index ce34668..9226117 100755
--- a/SOURCE/module/kernel/load.c
+++ b/SOURCE/module/kernel/load.c
@@ -27,6 +27,7 @@
#include <linux/rbtree.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
+#include <asm-generic/rwonce.h> // for READ_ONCE(x)
#include <asm/irq_regs.h>
@@ -79,6 +80,12 @@ void diag_load_timer(struct diag_percpu_context *context)
}
#else
+// https://www.spinics.net/lists/kernel/msg3582022.html
+// task_contributes_to_load() was removed upstream in 5.8-rc3; redefined here only so this module keeps building.
+#define task_contributes_to_load2(task) ((READ_ONCE(task->__state) & TASK_UNINTERRUPTIBLE) != 0 && \
+ (task->flags & PF_FROZEN) == 0 && \
+ (READ_ONCE(task->__state) & TASK_NOLOAD) == 0)
+
static void load_monitor_ipi(void *ignore)
{
struct load_monitor_cpu_run *cpu_run;
@@ -144,7 +151,7 @@ void diag_load_timer(struct diag_percpu_context *context)
rcu_read_lock();
do_each_thread(g, p) {
- if (task_contributes_to_load(p))
+ if (task_contributes_to_load2(p))
nr_uninterrupt++;
} while_each_thread(g, p);
@@ -206,8 +213,12 @@ void diag_load_timer(struct diag_percpu_context *context)
diag_variant_buffer_seal(&load_monitor_variant_buffer);
diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
do_each_thread(g, p) {
- if ((p->state == TASK_RUNNING)
- || task_contributes_to_load(p)) {
+// See "[PATCH v2 7/7] sched: Change task_struct::state": https://lore.kernel.org/all/[email protected]/#r
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+ if ((p->state == TASK_RUNNING) || task_contributes_to_load2(p)) {
+#else
+ if ((p->__state == TASK_RUNNING) || task_contributes_to_load2(p)) {
+#endif
tsk_info.et_type = et_load_monitor_task;
tsk_info.id = event_id;
tsk_info.tv = detail.tv;
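The load.c hunks above cover two separate upstream changes: task_contributes_to_load() being removed and task_struct::state being renamed to __state. As a minimal sketch (not part of this patch), the state access itself could also be hidden behind one compatibility macro instead of repeating the version check at every call site; the diag_task_state name is an assumption, and the cutoff mirrors the KERNEL_VERSION(5, 13, 0) check used above:

#include <linux/version.h>
#include <linux/sched.h>

/* sketch only: one place that knows whether the field is state or __state */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
#define diag_task_state(p)	READ_ONCE((p)->state)
#else
#define diag_task_state(p)	READ_ONCE((p)->__state)
#endif

/* call sites then stay version-independent, e.g.
 *   if (diag_task_state(p) == TASK_RUNNING || task_contributes_to_load2(p))
 */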
diff --git a/SOURCE/module/kernel/mutex.c b/SOURCE/module/kernel/mutex.c
index 8ee8b97..5143765 100644
--- a/SOURCE/module/kernel/mutex.c
+++ b/SOURCE/module/kernel/mutex.c
@@ -430,13 +430,22 @@ static int __activate_mutex_monitor(void)
hook_tracepoint("sched_process_exit", trace_sched_process_exit_hit, NULL);
}
//get_argv_processes(&mm_tree);
-
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
get_online_cpus();
+#else
+ cpus_read_lock();
+#endif
new_mutex_lock(orig_text_mutex);
JUMP_INSTALL(mutex_lock);
JUMP_INSTALL(mutex_unlock);
new_mutex_unlock(orig_text_mutex);
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
put_online_cpus();
+#else
+ cpus_read_unlock();
+#endif
return 1;
out_variant_buffer:
@@ -452,14 +461,24 @@ static void __deactivate_mutex_monitor(void)
unhook_tracepoint("sched_process_exit", trace_sched_process_exit_hit, NULL);
}
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
get_online_cpus();
+#else
+ cpus_read_lock();
+#endif
new_mutex_lock(orig_text_mutex);
JUMP_REMOVE(mutex_lock);
JUMP_REMOVE(mutex_unlock);
new_mutex_unlock(orig_text_mutex);
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
put_online_cpus();
+#else
+ cpus_read_unlock();
+#endif
clean_data();
@@ -633,6 +652,8 @@ static int lookup_syms(void)
if (orig___mutex_unlock_slowpath == NULL)
orig___mutex_unlock_slowpath = (void *)diag_kallsyms_lookup_name("__mutex_unlock_slowpath.isra.19");
if (orig___mutex_unlock_slowpath == NULL)
+ orig___mutex_unlock_slowpath = (void *)diag_kallsyms_lookup_name("__mutex_unlock_slowpath.isra.24");
+ if (orig___mutex_unlock_slowpath == NULL)
orig___mutex_unlock_slowpath = (void *)diag_kallsyms_lookup_name("__mutex_unlock_slowpath");
if (orig___mutex_unlock_slowpath == NULL)
orig___mutex_unlock_slowpath = (void *)diag_kallsyms_lookup_name("__mutex_unlock_slowpath.constprop.0");
diff --git a/SOURCE/module/kernel/sys_delay.c b/SOURCE/module/kernel/sys_delay.c
index daa915f..8ea674c 100755
--- a/SOURCE/module/kernel/sys_delay.c
+++ b/SOURCE/module/kernel/sys_delay.c
@@ -326,11 +326,21 @@ static int __activate_sys_delay(void)
}
//get_argv_processes(&mm_tree);
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
get_online_cpus();
+#else
+ cpus_read_lock();
+#endif
mutex_lock(orig_text_mutex);
JUMP_INSTALL(_cond_resched);
mutex_unlock(orig_text_mutex);
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
put_online_cpus();
+#else
+ cpus_read_unlock();
+#endif
return 1;
out_variant_buffer:
@@ -362,11 +372,21 @@ static void __deactivate_sys_delay(void)
unhook_tracepoint("sched_process_exit", trace_sched_process_exit_hit, NULL);
}
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
get_online_cpus();
+#else
+ cpus_read_lock();
+#endif
mutex_lock(orig_text_mutex);
JUMP_REMOVE(_cond_resched);
mutex_unlock(orig_text_mutex);
+//https://lore.kernel.org/lkml/[email protected]/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
put_online_cpus();
+#else
+ cpus_read_unlock();
+#endif
synchronize_sched();
msleep(10);
@@ -507,7 +527,7 @@ long diag_ioctl_sys_delay(unsigned int cmd, unsigned long arg)
static int lookup_syms(void)
{
- LOOKUP_SYMS(_cond_resched);
+ // LOOKUP_SYMS(_cond_resched);
#if KERNEL_VERSION(4, 4, 0) <= LINUX_VERSION_CODE
LOOKUP_SYMS(__schedule);
#else
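The final sys_delay.c hunk simply comments out LOOKUP_SYMS(_cond_resched), presumably because the symbol no longer resolves under that name on newer kernels (it was renamed to __cond_resched, and with PREEMPT_DYNAMIC cond_resched() can become a static call). A hypothetical alternative, mirroring the __mutex_unlock_slowpath fallback chain in mutex.c above, would be a non-fatal lookup; the orig__cond_resched variable is assumed from the LOOKUP_SYMS naming convention:

/* hypothetical sketch, not part of the patch: try both names, do not fail */
orig__cond_resched = (void *)diag_kallsyms_lookup_name("_cond_resched");
if (orig__cond_resched == NULL)
	orig__cond_resched = (void *)diag_kallsyms_lookup_name("__cond_resched");
/* if both lookups fail, skip JUMP_INSTALL(_cond_resched) instead of
 * aborting activation */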