summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorYour Name <[email protected]>2023-06-27 01:52:11 +0000
committerYour Name <[email protected]>2023-06-27 01:52:11 +0000
commit4e3f6eaa7a6b2885d5547ba2dbc948544ad4fe1d (patch)
treebe0321f079b8d19f0de759f8a6a1bfd407f0b55a
parent4ca20702c9e9438bdc3736ff3db94aa28ed88297 (diff)
许多注释
-rw-r--r--arch/x86/include/asm/uintr.h27
-rw-r--r--arch/x86/kernel/uintr_core.c317
-rw-r--r--arch/x86/kernel/uintr_fd.c155
3 files changed, 264 insertions, 235 deletions
diff --git a/arch/x86/include/asm/uintr.h b/arch/x86/include/asm/uintr.h
index 64113ef523ca..5df694c79fc9 100644
--- a/arch/x86/include/asm/uintr.h
+++ b/arch/x86/include/asm/uintr.h
@@ -5,6 +5,7 @@
#ifdef CONFIG_X86_USER_INTERRUPTS
/* User Posted Interrupt Descriptor (UPID) */
+/* 接收方 */
struct uintr_upid {
struct {
u8 status; /* bit 0: ON, bit 1: SN, bit 2-7: reserved */
@@ -13,7 +14,7 @@ struct uintr_upid {
u8 reserved2; /* Reserved */
u32 ndst; /* Notification destination */
} nc __packed; /* Notification control */
- u64 puir; /* Posted user interrupt requests */
+ u64 puir; /* Posted user interrupt requests */ /*pir 发送方向量*/
} __aligned(64);
/* UPID Notification control status */
@@ -22,25 +23,25 @@ struct uintr_upid {
struct uintr_upid_ctx {
struct list_head node;
- struct task_struct *task; /* Receiver task */
- struct uintr_upid *upid;
- refcount_t refs;
+ struct task_struct *task; /* Receiver task */ // 接收方进程
+ struct uintr_upid *upid; // 接收方的 UPID
+ refcount_t refs; // 线程安全的引用计数
bool receiver_active; /* Flag for UPID being mapped to a receiver */
- bool waiting;
+ bool waiting; // 是否在等待
};
struct uintr_receiver_info {
- struct uintr_upid_ctx *upid_ctx; /* UPID context */
- struct callback_head twork; /* Task work head */
- u64 uvec; /* Vector number */
+ struct uintr_upid_ctx *upid_ctx; /* UPID context */ /* UPID 上下文 */
+ struct callback_head twork; /* Task work head */ // 回调
+    u64 uvec;            /* Vector number */ // 中断向量号
};
struct uintr_sender_info {
- struct list_head node;
- struct uintr_uitt_ctx *uitt_ctx;
- struct task_struct *task;
- struct uintr_upid_ctx *r_upid_ctx; /* Receiver's UPID context */
- struct callback_head twork; /* Task work head */
+ struct list_head node; //
+ struct uintr_uitt_ctx *uitt_ctx; // uitte 上下文
+ struct task_struct *task; // 发送方进程
+ struct uintr_upid_ctx *r_upid_ctx; /* Receiver's UPID context */// 接收方的 UPID 上下文
+ struct callback_head twork; /* Task work head */ // 回调
unsigned int uitt_index;
};
diff --git a/arch/x86/kernel/uintr_core.c b/arch/x86/kernel/uintr_core.c
index dca8819ac3fd..607f9df83049 100644
--- a/arch/x86/kernel/uintr_core.c
+++ b/arch/x86/kernel/uintr_core.c
@@ -25,12 +25,12 @@
* Each UITT entry is 16 bytes in size.
* Current UITT table size is set as 4KB (256 * 16 bytes)
*/
-#define UINTR_MAX_UITT_NR 256
-#define UINTR_MAX_UVEC_NR 64
+#define UINTR_MAX_UITT_NR 256 // 最大支持256个UITT表项
+#define UINTR_MAX_UVEC_NR 64 // 最大支持64个中断向量
struct uintr_receiver {
struct uintr_upid_ctx *upid_ctx;
- u64 uvec_mask; /* track active vector per bit */
+ u64 uvec_mask; /* track active vector per bit */ // 每个bit表示一个中断向量
};
/* User Interrupt Target Table Entry (UITTE) */
@@ -45,46 +45,48 @@ struct uintr_uitt_ctx {
struct uintr_uitt_entry *uitt;
/* Protect UITT */
spinlock_t uitt_lock;
- refcount_t refs;
+ refcount_t refs; // 引用计数
};
struct uintr_sender {
struct uintr_uitt_ctx *uitt_ctx;
/* track active uitt entries per bit */
- u64 uitt_mask[BITS_TO_U64(UINTR_MAX_UITT_NR)];
+    u64 uitt_mask[BITS_TO_U64(UINTR_MAX_UITT_NR)]; // 256 位位图,每一位对应一个 UITT 表项,存放在 u64 数组中
};
/* TODO: To remove the global lock, move to a per-cpu wait list. */
-static DEFINE_SPINLOCK(uintr_wait_lock);
-static struct list_head uintr_wait_list = LIST_HEAD_INIT(uintr_wait_list);
+static DEFINE_SPINLOCK(uintr_wait_lock); // 静态内部自旋锁
+static struct list_head uintr_wait_list = LIST_HEAD_INIT(uintr_wait_list); // 初始化链表头
inline bool uintr_arch_enabled(void)
{
return static_cpu_has(X86_FEATURE_UINTR);
}
-static inline bool is_uintr_receiver(struct task_struct *t)
+static inline bool is_uintr_receiver(struct task_struct *t) // 是否有接收者
{
- return !!t->thread.ui_recv;
+ return !!t->thread.ui_recv; // 是否有接收者
}
/* Always make sure task is_uintr_receiver() before calling */
+// 是否有等待中的接收者
static inline bool is_uintr_waiting(struct task_struct *t)
{
return t->thread.ui_recv->upid_ctx->waiting;
}
+// 是否有发送者
static inline bool is_uintr_sender(struct task_struct *t)
{
return !!t->thread.ui_send;
}
-
+// 是否有发送者或者接收者
static inline bool is_uintr_task(struct task_struct *t)
{
return(is_uintr_receiver(t) || is_uintr_sender(t));
}
-static inline bool is_uitt_empty(struct task_struct *t)
+static inline bool is_uitt_empty(struct task_struct *t) // uitt_mask 是否都为 0
{
return !!bitmap_empty((unsigned long *)t->thread.ui_send->uitt_mask,
UINTR_MAX_UITT_NR);
@@ -99,24 +101,31 @@ static inline bool is_uitt_empty(struct task_struct *t)
* senders from connecting with this UPID, since the receiver task has already
* made this UPID inactive.
*/
+// r_info->upid_ctx->receiver_active 是否为 true
static bool uintr_is_receiver_active(struct uintr_receiver_info *r_info)
{
return r_info->upid_ctx->receiver_active;
}
+// 将 CPU ID 转换为 UPID 的通知目的地 (ndst) 编码
+// 注意: ndst 是基于 APIC ID 的中断路由目的地,并非 NUMA 节点 ID
static inline u32 cpu_to_ndst(int cpu)
{
+ // 获取 CPU 对应的 APIC ID
u32 apicid = (u32)apic->cpu_present_to_apicid(cpu);
+ // 如果 APIC ID 无效,则输出警告信息
WARN_ON_ONCE(apicid == BAD_APICID);
+    // 若未启用 x2APIC (即 xAPIC 模式),将 APIC ID 左移 8 位并掩码到 bits 15:8,得到 xAPIC 格式的目的地编码
if (!x2apic_enabled())
return (apicid << 8) & 0xFF00;
+    // 否则 (x2APIC 模式),直接使用 APIC ID 作为通知目的地
return apicid;
}
-static void free_upid(struct uintr_upid_ctx *upid_ctx)
+static void free_upid(struct uintr_upid_ctx *upid_ctx) // 释放 upid_ctx
{
put_task_struct(upid_ctx->task);
kfree(upid_ctx->upid);
@@ -125,107 +134,108 @@ static void free_upid(struct uintr_upid_ctx *upid_ctx)
}
/* TODO: UPID needs to be allocated by a KPTI compatible allocator */
+// 创建 upid_ctx and 对应的 upid
static struct uintr_upid_ctx *alloc_upid(void)
{
struct uintr_upid_ctx *upid_ctx;
struct uintr_upid *upid;
- upid_ctx = kzalloc(sizeof(*upid_ctx), GFP_KERNEL);
+ upid_ctx = kzalloc(sizeof(*upid_ctx), GFP_KERNEL); // 申请内存
if (!upid_ctx)
return NULL;
- upid = kzalloc(sizeof(*upid), GFP_KERNEL);
+ upid = kzalloc(sizeof(*upid), GFP_KERNEL); // 申请内存
if (!upid) {
kfree(upid_ctx);
return NULL;
}
- upid_ctx->upid = upid;
- refcount_set(&upid_ctx->refs, 1);
- upid_ctx->task = get_task_struct(current);
- upid_ctx->receiver_active = true;
- upid_ctx->waiting = false;
+ upid_ctx->upid = upid; // upid_ctx->upid 指向 upid
+ refcount_set(&upid_ctx->refs, 1); // 引用计数设置为 1
+ upid_ctx->task = get_task_struct(current); // upid_ctx->task 指向当前进程
+ upid_ctx->receiver_active = true; // upid_ctx->receiver_active 设置为 true
+ upid_ctx->waiting = false; // upid_ctx->waiting 设置为 false
return upid_ctx;
}
static void put_upid_ref(struct uintr_upid_ctx *upid_ctx)
{
- if (refcount_dec_and_test(&upid_ctx->refs))
- free_upid(upid_ctx);
+ if (refcount_dec_and_test(&upid_ctx->refs)) // upid_ctx->refs 计数 -1
+ free_upid(upid_ctx); // 计数为 0 释放
}
static struct uintr_upid_ctx *get_upid_ref(struct uintr_upid_ctx *upid_ctx)
{
- refcount_inc(&upid_ctx->refs);
+ refcount_inc(&upid_ctx->refs); // 引用计数加1
return upid_ctx;
}
-static void free_uitt(struct uintr_uitt_ctx *uitt_ctx)
+static void free_uitt(struct uintr_uitt_ctx *uitt_ctx) // 销毁 uitt_ctx
{
unsigned long flags;
- spin_lock_irqsave(&uitt_ctx->uitt_lock, flags);
- kfree(uitt_ctx->uitt);
- uitt_ctx->uitt = NULL;
- spin_unlock_irqrestore(&uitt_ctx->uitt_lock, flags);
+ spin_lock_irqsave(&uitt_ctx->uitt_lock, flags); // 加锁
+ kfree(uitt_ctx->uitt); // 释放 uitt_ctx->uitt
+ uitt_ctx->uitt = NULL; // 置空
+ spin_unlock_irqrestore(&uitt_ctx->uitt_lock, flags); // 解锁
- kfree(uitt_ctx);
+ kfree(uitt_ctx); // 释放 uitt_ctx
}
/* TODO: Replace UITT allocation with KPTI compatible memory allocator */
-static struct uintr_uitt_ctx *alloc_uitt(void)
+static struct uintr_uitt_ctx *alloc_uitt(void) // 初始化 uitt_ctx
{
struct uintr_uitt_ctx *uitt_ctx;
struct uintr_uitt_entry *uitt;
- uitt_ctx = kzalloc(sizeof(*uitt_ctx), GFP_KERNEL);
+ uitt_ctx = kzalloc(sizeof(*uitt_ctx), GFP_KERNEL); // 申请 uitt_ctx 内存
if (!uitt_ctx)
return NULL;
- uitt = kzalloc(sizeof(*uitt) * UINTR_MAX_UITT_NR, GFP_KERNEL);
+ uitt = kzalloc(sizeof(*uitt) * UINTR_MAX_UITT_NR, GFP_KERNEL); // 申请 uitt 内存
if (!uitt) {
kfree(uitt_ctx);
return NULL;
}
uitt_ctx->uitt = uitt;
- spin_lock_init(&uitt_ctx->uitt_lock);
- refcount_set(&uitt_ctx->refs, 1);
+ spin_lock_init(&uitt_ctx->uitt_lock); // 自旋锁
+ refcount_set(&uitt_ctx->refs, 1); // 计数为 1
return uitt_ctx;
}
static void put_uitt_ref(struct uintr_uitt_ctx *uitt_ctx)
{
- if (refcount_dec_and_test(&uitt_ctx->refs))
- free_uitt(uitt_ctx);
+ if (refcount_dec_and_test(&uitt_ctx->refs)) // refs -=1; 若 refs == 0
+ free_uitt(uitt_ctx); // 销毁 uitt_ctx
}
-static struct uintr_uitt_ctx *get_uitt_ref(struct uintr_uitt_ctx *uitt_ctx)
+static struct uintr_uitt_ctx *get_uitt_ref(struct uintr_uitt_ctx *uitt_ctx) //
{
- refcount_inc(&uitt_ctx->refs);
+ refcount_inc(&uitt_ctx->refs); // uitt_ctx->refs += 1
return uitt_ctx;
}
-static inline void mark_uitte_invalid(struct uintr_sender_info *s_info)
+static inline void mark_uitte_invalid(struct uintr_sender_info *s_info) // 失活 uitte
{
struct uintr_uitt_entry *uitte;
unsigned long flags;
- spin_lock_irqsave(&s_info->uitt_ctx->uitt_lock, flags);
+ spin_lock_irqsave(&s_info->uitt_ctx->uitt_lock, flags); // 获取锁
uitte = &s_info->uitt_ctx->uitt[s_info->uitt_index];
- uitte->valid = 0;
+ uitte->valid = 0; // 使能 = 0
spin_unlock_irqrestore(&s_info->uitt_ctx->uitt_lock, flags);
}
-static void __clear_vector_from_upid(u64 uvec, struct uintr_upid *upid)
+static void __clear_vector_from_upid(u64 uvec, struct uintr_upid *upid) // 从 upid 中清除 uvec
{
- clear_bit(uvec, (unsigned long *)&upid->puir);
+ clear_bit(uvec, (unsigned long *)&upid->puir); // 将 puir 中的 uvec 位清零
}
-static void __clear_vector_from_task(u64 uvec)
+static void __clear_vector_from_task(u64 uvec) // 从 task 中清除 uvec 在 uvec_mask 对应位置
{
struct task_struct *t = current;
@@ -291,19 +301,19 @@ static void teardown_uitt(void)
struct fpu *fpu = &t->thread.fpu;
u64 msr64;
- put_uitt_ref(t->thread.ui_send->uitt_ctx);
- kfree(t->thread.ui_send);
- t->thread.ui_send = NULL;
+ put_uitt_ref(t->thread.ui_send->uitt_ctx); // refs -= 1, 若 refs == 0, 销毁 uitt_ctx
+ kfree(t->thread.ui_send); // 释放 ui_send 内存
+ t->thread.ui_send = NULL; // 置空
- fpregs_lock();
+ fpregs_lock(); // 获取 浮点寄存器锁 .开始清空寄存器
if (fpregs_state_valid(fpu, smp_processor_id())) {
/* Modify only the relevant bits of the MISC MSR */
- rdmsrl(MSR_IA32_UINTR_MISC, msr64);
+ rdmsrl(MSR_IA32_UINTR_MISC, msr64); //
msr64 &= GENMASK_ULL(63, 32);
wrmsrl(MSR_IA32_UINTR_MISC, msr64);
wrmsrl(MSR_IA32_UINTR_TT, 0ULL);
- } else {
+ } else {// 若浮点寄存器无效
struct uintr_state *p;
p = get_xsave_addr(&fpu->state.xsave, XFEATURE_UINTR);
@@ -313,37 +323,38 @@ static void teardown_uitt(void)
}
}
- fpregs_unlock();
+ fpregs_unlock(); // 释放 浮点寄存器锁
}
-static int init_uitt(void)
+// 初始化 UITT
+static int init_uitt(void)
{
struct task_struct *t = current;
struct fpu *fpu = &t->thread.fpu;
struct uintr_sender *ui_send;
u64 msr64;
- ui_send = kzalloc(sizeof(*t->thread.ui_send), GFP_KERNEL);
+ ui_send = kzalloc(sizeof(*t->thread.ui_send), GFP_KERNEL); //申请内存
if (!ui_send)
return -ENOMEM;
- ui_send->uitt_ctx = alloc_uitt();
- if (!ui_send->uitt_ctx) {
+ ui_send->uitt_ctx = alloc_uitt(); // uitt 初始化
+ if (!ui_send->uitt_ctx) { // uitt 初始化失败
pr_debug("send: Alloc UITT failed for task=%d\n", t->pid);
kfree(ui_send);
return -ENOMEM;
}
- fpregs_lock();
+ fpregs_lock(); //
- if (fpregs_state_valid(fpu, smp_processor_id())) {
+ if (fpregs_state_valid(fpu, smp_processor_id())) { // 浮点寄存器有效
wrmsrl(MSR_IA32_UINTR_TT, (u64)ui_send->uitt_ctx->uitt | 1);
/* Modify only the relevant bits of the MISC MSR */
rdmsrl(MSR_IA32_UINTR_MISC, msr64);
msr64 &= GENMASK_ULL(63, 32);
msr64 |= UINTR_MAX_UITT_NR;
wrmsrl(MSR_IA32_UINTR_MISC, msr64);
- } else {
+ } else { // 浮点寄存器无效,写入 xsave
struct xregs_state *xsave;
struct uintr_state *p;
@@ -361,7 +372,7 @@ static int init_uitt(void)
pr_debug("send: Setup a new UITT=%px for task=%d with size %d\n",
ui_send->uitt_ctx->uitt, t->pid, UINTR_MAX_UITT_NR * 16);
- t->thread.ui_send = ui_send;
+ t->thread.ui_send = ui_send; // 设置当前 task 的 ui_send 当前进程为发送者
return 0;
}
@@ -379,26 +390,27 @@ static void __free_uitt_entry(unsigned int entry)
pr_debug("send: Freeing UITTE entry %d for task=%d\n", entry, t->pid);
- spin_lock_irqsave(&t->thread.ui_send->uitt_ctx->uitt_lock, flags);
+ spin_lock_irqsave(&t->thread.ui_send->uitt_ctx->uitt_lock, flags); // 加锁
memset(&t->thread.ui_send->uitt_ctx->uitt[entry], 0,
- sizeof(struct uintr_uitt_entry));
- spin_unlock_irqrestore(&t->thread.ui_send->uitt_ctx->uitt_lock, flags);
+ sizeof(struct uintr_uitt_entry));// 对应 entry 的 uitt 置 0
+ spin_unlock_irqrestore(&t->thread.ui_send->uitt_ctx->uitt_lock, flags); // 解锁
- clear_bit(entry, (unsigned long *)t->thread.ui_send->uitt_mask);
+ clear_bit(entry, (unsigned long *)t->thread.ui_send->uitt_mask); // uitt_mask 对应位清 0
- if (is_uitt_empty(t)) {
+ if (is_uitt_empty(t)) { // 没有 uitt 了,全部为空.
pr_debug("send: UITT mask is empty. Dereference and teardown UITT\n");
- teardown_uitt();
+        teardown_uitt(); // 所有表项均已释放,拆除并释放整个 UITT
}
}
-static void sender_free_uitte(struct callback_head *head)
+static void sender_free_uitte(struct callback_head *head) // 释放
{
struct uintr_sender_info *s_info;
- s_info = container_of(head, struct uintr_sender_info, twork);
+ s_info = container_of(head, struct uintr_sender_info, twork); // 通过成员变量获取指向结构体的指针
+ // 获取到了 s_info
- __free_uitt_entry(s_info->uitt_index);
+ __free_uitt_entry(s_info->uitt_index); // 释放 uitt 对应资源
put_uitt_ref(s_info->uitt_ctx);
put_upid_ref(s_info->r_upid_ctx);
put_task_struct(s_info->task);
@@ -406,7 +418,7 @@ static void sender_free_uitte(struct callback_head *head)
}
void do_uintr_unregister_sender(struct uintr_receiver_info *r_info,
- struct uintr_sender_info *s_info)
+ struct uintr_sender_info *s_info) // 解绑 sender 和 receiver
{
int ret;
@@ -414,12 +426,12 @@ void do_uintr_unregister_sender(struct uintr_receiver_info *r_info,
* To make sure any new senduipi result in a #GP fault.
* The task work might take non-zero time to kick the process out.
*/
- mark_uitte_invalid(s_info);
+ mark_uitte_invalid(s_info); // 对应 uitte 使能 = 0
pr_debug("send: Adding Free UITTE %d task work for task=%d\n",
s_info->uitt_index, s_info->task->pid);
- init_task_work(&s_info->twork, sender_free_uitte);
+ init_task_work(&s_info->twork, sender_free_uitte); // 执行 sender_free_uitte
ret = task_work_add(s_info->task, &s_info->twork, true);
if (ret) {
/*
@@ -428,10 +440,10 @@ void do_uintr_unregister_sender(struct uintr_receiver_info *r_info,
*/
pr_debug("send: Free UITTE %d task=%d has already exited\n",
s_info->uitt_index, s_info->task->pid);
- put_upid_ref(s_info->r_upid_ctx);
- put_uitt_ref(s_info->uitt_ctx);
- put_task_struct(s_info->task);
- kfree(s_info);
+ put_upid_ref(s_info->r_upid_ctx); // 计数 -1 .若为 0 则释放 upid_ctx
+ put_uitt_ref(s_info->uitt_ctx); // 计数 -1 .若为 0 则释放 uitt_ctx
+ put_task_struct(s_info->task); // 进程控制块计数 -1
+ kfree(s_info); // 释放 s_info
return;
}
}
@@ -451,40 +463,40 @@ int do_uintr_register_sender(struct uintr_receiver_info *r_info,
* This check only prevents connections using uintr_fd after the
* receiver has already exited/unregistered.
*/
- if (!uintr_is_receiver_active(r_info))
+ if (!uintr_is_receiver_active(r_info)) // 检查接收者是否处于活跃状态
return -ESHUTDOWN;
- if (is_uintr_sender(t)) {
+ if (is_uintr_sender(t)) { // 检查当前进程是否已经注册过了
entry = find_first_zero_bit((unsigned long *)t->thread.ui_send->uitt_mask,
- UINTR_MAX_UITT_NR);
- if (entry >= UINTR_MAX_UITT_NR)
+ UINTR_MAX_UITT_NR); // 找到第一个为 0 的二进制数位置
+ if (entry >= UINTR_MAX_UITT_NR) // 如果前 256 位都满了,则返回 -ENOSPC
return -ENOSPC;
- } else {
+ } else { // 没有 uitt 注册过
BUILD_BUG_ON(UINTR_MAX_UITT_NR < 1);
- entry = 0;
- ret = init_uitt();
+ entry = 0; // 从第一位索引 0 开始
+ ret = init_uitt(); // 初始化 UITT ,当前进程为发送者
if (ret)
return ret;
}
- ui_send = t->thread.ui_send;
+ ui_send = t->thread.ui_send; // 取出当前进程的 ui_send
- set_bit(entry, (unsigned long *)ui_send->uitt_mask);
+ set_bit(entry, (unsigned long *)ui_send->uitt_mask); //对应位置置为 1
- spin_lock_irqsave(&ui_send->uitt_ctx->uitt_lock, flags);
- uitte = &ui_send->uitt_ctx->uitt[entry];
+ spin_lock_irqsave(&ui_send->uitt_ctx->uitt_lock, flags); //获取 uitt 锁
+    uitte = &ui_send->uitt_ctx->uitt[entry];// 取 entry 对应的 UITT 表项指针
pr_debug("send: sender=%d receiver=%d UITTE entry %d address %px\n",
current->pid, r_info->upid_ctx->task->pid, entry, uitte);
- uitte->user_vec = r_info->uvec;
- uitte->target_upid_addr = (u64)r_info->upid_ctx->upid;
- uitte->valid = 1;
- spin_unlock_irqrestore(&ui_send->uitt_ctx->uitt_lock, flags);
+ uitte->user_vec = r_info->uvec;
+ uitte->target_upid_addr = (u64)r_info->upid_ctx->upid; // 接收方 upid 的内存地址
+ uitte->valid = 1; // 使能有效
+ spin_unlock_irqrestore(&ui_send->uitt_ctx->uitt_lock, flags); // 释放 uitt 锁
- s_info->r_upid_ctx = get_upid_ref(r_info->upid_ctx);
- s_info->uitt_ctx = get_uitt_ref(ui_send->uitt_ctx);
- s_info->task = get_task_struct(current);
- s_info->uitt_index = entry;
+ s_info->r_upid_ctx = get_upid_ref(r_info->upid_ctx); // 返回 upid_ctx->refs +1. 返回 upid_ctx
+ s_info->uitt_ctx = get_uitt_ref(ui_send->uitt_ctx); // 返回 uitt_ctx->refs +1. 返回 uitt_ctx
+ s_info->task = get_task_struct(current); // 当前进程的 task_struct
+ s_info->uitt_index = entry; // uitt 的索引
return 0;
}
@@ -494,18 +506,18 @@ int uintr_receiver_wait(void)
struct uintr_upid_ctx *upid_ctx;
unsigned long flags;
- if (!is_uintr_receiver(current))
+ if (!is_uintr_receiver(current)) // 如果当前进程不是接收者,则返回 -EOPNOTSUPP
return -EOPNOTSUPP;
- upid_ctx = current->thread.ui_recv->upid_ctx;
+ upid_ctx = current->thread.ui_recv->upid_ctx; // 取出当前进程的 upid_ctx
upid_ctx->upid->nc.nv = UINTR_KERNEL_VECTOR;
- upid_ctx->waiting = true;
- spin_lock_irqsave(&uintr_wait_lock, flags);
- list_add(&upid_ctx->node, &uintr_wait_list);
- spin_unlock_irqrestore(&uintr_wait_lock, flags);
+ upid_ctx->waiting = true; // 标记为 等待中
+ spin_lock_irqsave(&uintr_wait_lock, flags); // 锁
+ list_add(&upid_ctx->node, &uintr_wait_list); // 加入到等待队列
+ spin_unlock_irqrestore(&uintr_wait_lock, flags); // 解锁
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
+ set_current_state(TASK_INTERRUPTIBLE); // 设置为可中断的状态
+ schedule(); // 让出调度
return -EINTR;
}
@@ -538,19 +550,20 @@ static void uintr_remove_task_wait(struct task_struct *task)
struct uintr_upid_ctx *upid_ctx, *tmp;
unsigned long flags;
- spin_lock_irqsave(&uintr_wait_lock, flags);
- list_for_each_entry_safe(upid_ctx, tmp, &uintr_wait_list, node) {
- if (upid_ctx->task == task) {
+ spin_lock_irqsave(&uintr_wait_lock, flags); // 获取全局等待锁
+ list_for_each_entry_safe(upid_ctx, tmp, &uintr_wait_list, node) { // 遍历 uintr_wait_list
+ if (upid_ctx->task == task) { // 如果找到了当前进程
pr_debug("wait: Removing task %d from wait\n",
upid_ctx->task->pid);
- upid_ctx->upid->nc.nv = UINTR_NOTIFICATION_VECTOR;
- upid_ctx->waiting = false;
- list_del(&upid_ctx->node);
+ upid_ctx->upid->nc.nv = UINTR_NOTIFICATION_VECTOR; // 设置 upid 的 nv 为 0xec
+ upid_ctx->waiting = false; // 等待为 false
+ list_del(&upid_ctx->node); // 从链表中删除
}
}
- spin_unlock_irqrestore(&uintr_wait_lock, flags);
+ spin_unlock_irqrestore(&uintr_wait_lock, flags); // 释放全局等待锁
}
+//
int do_uintr_unregister_handler(void)
{
struct task_struct *t = current;
@@ -558,7 +571,7 @@ int do_uintr_unregister_handler(void)
struct uintr_receiver *ui_recv;
u64 msr64;
- if (!is_uintr_receiver(t))
+ if (!is_uintr_receiver(t)) // 判断当前进程是否是接收者
return -EINVAL;
pr_debug("recv: Unregister handler and clear MSRs for task=%d\n",
@@ -572,10 +585,11 @@ int do_uintr_unregister_handler(void)
* disable preemption while modifying the MSRs, UPID and ui_recv thread
* struct.
*/
- fpregs_lock();
+ fpregs_lock(); // 获取 浮点寄存器锁
/* Clear only the receiver specific state. Sender related state is not modified */
- if (fpregs_state_valid(fpu, smp_processor_id())) {
+ // 仅清除接收者相关的状态 发送者相关的状态不会被修改
+ if (fpregs_state_valid(fpu, smp_processor_id())) { // 浮点寄存器状态有效
/* Modify only the relevant bits of the MISC MSR */
rdmsrl(MSR_IA32_UINTR_MISC, msr64);
msr64 &= ~GENMASK_ULL(39, 32);
@@ -584,7 +598,7 @@ int do_uintr_unregister_handler(void)
wrmsrl(MSR_IA32_UINTR_RR, 0ULL);
wrmsrl(MSR_IA32_UINTR_STACKADJUST, 0ULL);
wrmsrl(MSR_IA32_UINTR_HANDLER, 0ULL);
- } else {
+ } else { // 浮点寄存器状态无效
struct uintr_state *p;
p = get_xsave_addr(&fpu->state.xsave, XFEATURE_UINTR);
@@ -597,43 +611,48 @@ int do_uintr_unregister_handler(void)
}
}
- ui_recv = t->thread.ui_recv;
- ui_recv->upid_ctx->receiver_active = false;
+ ui_recv = t->thread.ui_recv; // 进程的 ui_recv
+ ui_recv->upid_ctx->receiver_active = false; // 接收者使能 false
/*
* Suppress notifications so that no further interrupts are generated
* based on this UPID.
*/
- set_bit(UPID_SN, (unsigned long *)&ui_recv->upid_ctx->upid->nc.status);
- uintr_remove_task_wait(t);
- put_upid_ref(ui_recv->upid_ctx);
- kfree(ui_recv);
- t->thread.ui_recv = NULL;
+ // 抑制通知,以便不再基于此 UPID 生成进一步的中断
+ set_bit(UPID_SN, (unsigned long *)&ui_recv->upid_ctx->upid->nc.status); //关掉 UPID status 使能
+ uintr_remove_task_wait(t); // 从等待列表中移除当前进程
+    put_upid_ref(ui_recv->upid_ctx); // upid_ctx->refs - 1, 且引用计数为 0 时释放 upid_ctx
+ kfree(ui_recv); // 释放 ui_recv
+ t->thread.ui_recv = NULL; // 置空
- fpregs_unlock();
+ fpregs_unlock(); // 释放 浮点寄存器锁
return 0;
}
+// 注册中断处理函数
int do_uintr_register_handler(u64 handler)
{
- struct uintr_receiver *ui_recv;
- struct uintr_upid *upid;
- struct task_struct *t = current;
- struct fpu *fpu = &t->thread.fpu;
- u64 misc_msr;
- int cpu;
+ struct uintr_receiver *ui_recv; // 定义 uintr_receiver 结构体指针 ui_recv
+ struct uintr_upid *upid; // 定义 uintr_upid 结构体指针 upid
+ struct task_struct *t = current; // 获取当前进程的 task_struct 结构体指针 t
+ struct fpu *fpu = &t->thread.fpu; // 获取当前进程的浮点寄存器指针 fpu
+ u64 misc_msr; // 定义 misc_msr 变量,用于存储 MSR_IA32_UINTR_MISC 寄存器的值
+ int cpu; // 定义 cpu 变量,用于存储当前 CPU 的 ID
+ // 如果当前进程已经是 uintr 接收者,则返回 EBUSY 错误
if (is_uintr_receiver(t))
return -EBUSY;
+ // 分配 uintr_receiver 结构体指针 ui_recv 的内存空间
ui_recv = kzalloc(sizeof(*ui_recv), GFP_KERNEL);
if (!ui_recv)
return -ENOMEM;
+ // 分配 uintr_upid 结构体指针 upid_ctx 的内存空间
ui_recv->upid_ctx = alloc_upid();
- if (!ui_recv->upid_ctx) {
- kfree(ui_recv);
+ if (!ui_recv->upid_ctx) {
+ kfree(ui_recv); // 如果分配失败,则释放 ui_recv 的内存空间
pr_debug("recv: alloc upid failed for task=%d\n", t->pid);
return -ENOMEM;
}
@@ -646,18 +665,23 @@ int do_uintr_register_handler(u64 handler)
* disable preemption while modifying the MSRs, UPID and ui_recv thread
* struct.
*/
- fpregs_lock();
+ fpregs_lock(); // 获取浮点寄存器锁
- cpu = smp_processor_id();
- upid = ui_recv->upid_ctx->upid;
- upid->nc.nv = UINTR_NOTIFICATION_VECTOR;
- upid->nc.ndst = cpu_to_ndst(cpu);
+ cpu = smp_processor_id(); // 获取当前 CPU 的 ID
+ upid = ui_recv->upid_ctx->upid; // 获取当前进程的 upid 结构体指针
+ upid->nc.nv = UINTR_NOTIFICATION_VECTOR; // 中断编号
+ upid->nc.ndst = cpu_to_ndst(cpu); // 接收者所在的 cpu 核心
- t->thread.ui_recv = ui_recv;
+    // 拓展了 thread_struct 结构体,加入了
+    // struct uintr_receiver *ui_recv;
+    // struct uintr_sender *ui_send;
+    // 这样注册 ui_recv / ui_send 后就关联起来了
+ t->thread.ui_recv = ui_recv; // 当前进程就可以接收 uintr 了
- if (fpregs_state_valid(fpu, cpu)) {
- wrmsrl(MSR_IA32_UINTR_HANDLER, handler);
- wrmsrl(MSR_IA32_UINTR_PD, (u64)ui_recv->upid_ctx->upid);
+ if (fpregs_state_valid(fpu, cpu)) { //如果当前进程的浮点寄存器有效
+ //都是新增加的硬件寄存器,归硬件直接操作.
+ wrmsrl(MSR_IA32_UINTR_HANDLER, handler); // handler 放到 MSR_IA32_UINTR_HANDLER 寄存器
+ wrmsrl(MSR_IA32_UINTR_PD, (u64)ui_recv->upid_ctx->upid); // upid 放到 MSR_IA32_UINTR_PD 寄存器
/* Set value as size of ABI redzone */
wrmsrl(MSR_IA32_UINTR_STACKADJUST, 128);
@@ -666,7 +690,7 @@ int do_uintr_register_handler(u64 handler)
rdmsrl(MSR_IA32_UINTR_MISC, misc_msr);
misc_msr |= (u64)UINTR_NOTIFICATION_VECTOR << 32;
wrmsrl(MSR_IA32_UINTR_MISC, misc_msr);
- } else {
+ } else { // 当前进程的浮点寄存器无效
struct xregs_state *xsave;
struct uintr_state *p;
@@ -679,11 +703,11 @@ int do_uintr_register_handler(u64 handler)
p->upid_addr = (u64)ui_recv->upid_ctx->upid;
p->stack_adjust = 128;
p->uinv = UINTR_NOTIFICATION_VECTOR;
- printk("the xsave addr is %p\n handler: 0x%llx | upid_addr: 0x%llx | uif: %d\n", p, handler,p->upid_addr,p->uif_pad3); // 改
+ printk("the xsave addr is %p\n handler: 0x%llx | upid_addr: 0x%llx | uif: %d\n", p, handler,p->upid_addr,p->uif_pad3);
}
}
- fpregs_unlock();
+ fpregs_unlock(); // 释放浮点寄存器锁
pr_debug("recv: task=%d register handler=%llx upid %px\n",
t->pid, handler, upid);
@@ -708,10 +732,10 @@ void do_uintr_unregister_vector(struct uintr_receiver_info *r_info)
return;
}
}
-
+// 注册中断向量
int do_uintr_register_vector(struct uintr_receiver_info *r_info)
{
- struct uintr_receiver *ui_recv;
+ struct uintr_receiver *ui_recv; //
struct task_struct *t = current;
/*
@@ -721,6 +745,7 @@ int do_uintr_register_vector(struct uintr_receiver_info *r_info)
if (!is_uintr_receiver(t))
return -EINVAL;
+ // 检查向量号是否超出范围
if (r_info->uvec >= UINTR_MAX_UVEC_NR)
return -ENOSPC;
@@ -790,9 +815,9 @@ void switch_uintr_return(void)
* It is necessary to clear the SN bit after we set UINV and
* NDST to avoid incorrect interrupt routing.
*/
- upid = current->thread.ui_recv->upid_ctx->upid;
- upid->nc.ndst = cpu_to_ndst(smp_processor_id());
- clear_bit(UPID_SN, (unsigned long *)&upid->nc.status);
+ upid = current->thread.ui_recv->upid_ctx->upid; // 获取当前进程的 upid
+ upid->nc.ndst = cpu_to_ndst(smp_processor_id()); // 更新 ndst 为当前cpu
+ clear_bit(UPID_SN, (unsigned long *)&upid->nc.status); // 清除SN位 启用 uintr 中断
/*
* Interrupts might have accumulated in the UPID while the
@@ -809,7 +834,7 @@ void switch_uintr_return(void)
* updated after the read.
*/
- if (READ_ONCE(upid->puir)){
+ if (READ_ONCE(upid->puir)){ // 有未处理的用户中断请求
printk("sending self ipi\n");
apic->send_IPI_self(UINTR_NOTIFICATION_VECTOR);
}
diff --git a/arch/x86/kernel/uintr_fd.c b/arch/x86/kernel/uintr_fd.c
index 891c4568d60b..f2e59d445733 100644
--- a/arch/x86/kernel/uintr_fd.c
+++ b/arch/x86/kernel/uintr_fd.c
@@ -15,13 +15,13 @@
static bool Debug = false;
struct uintrfd_ctx {
- struct uintr_receiver_info *r_info;
+ struct uintr_receiver_info *r_info; // 接收方信息
/* Protect sender_list */
- spinlock_t sender_lock;
- struct list_head sender_list;
+ spinlock_t sender_lock; // 自旋锁, 保护 sender_list
+ struct list_head sender_list; // uintr_sender_info 列表
};
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_FS // 启用 proc 文件系统
static void uintrfd_show_fdinfo(struct seq_file *m, struct file *file)
{
struct uintrfd_ctx *uintrfd_ctx = file->private_data;
@@ -31,7 +31,7 @@ static void uintrfd_show_fdinfo(struct seq_file *m, struct file *file)
}
#endif
-static int uintrfd_release(struct inode *inode, struct file *file)
+static int uintrfd_release(struct inode *inode, struct file *file) // 释放 uintrfd
{
struct uintrfd_ctx *uintrfd_ctx = file->private_data;
struct uintr_sender_info *s_info, *tmp;
@@ -43,14 +43,14 @@ static int uintrfd_release(struct inode *inode, struct file *file)
uintrfd_ctx->r_info->upid_ctx->task->pid,
uintrfd_ctx->r_info->uvec);
- spin_lock_irqsave(&uintrfd_ctx->sender_lock, flags);
- list_for_each_entry_safe(s_info, tmp, &uintrfd_ctx->sender_list, node) {
- list_del(&s_info->node);
- do_uintr_unregister_sender(uintrfd_ctx->r_info, s_info);
+ spin_lock_irqsave(&uintrfd_ctx->sender_lock, flags); // 加锁
+ list_for_each_entry_safe(s_info, tmp, &uintrfd_ctx->sender_list, node) {// 遍历 sender_list
+ list_del(&s_info->node); // 从 sender_list 中删除 s_info
+ do_uintr_unregister_sender(uintrfd_ctx->r_info, s_info); // 注销 s_info
}
- spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, flags);
+ spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, flags); // 解锁
- do_uintr_unregister_vector(uintrfd_ctx->r_info);
+ do_uintr_unregister_vector(uintrfd_ctx->r_info); // 注销 uintrfd_ctx->r_info
kfree(uintrfd_ctx);
return 0;
@@ -67,46 +67,49 @@ static const struct file_operations uintrfd_fops = {
/*
* sys_uintr_create_fd - Create a uintr_fd for the registered interrupt vector.
*/
-SYSCALL_DEFINE2(uintr_create_fd, u64, vector, unsigned int, flags)
+SYSCALL_DEFINE2(uintr_create_fd, u64, vector, unsigned int, flags) // 创建一个 uintr_fd 文件描述符
{
if (Debug)printk("uintr_create_fd called\n");
- struct uintrfd_ctx *uintrfd_ctx;
+ struct uintrfd_ctx *uintrfd_ctx; //定义 uintrfd_ctx
int uintrfd;
int ret;
- if (!uintr_arch_enabled())
- return -EOPNOTSUPP;
+ if (!uintr_arch_enabled()) // 如果 uintr 架构未启用
+ return -EOPNOTSUPP; // 返回不支持该操作的错误码
- if (flags)
- return -EINVAL;
+ if (flags) // 如果 flags 不为 0
+ return -EINVAL; // 返回无效参数的错误码
- uintrfd_ctx = kzalloc(sizeof(*uintrfd_ctx), GFP_KERNEL);
+ uintrfd_ctx = kzalloc(sizeof(*uintrfd_ctx), GFP_KERNEL); // 为 uintrfd_ctx 分配内存空间
if (!uintrfd_ctx)
- return -ENOMEM;
+ return -ENOMEM; // 如果分配失败,返回内存不足的错误码
- uintrfd_ctx->r_info = kzalloc(sizeof(*uintrfd_ctx->r_info), GFP_KERNEL);
- if (!uintrfd_ctx->r_info) {
- ret = -ENOMEM;
- goto out_free_ctx;
+ uintrfd_ctx->r_info = kzalloc(sizeof(*uintrfd_ctx->r_info), GFP_KERNEL); // 为 uintr_receiver_info 分配内存空间
+ if (!uintrfd_ctx->r_info) { // 如果分配失败
+ ret = -ENOMEM; // 返回内存不足的错误码
+ goto out_free_ctx; // 跳转到 out_free_ctx
}
- uintrfd_ctx->r_info->uvec = vector;
- ret = do_uintr_register_vector(uintrfd_ctx->r_info);
- if (ret) {
- kfree(uintrfd_ctx->r_info);
- goto out_free_ctx;
+ uintrfd_ctx->r_info->uvec = vector; // 将 vector 赋值给 uintrfd_ctx->r_info->uvec
+ ret = do_uintr_register_vector(uintrfd_ctx->r_info); // 注册中断向量
+ if (ret) { // 如果注册失败
+ kfree(uintrfd_ctx->r_info); // 释放 uintrfd_ctx->r_info 的内存空间
+ goto out_free_ctx; // 跳转到 out_free_ctx
}
- INIT_LIST_HEAD(&uintrfd_ctx->sender_list);
- spin_lock_init(&uintrfd_ctx->sender_lock);
+ INIT_LIST_HEAD(&uintrfd_ctx->sender_list); // 初始化 sender_list,使其成为一个空链表
+ spin_lock_init(&uintrfd_ctx->sender_lock); // 初始化 sender_lock,使其成为一个未锁定的自旋锁
/* TODO: Get user input for flags - UFD_CLOEXEC */
/* Check: Do we need O_NONBLOCK? */
+ // 创建匿名 inode,关联 uintrfd_ctx ,返回 uintrfd
+ // uintrfd_fops 封装了文件操作函数
+ // O_RDONLY | O_CLOEXEC | O_NONBLOCK 代表了 只读、关闭时执行、非阻塞
uintrfd = anon_inode_getfd("[uintrfd]", &uintrfd_fops, uintrfd_ctx,
O_RDONLY | O_CLOEXEC | O_NONBLOCK);
- if (uintrfd < 0) {
- ret = uintrfd;
+ if (uintrfd < 0) { // 如果创建失败
+ ret = uintrfd; // 创建失败的错误码
goto out_free_uvec;
}
if(Debug)printk("recv: Alloc vector success uintrfd %d uvec %llu for task=%d\n",
@@ -115,12 +118,12 @@ SYSCALL_DEFINE2(uintr_create_fd, u64, vector, unsigned int, flags)
pr_debug("recv: Alloc vector success uintrfd %d uvec %llu for task=%d\n",
uintrfd, uintrfd_ctx->r_info->uvec, current->pid);
- return uintrfd;
+ return uintrfd; // 返回 uintrfd
out_free_uvec:
do_uintr_unregister_vector(uintrfd_ctx->r_info);
out_free_ctx:
- kfree(uintrfd_ctx);
+ kfree(uintrfd_ctx); // 释放 uintrfd_ctx 的内存空间
if(Debug)printk("recv: Alloc vector failed for task=%d ret %d\n",
current->pid, ret);
pr_debug("recv: Alloc vector failed for task=%d ret %d\n",
@@ -136,17 +139,17 @@ SYSCALL_DEFINE2(uintr_register_handler, u64 __user *, handler, unsigned int, fla
if (Debug) printk("uintr_register_handler called\n");
int ret;
- if (!uintr_arch_enabled())
+ if (!uintr_arch_enabled()) // 如果 uintr 未启用
return -EOPNOTSUPP;
- if (flags)
+ if (flags) // 如果 flags 不为 0
return -EINVAL;
/* TODO: Validate the handler address */
if (!handler)
return -EFAULT;
- ret = do_uintr_register_handler((u64)handler);
+ ret = do_uintr_register_handler((u64)handler); // 注册中断处理函数
if(Debug) printk("recv: register handler task=%d flags %d handler %lx ret %d\n",current->pid, flags, (unsigned long)handler, ret);
pr_debug("recv: register handler task=%d flags %d handler %lx ret %d\n",
current->pid, flags, (unsigned long)handler, ret);
@@ -168,7 +171,7 @@ SYSCALL_DEFINE1(uintr_unregister_handler, unsigned int, flags)
if (flags)
return -EINVAL;
- ret = do_uintr_unregister_handler();
+ ret = do_uintr_unregister_handler(); // 主要处理在这了
if(Debug) printk("recv: unregister handler task=%d flags %d ret %d\n",
current->pid, flags, ret);
pr_debug("recv: unregister handler task=%d flags %d ret %d\n",
@@ -183,60 +186,60 @@ SYSCALL_DEFINE1(uintr_unregister_handler, unsigned int, flags)
SYSCALL_DEFINE2(uintr_register_sender, int, uintrfd, unsigned int, flags)
{
if (Debug) printk("uintr_register_sender called\n");
- struct uintr_sender_info *s_info;
- struct uintrfd_ctx *uintrfd_ctx;
- unsigned long lock_flags;
- struct file *uintr_f;
- struct fd f;
- int ret = 0;
-
- if (!uintr_arch_enabled())
+ struct uintr_sender_info *s_info; // 定义一个指向 uintr_sender_info 结构体的指针 s_info
+ struct uintrfd_ctx *uintrfd_ctx; // 定义一个指向 uintrfd_ctx 结构体的指针 uintrfd_ctx
+ unsigned long lock_flags; // 定义一个无符号长整型变量 lock_flags
+ struct file *uintr_f; // 定义一个指向 file 结构体的指针 uintr_f
+ struct fd f; // 定义一个 fd 结构体变量 f
+ int ret = 0; // 定义一个整型变量 ret 并初始化为 0
+
+ if (!uintr_arch_enabled()) // 如果 uintr 未启用
return -EOPNOTSUPP;
- if (flags)
+ if (flags) // 如果 flags 不为 0
return -EINVAL;
- f = fdget(uintrfd);
- uintr_f = f.file;
- if (!uintr_f)
- return -EBADF;
+ f = fdget(uintrfd); // 获取文件描述符 uintrfd 对应的文件
+ uintr_f = f.file;
+ if (!uintr_f) // 如果文件不存在
+ return -EBADF;
- if (uintr_f->f_op != &uintrfd_fops) {
- ret = -EOPNOTSUPP;
- goto out_fdput;
+ if (uintr_f->f_op != &uintrfd_fops) { // 如果文件操作指针不是指向 uintrfd_fops 结构体
+ ret = -EOPNOTSUPP;
+ goto out_fdput; // 错误处理
}
- uintrfd_ctx = (struct uintrfd_ctx *)uintr_f->private_data;
+ uintrfd_ctx = (struct uintrfd_ctx *)uintr_f->private_data; // 获取在 uintr_create_fd 存入的 uintrfd_ctx
- spin_lock_irqsave(&uintrfd_ctx->sender_lock, lock_flags);
- list_for_each_entry(s_info, &uintrfd_ctx->sender_list, node) {
+ spin_lock_irqsave(&uintrfd_ctx->sender_lock, lock_flags); // 获取 sender_lock
+ list_for_each_entry(s_info, &uintrfd_ctx->sender_list, node) { // 检测当前线程是否已经注册过 sender
if (s_info->task == current) {
ret = -EISCONN;
break;
}
}
- spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, lock_flags);
+ spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, lock_flags); // 释放 sender_lock
- if (ret)
+ if (ret) // 如果已经注册过
goto out_fdput;
- s_info = kzalloc(sizeof(*s_info), GFP_KERNEL);
+ s_info = kzalloc(sizeof(*s_info), GFP_KERNEL); // 分配内存空间
if (!s_info) {
ret = -ENOMEM;
goto out_fdput;
}
- ret = do_uintr_register_sender(uintrfd_ctx->r_info, s_info);
- if (ret) {
+ ret = do_uintr_register_sender(uintrfd_ctx->r_info, s_info); // 关联起 r_info 和 s_info
+ if (ret) { // 失败
kfree(s_info);
goto out_fdput;
}
- spin_lock_irqsave(&uintrfd_ctx->sender_lock, lock_flags);
- list_add(&s_info->node, &uintrfd_ctx->sender_list);
- spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, lock_flags);
+ spin_lock_irqsave(&uintrfd_ctx->sender_lock, lock_flags); // uintrfd_ctx sender_list 上锁
+ list_add(&s_info->node, &uintrfd_ctx->sender_list); // 将 s_info 添加到 uintrfd_ctx sender_list 中
+ spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, lock_flags); // uintrfd_ctx sender_list 解锁
- ret = s_info->uitt_index;
+ ret = s_info->uitt_index; // 返回分配到的 UITT 表项索引(即 uipi_id)
out_fdput:
if(Debug)printk("send: register sender task=%d flags %d ret(uipi_id)=%d\n",
@@ -261,13 +264,13 @@ SYSCALL_DEFINE2(uintr_unregister_sender, int, uintrfd, unsigned int, flags)
struct fd f;
int ret;
- if (!uintr_arch_enabled())
+ if (!uintr_arch_enabled()) // 架构下没有启用 uintr
return -EOPNOTSUPP;
- if (flags)
+ if (flags) // flags 不为 0
return -EINVAL;
- f = fdget(uintrfd);
+ f = fdget(uintrfd); // 通过 fd 获取对应的 struct file,稍后从其 private_data 取出 uintrfd_ctx
uintr_f = f.file;
if (!uintr_f)
return -EBADF;
@@ -280,16 +283,16 @@ SYSCALL_DEFINE2(uintr_unregister_sender, int, uintrfd, unsigned int, flags)
uintrfd_ctx = (struct uintrfd_ctx *)uintr_f->private_data;
ret = -EINVAL;
- spin_lock_irqsave(&uintrfd_ctx->sender_lock, lock_flags);
- list_for_each_entry(s_info, &uintrfd_ctx->sender_list, node) {
- if (s_info->task == current) {
+ spin_lock_irqsave(&uintrfd_ctx->sender_lock, lock_flags); // uintrfd_ctx sender_list 上锁
+ list_for_each_entry(s_info, &uintrfd_ctx->sender_list, node) { // 遍历 uintrfd_ctx -> sender_list
+ if (s_info->task == current) { // 如果当前线程注册过 sender
ret = 0;
- list_del(&s_info->node);
- do_uintr_unregister_sender(uintrfd_ctx->r_info, s_info);
+ list_del(&s_info->node); // 将 s_info 从 uintrfd_ctx sender_list 中删除
+ do_uintr_unregister_sender(uintrfd_ctx->r_info, s_info); // 解绑 r_info 和 s_info,释放资源
break;
}
}
- spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, lock_flags);
+ spin_unlock_irqrestore(&uintrfd_ctx->sender_lock, lock_flags); // 解锁
if(Debug)printk("send: unregister sender uintrfd %d for task=%d ret %d\n",
uintrfd, current->pid, ret);
pr_debug("send: unregister sender uintrfd %d for task=%d ret %d\n",
@@ -312,5 +315,5 @@ SYSCALL_DEFINE1(uintr_wait, unsigned int, flags)
return -EINVAL;
/* TODO: Add a timeout option */
- return uintr_receiver_wait();
+ return uintr_receiver_wait(); // 主要逻辑在这里
}