/*
该handle在arch/arm/mach-davinci/irq.c中注册,在dm644x平台中为do_edge_IRQ()或
do_level_IRQ(),这两个例程中会调用用户注册的中断例程。
*/
desc->handle(irq, desc, regs);
/*
* Now re-run any pending interrupts.
*/
// 检查是否有等待处理的中断请求
if (!list_empty(&irq_pending))
do_pending_irqs(regs); // 处理延后的中断请求
spin_unlock(&irq_controller_lock);
// 退出中断上下文并检查是否有软中断发生
irq_exit();
/*
检查延迟中断并记录
*/
latency_check();
// 记录irq退出事件,其实在该平台什么也没做。
ltt_ev_irq_exit();
}
在上面的asm_do_IRQ例程中,调用的desc->handle(irq, desc, regs)在dm644x平台中有两种类型,
do_edge_IRQ()和do_level_IRQ()。下面的分析都是基于asm_do_IRQ例程中的代码。
3.进入desc->handle(),这里假设指向do_edge_IRQ()例程:
linux/arch/arm/kernel/irq.c:
/*
* Most edge-triggered IRQ implementations seem to take a broken
* approach to this. Hence the complexity.
*/
/*
 * Handle one edge-triggered interrupt.
 *
 * @irq:  interrupt line number
 * @desc: descriptor for this interrupt line
 * @regs: register state saved at interrupt entry
 *
 * The controller ack is issued *before* the handlers run, so further
 * edges arriving during handling are latched by the controller; such
 * edges are marked pending and replayed by the do/while loop below.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
const unsigned int cpu = smp_processor_id();
desc->triggered = 1;
/*
 * If we're currently running this IRQ, or its disabled,
 * we shouldn't process the IRQ. Instead, turn on the
 * hardware masks.
 */
/*
 * desc->disable_depth > 0 means the line is disabled; it is
 * incremented by disable_irq() and decremented by enable_irq().
 */
if (unlikely(desc->running || desc->disable_depth))
goto running;
/*
 * Acknowledge and clear the IRQ, but don't mask it.
 */
/* ack() is registered in arch/arm/mach-davinci/irq.c; it clears the
 * corresponding bit of the interrupt-request register. */
desc->chip->ack(irq);
/*
 * Mark the IRQ currently in progress.
 */
desc->running = 1; /* this line is now being serviced */
kstat_cpu(cpu).irqs[irq]++; /* per-CPU count of interrupts on this line */
#ifdef CONFIG_PREEMPT_HARDIRQS
if (desc->action) desc->status |= IRQ_INPROGRESS;
if (redirect_hardirq(desc))
return;
#endif
do {
struct irqaction *action;
action = desc->action;
if (!action) /* no handler registered for this line */
break;
/*
 * If an edge is pending and the line is enabled, unmask it
 * (write 1 to the interrupt-enable register); unmask() is
 * registered in arch/arm/mach-davinci/irq.c.
 */
if (desc->pending && !desc->disable_depth) {
desc->pending = 0;
desc->chip->unmask(irq);
}
/* run the user-registered handler(s) held in action->handler */
__do_irq(irq, action, regs);
} while (desc->pending && !desc->disable_depth);/* replay edges that arrived meanwhile */
desc->status &= ~IRQ_INPROGRESS;
desc->running = 0;
/*
 * If we were disabled or freed, shut down the handler.
 */
if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
return;
running:
/*
 * We got another IRQ while this one was masked or
 * currently running. Delay it.
 */
desc->pending = 1;
desc->status |= IRQ_PENDING;
desc->chip->mask(irq);
desc->chip->ack(irq);
}
4.进入__do_irq例程,其运行用户注册的中断例程.
linux/arch/arm/kernel/irq.c:
/*
 * Run the user-registered handler chain for one interrupt line.
 *
 * Entered with irq_controller_lock held; drops it (and possibly
 * re-enables local interrupts) around the handler calls, then
 * re-takes the lock before returning to the caller.
 *
 * Returns the OR of the handlers' return values (IRQ_HANDLED etc.).
 */
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
unsigned int status;
int ret, retval = 0;
spin_unlock(&irq_controller_lock);
/* Re-enable interrupts to allow nesting, unless we are already in a
 * nested hardirq or the handler is a fast one (SA_INTERRUPT). */
if (!hardirq_count() || !(action->flags & SA_INTERRUPT))
local_irq_enable();
interrupt_overhead_stop();
status = 0;
/* A line may be shared: walk the whole irqaction chain and call every
 * registered handler. */
do {
ret = action->handler(irq, action->dev_id, regs);
if (ret == IRQ_HANDLED)
status |= action->flags;
retval |= ret;
action = action->next;
} while (action);
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
#ifdef CONFIG_NO_IDLE_HZ
if (timer_update.function && irq != timer_update.skip)
timer_update.function(irq, 0, regs);
#endif
spin_lock_irq(&irq_controller_lock);
return retval;
}
5.进入do_pending_irqs()例程,处理延后的中断请求。
linux/arch/arm/kernel/irq.c:
/*
 * Replay interrupts that were marked pending while masked or running.
 *
 * The pending descriptors are first moved onto a local list, because
 * running the handlers may queue new entries onto the global
 * irq_pending list; the outer loop repeats until no new ones appear.
 */
static void do_pending_irqs(struct pt_regs *regs)
{
struct list_head head, *l, *n;
do {
struct irqdesc *desc;
/*
 * First, take the pending interrupts off the list.
 * The act of calling the handlers may add some IRQs
 * back onto the list.
 */
head = irq_pending; /* grab the current pending list */
INIT_LIST_HEAD(&irq_pending); /* and reset the global list */
head.next->prev = &head; /* re-point the moved nodes at the local head */
head.prev->next = &head;
/*
 * Now run each entry. We must delete it from our
 * list before calling the handler.
 */
/* for each pending descriptor, unlink it and invoke desc->handle() */
list_for_each_safe(l, n, &head) {
desc = list_entry(l, struct irqdesc, pend);
list_del_init(&desc->pend);
desc->handle(desc - irq_desc, desc, regs); /* desc - irq_desc is the irq number */
}
/*
 * The list must be empty.
 */
BUG_ON(!list_empty(&head));
} while (!list_empty(&irq_pending));/* handlers may have queued new pending IRQs */
}
6.进入irq_exit()例程,该例程会检查是否有软中断发生。
linux/arch/arm/kernel/irq.c:
// kernel/softirq.c
/*
 * Leave interrupt context: drop the hardirq count, run pending
 * softirqs if we are no longer in any interrupt context, then drop
 * the preempt count without triggering a reschedule.
 */
void irq_exit(void)
{
// decrement the hardirq part of the preempt counter
sub_preempt_count(IRQ_EXIT_OFFSET);
// only run softirqs when not nested inside another interrupt context
if (!in_interrupt() && local_softirq_pending())
invoke_softirq(); // process the pending softirqs
// drop the preempt count taken at irq entry, without rescheduling
preempt_enable_no_resched();
}
因为软中断是串行处理的,所以不允许发生在软中断上下文中。
7.进入软中断处理例程__do_softirq(),其实就是invoke_softirq()的#define定义:
linux/kernel/softirq.c:
# define invoke_softirq() __do_softirq()
/*
* We restart softirq processing MAX_SOFTIRQ_RESTART times,
* and we fall back to softirqd after that.
*
* This number has been established via experimentation.
* The two things to balance is latency against fairness -
* we want to handle softirqs as soon as possible, but they
* should not be able to lock up the box.
*/
#define MAX_SOFTIRQ_RESTART 10
/*
 * Core softirq dispatch loop.
 *
 * Processes this CPU's pending-softirq bitmask with hardware
 * interrupts enabled, restarting up to MAX_SOFTIRQ_RESTART times if
 * new softirqs are raised meanwhile; after that it wakes softirqd so
 * softirqs cannot monopolise the CPU.
 *
 * Fix: the print_symbol() format string was split across two source
 * lines in the middle of the literal (invalid C); restored to a
 * single string literal.
 */
asmlinkage void ___do_softirq(void)
{
struct softirq_action *h;
__u32 pending;
int max_restart = MAX_SOFTIRQ_RESTART;
int cpu;
/* snapshot this CPU's pending-softirq bitmask */
pending = local_softirq_pending();
cpu = smp_processor_id();
restart:
/* Reset the pending bitmask before enabling irqs */
local_softirq_pending() = 0;
local_irq_enable(); /* allow hardware interrupts while handlers run */
h = softirq_vec; /* handler table; lower index means higher priority */
do {
if (pending & 1) {
{
u32 preempt_count = preempt_count();
/* record the softirq event if kernel event tracing is enabled */
ltt_ev_soft_irq(LTT_EV_SOFT_IRQ_SOFT_IRQ, (h - softirq_vec));
/* run the registered softirq handler */
h->action(h);
/*
 * A handler returning with a changed preempt count (e.g. a
 * missing unlock) would corrupt later accounting: report it
 * and repair the count.
 */
if (preempt_count != preempt_count()) {
print_symbol("softirq preempt bug: exited %s with wrong preemption count!\n",
(unsigned long) h->action);
printk("entered with %08x, exited with %08x.\n",
preempt_count, preempt_count());
preempt_count() = preempt_count;
}
}
rcu_bh_qsctr_inc(cpu);
cond_resched_all(); /* give the scheduler a chance between handlers */
}
h++;
pending >>= 1;
} while (pending);
local_irq_disable();
pending = local_softirq_pending();
/*
 * If more softirqs were raised while we ran and the restart budget
 * is not exhausted, process them immediately.
 */
if (pending && --max_restart)
goto restart;
if (pending) /* budget exhausted: defer to the softirqd kernel thread */
wakeup_softirqd();
}
从asm_do_IRQ()中返回后再用get_irqnr_and_base宏检查是否又有新的中断发生(这就是前面讲到的串行
处理),否则在get_thread_info tsk宏中通过sp得到task_struct的地址,并跳到
ret_to_user:
get_thread_info tsk
mov why, #0
b ret_to_user
从以上能看出,linux系统的中断处理过程比较完善,对于一个操作系统来说是比较适合的。但是就是因为
完善,所以整个中断过程比较长,耗费的时间比裸跑的中断多很多,正所谓有利有弊,各取所需。