
Linux kernel timer lockups: a look at how timer_list timers are created and dispatched through the softirq mechanism


Timers added with mod_timer are invoked from the kernel's timer softirq. __run_timers takes spin_lock_irq(&base->lock), which disables local interrupts, so in the code path quoted below our timer callback handler runs with IRQs off. The handler therefore has to be written carefully: avoid any call that can sleep, for example kmalloc(GFP_KERNEL). If the handler blocks, the kernel never returns from it, and the machine hangs because of a design flaw in our .ko [luther.gliethttp]!
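For example, a minimal hedged sketch of the rule (wlan_defer_fn and the allocation are illustrative, not from the original driver): inside the callback use only non-sleeping variants, and push anything that may block out to process context via a workqueue:

#include <linux/slab.h>
#include <linux/workqueue.h>

/* process context: sleeping is allowed here */
static void wlan_defer_fn(struct work_struct *work)
{
    /* do the heavy or blocking part of the job */
}
static DECLARE_WORK(wlan_defer_work, wlan_defer_fn);

/* softirq context: must never sleep */
static void wlan_timer_handler(unsigned long data)
{
    void *buf = kmalloc(64, GFP_ATOMIC); /* ok: never sleeps, but may return NULL */
    /* kmalloc(64, GFP_KERNEL) here could sleep and stall the kernel */
    kfree(buf);

    schedule_work(&wlan_defer_work);     /* defer blocking work to process context */
}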

The following few lines add a timer to a .ko driver to handle a periodic event:

struct __wlan_check_tx_flow_timer
{
    struct timer_list timer;
    int timer_freq;
} wlan_check_tx_flow_timer = {
    .timer_freq = 8 * 1000,   /* period in ms: 8 seconds */
};

static void wlan_check_tx_flow_timer_handler(unsigned long data)
{
    ...
    /* re-arm the timer for the next period */
    mod_timer(&wlan_check_tx_flow_timer.timer,
              jiffies + msecs_to_jiffies(wlan_check_tx_flow_timer.timer_freq));
    ...
}

/* initialize the timer */
setup_timer(&wlan_check_tx_flow_timer.timer, wlan_check_tx_flow_timer_handler,
            (unsigned long)&wlan_check_tx_flow_timer);
/* arm the timer for the first time */
mod_timer(&wlan_check_tx_flow_timer.timer,
          jiffies + msecs_to_jiffies(wlan_check_tx_flow_timer.timer_freq));
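One lockup-avoidance detail worth adding, since it is not in the original snippet: because the handler re-arms itself, module unload must stop the timer with del_timer_sync(). A hedged sketch (wlan_check_tx_flow_exit is a hypothetical exit routine):

static void __exit wlan_check_tx_flow_exit(void)
{
    /* del_timer_sync() deactivates the timer and waits for a handler
     * already running on another CPU to finish; a plain del_timer()
     * could return while the handler is still executing (and re-arming
     * itself), leaving a callback into unloaded module code */
    del_timer_sync(&wlan_check_tx_flow_timer.timer);
}

(On modern kernels, 4.15 and later, the setup_timer()/unsigned long data pair shown above has been replaced by timer_setup() and from_timer().)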

So when does wlan_check_tx_flow_timer_handler actually get called? Let's follow the kernel and see how it manages timers.

First, very early in boot, the kernel registers the handler for TIMER_SOFTIRQ [luther.gliethttp]:

start_kernel
=>init_timers
=>open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
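open_softirq simply fills a slot in a static per-softirq table; in 2.6-era kernels the table looks roughly like this (paraphrased from kernel/softirq.c, so treat the details as approximate):

struct softirq_action
{
    void (*action)(struct softirq_action *);
    void *data;
};

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;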

So who calls raise_softirq(TIMER_SOFTIRQ) to trigger the TIMER_SOFTIRQ software interrupt? That part is platform specific; for the pxa935 processor [luther.gliethttp]:

MACHINE_START(LUTHER, "luther")
    .phys_io      = 0x40000000,
    .boot_params  = 0xa0000100,
    .io_pg_offst  = (io_p2v(0x40000000) >> 18) & 0xfffc,
    .map_io       = pxa_map_io,
    .init_irq     = pxa3xx_init_irq,
    .timer        = &pxa_timer,
    .init_machine = luther_init,
MACHINE_END

=>pxa_timer_init // the platform's timer initialization
==>pxa_timer_irq.dev_id = &ckevt_32ktimer;
==>setup_irq(IRQ_OST_4_11, &pxa_timer_irq); // the 32768 Hz rtc-based tick source
==>clockevents_register_device(&ckevt_32ktimer);

pxa_timer_interrupt // the timer interrupt handler
=>c->event_handler(c); // i.e. tick_handle_periodic, the system tick handler
=>tick_handle_periodic
=>update_process_times
=>run_local_timers
=>raise_softirq(TIMER_SOFTIRQ);

At this point TIMER_SOFTIRQ has only been raised. Where does the kernel actually run wlan_check_tx_flow_timer_handler, the handler we added with mod_timer [luther.gliethttp]?

__irq_svc: // interrupt taken while in the kernel
__irq_usr: // interrupt taken while in user space
=>asm_do_IRQ
=>irq_exit
=>do_softirq
=>__do_softirq
=>invokes the run_timer_softirq handler registered above
=>run_timer_softirq
=>__run_timers

static inline void __run_timers(struct tvec_base *base)
{
    struct timer_list *timer;

    spin_lock_irq(&base->lock);   /* disable local interrupts */
    while (time_after_eq(jiffies, base->timer_jiffies)) {
        ...
        if (/* the timer has expired */) {
            ...
            fn = timer->function;
            data = timer->data;
            fn(data);   /* this calls our wlan_check_tx_flow_timer_handler(unsigned long data) */
            ...
        }
        ...
    }
    set_running_timer(base, NULL);
    spin_unlock_irq(&base->lock); /* re-enable interrupts */
}

(Note: in the mainline 2.6 sources __run_timers actually drops base->lock around fn(data), so IRQs may be enabled while the callback runs; either way the callback executes in softirq context, where sleeping is still forbidden.)

//================

include/asm/hardirq.h

typedef struct {
    unsigned int __softirq_pending;
    unsigned int local_timer_irqs;
} ____cacheline_aligned irq_cpustat_t;

//================

kernel/softirq.c|45| irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; /* the irq_stat storage is defined here */
EXPORT_SYMBOL(irq_stat);
#endif

//================

include/linux/irq_cpustat.h

#ifndef __ARCH_IRQ_STAT
/* refers to the irq_stat[NR_CPUS] array defined above */
extern irq_cpustat_t irq_stat[];   /* defined in asm/hardirq.h */
#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
#endif

//================

arch/arm/kernel/entry-armv.S|331| .word irq_stat

#ifdef CONFIG_PREEMPT
svc_preempt:
    teq     r8, #0                  @ was preempt count = 0
    ldreq   r6, .LCirq_stat         @ load &irq_stat
    movne   pc, lr                  @ no
    ldr     r0, [r6, #4]            @ local_irq_count
    ldr     r1, [r6, #8]            @ local_bh_count
    adds    r0, r0, r1
    movne   pc, lr
    mov     r7, #0                  @ preempt_schedule_irq
    str     r7, [tsk, #TI_PREEMPT]  @ expects preempt_count == 0
1:  bl      preempt_schedule_irq    @ irq en/disable is done inside
    ldr     r0, [tsk, #TI_FLAGS]    @ get new tasks TI_FLAGS
    tst     r0, #_TIF_NEED_RESCHED
    beq     preempt_return          @ go again
    b       1b
#endif

.LCirq_stat:
    .word   irq_stat    @ the address of irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
#endif

/* arch independent irq_stat fields */
#define local_softirq_pending() \
        __IRQ_STAT(smp_processor_id(), __softirq_pending)

#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq() __do_softirq()  /* this is the one used here */
#else
# define invoke_softirq() do_softirq()
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)

inline void raise_softirq_irqoff(unsigned int nr)
{
    __raise_softirq_irqoff(nr);
    if (!in_interrupt())
        wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
    unsigned long flags;

    local_irq_save(flags);
    raise_softirq_irqoff(nr);
    local_irq_restore(flags);
}
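Chaining the pieces together, raising the timer softirq expands roughly as follows (a worked expansion of the macros above, not verbatim kernel code):

raise_softirq(TIMER_SOFTIRQ);
/* => local_irq_save(flags);
 *    __raise_softirq_irqoff(TIMER_SOFTIRQ)
 *      => or_softirq_pending(1UL << TIMER_SOFTIRQ)
 *      => irq_stat[smp_processor_id()].__softirq_pending |= 1UL << TIMER_SOFTIRQ;
 *    if (!in_interrupt())
 *        wakeup_softirqd();  // raised outside interrupt context: let ksoftirqd run it
 *    local_irq_restore(flags);
 */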

(For comparison, on s3c2410 the tick path is: s3c2410_timer_interrupt => timer_tick.)

=>pxa_timer_init
==>pxa_timer_irq.dev_id = &ckevt_32ktimer;
==>setup_irq(IRQ_OST_4_11, &pxa_timer_irq); // the 32768 Hz rtc-based tick source
==>clockevents_register_device(&ckevt_32ktimer);
=>clockevents_register_device
=>clockevents_do_notify
=>raw_notifier_call_chain(&clockevents_chain, reason, dev);
=>__raw_notifier_call_chain
=>notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
=>nb->notifier_call(nb, val, v); // which is tick_notify

start_kernel
=>tick_init

static struct notifier_block tick_notifier = {
    .notifier_call = tick_notify,
};

void __init tick_init(void)
{
    clockevents_register_notifier(&tick_notifier);
}

clockevents_register_notifier
=>raw_notifier_chain_register(&clockevents_chain, nb);
=>notifier_chain_register // adds tick_notifier to the clockevents_chain singly-linked list [luther.gliethttp]
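To make the notifier mechanics concrete, here is a stripped-down userspace model of a raw notifier chain; the names mirror the kernel's but the code is a standalone sketch, not kernel source:

#include <stdio.h>

struct notifier_block {
    int (*notifier_call)(struct notifier_block *nb, unsigned long val, void *v);
    struct notifier_block *next;
};

/* prepend-style register, enough to model notifier_chain_register */
static void chain_register(struct notifier_block **head, struct notifier_block *nb)
{
    nb->next = *head;
    *head = nb;
}

/* walk the singly-linked list, like notifier_call_chain */
static void chain_call(struct notifier_block *head, unsigned long val, void *v)
{
    for (struct notifier_block *nb = head; nb; nb = nb->next)
        nb->notifier_call(nb, val, v);
}

static int tick_notify_mock(struct notifier_block *nb, unsigned long reason, void *dev)
{
    printf("tick_notify(reason=%lu, dev=%p)\n", reason, dev);
    return 0;
}

int main(void)
{
    struct notifier_block *clockevents_chain = NULL;
    struct notifier_block tick_notifier = { .notifier_call = tick_notify_mock };

    chain_register(&clockevents_chain, &tick_notifier);  /* models tick_init() */
    chain_call(clockevents_chain, 0 /* CLOCK_EVT_NOTIFY_ADD */, NULL);
    return 0;
}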

static int tick_notify(struct notifier_block *nb, unsigned long reason, void *dev)
{
    switch (reason) {
    case CLOCK_EVT_NOTIFY_ADD:
        return tick_check_new_device(dev);
    ...
    return NOTIFY_OK;
}

=>tick_notify

=>tick_check_new_device

=>tick_setup_device(td, newdev, cpu, cpumask);

static void tick_setup_device(struct tick_device *td,
                              struct clock_event_device *newdev, int cpu,
                              cpumask_t cpumask)
{
    ktime_t next_event;
    void (*handler)(struct clock_event_device *) = NULL;

    /*
     * First device setup ?
     */
    if (!td->evtdev) {
        /*
         * If no cpu took the do_timer update, assign it to
         * this cpu:
         */
        if (tick_do_timer_cpu == -1) {
            tick_do_timer_cpu = cpu;
            tick_next_period = ktime_get();
            tick_period = ktime_set(0, NSEC_PER_SEC / HZ); /* e.g. HZ=100 gives a 10 ms tick */
        }
        /*
         * Startup in periodic mode first.
         */
        td->mode = TICKDEV_MODE_PERIODIC; /* the first tick device starts in TICKDEV_MODE_PERIODIC mode */
    } else {
        handler = td->evtdev->event_handler;
        next_event = td->evtdev->next_event;
    }

    td->evtdev = newdev;
    ...
    if (td->mode == TICKDEV_MODE_PERIODIC)
        tick_setup_periodic(newdev, 0);
    else
        tick_setup_oneshot(newdev, handler, next_event);
}

void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
    tick_set_periodic_handler(dev, broadcast); /* sets dev->event_handler = tick_handle_periodic */

    /* Broadcast setup ? */
    if (!tick_device_is_functional(dev))
        return;

    if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
        clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
    } else {
        unsigned long seq;
        ktime_t next;

        do {
            seq = read_seqbegin(&xtime_lock);
            next = tick_next_period;
        } while (read_seqretry(&xtime_lock, seq));

        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

        for (;;) {
            if (!clockevents_program_event(dev, next, ktime_get()))
                return;
            next = ktime_add(next, tick_period);
        }
    }
}
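The read_seqbegin/read_seqretry pair above is the seqlock reader idiom: the reader retries whenever a writer changed tick_next_period underneath it. A minimal userspace model of the idiom (illustrative only; a single writer is assumed and the memory ordering is simplified):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;           /* even = stable, odd = writer active */
static unsigned long next_period; /* the data guarded by the sequence counter */

static void write_side(unsigned long v)
{
    atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* now odd */
    next_period = v;
    atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even again */
}

static unsigned long read_side(void)
{
    unsigned int s;
    unsigned long v;

    do {
        s = atomic_load_explicit(&seq, memory_order_acquire);  /* read_seqbegin */
        v = next_period;
        /* retry if a write was in progress or completed meanwhile */
    } while ((s & 1) || atomic_load_explicit(&seq, memory_order_acquire) != s);
    return v;
}

int main(void)
{
    write_side(42);
    printf("%lu\n", read_side());
    return 0;
}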

void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
    if (!broadcast)
        dev->event_handler = tick_handle_periodic;
    else
        dev->event_handler = tick_handle_periodic_broadcast;
}

=>pxa_timer_interrupt
{
    ...
    if (OSSR & OST_C4) {
        OIER &= ~OST_C4;
        OSSR = OST_C4;
        if (timer32k_enabled)
            c->event_handler(c); /* calls the tick_handle_periodic handler */
    }
    ...
}

void tick_handle_periodic(struct clock_event_device *dev)
{
    int cpu = smp_processor_id();
    ktime_t next;

    tick_periodic(cpu); /* calls do_timer(1), which advances jiffies_64 by one */

    if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
        return;
    /*
     * Setup the next period for devices, which do not have
     * periodic mode:
     */
    next = ktime_add(dev->next_event, tick_period);
    for (;;) {
        if (!clockevents_program_event(dev, next, ktime_get()))
            return;
        tick_periodic(cpu);
        next = ktime_add(next, tick_period);
    }
}

static void tick_periodic(int cpu)
{
    if (tick_do_timer_cpu == cpu) {
        write_seqlock(&xtime_lock);
        /* Keep track of the next tick event */
        tick_next_period = ktime_add(tick_next_period, tick_period);
        do_timer(1);
        write_sequnlock(&xtime_lock);
    }
    update_process_times(user_mode(get_irq_regs()));
    profile_tick(CPU_PROFILING);
}

arch/arm/kernel/time.c|332| update_process_times(user_mode(get_irq_regs()));

=>update_process_times
=>run_local_timers
=>raise_softirq(TIMER_SOFTIRQ); // raise the softirq; __do_softirq handles it at irq_exit time
=>run_timer_softirq
=>__run_timers
=>
fn = timer->function; // run the callback
data = timer->data;
fn(data);

//================
include/asm/arch-pxa/entry-macro.S|22| .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
// pxa's macro for reading the pending irq number

//================
arch/arm/kernel/entry-armv.S|37| bne asm_do_IRQ

.macro  irq_handler
    get_irqnr_preamble r5, lr
1:  get_irqnr_and_base r0, r6, r5, lr   @ irq number goes into r0, passed as the argument to asm_do_IRQ
    movne   r1, sp
    @
    @ routine called with r0 = irq number, r1 = struct pt_regs *
    @
    adrne   lr, 1b
    bne     asm_do_IRQ
    ...

//================
    .align  5
__irq_svc:      @ interrupt taken while in the kernel
    svc_entry
    ...
    irq_handler
    ...

//================
    .align  5
__irq_usr:      @ interrupt taken while in user space
    usr_entry
    ...
    irq_handler
    ...

//================
.macro  vector_stub, name, mode, correction=0
    .align  5
vector_\name:
    .if \correction
    sub     lr, lr, #\correction
    .endif
    @
    @ Save r0, lr_<exception> (parent PC) and spsr_<exception> (parent CPSR)
    @
    stmia   sp, {r0, lr}        @ save r0, lr
    mrs     lr, spsr
    str     lr, [sp, #8]        @ save spsr
    @
    @ Prepare for SVC32 mode. IRQs remain disabled.
    @
    mrs     r0, cpsr
    eor     r0, r0, #(\mode ^ SVC_MODE)
    msr     spsr_cxsf, r0
    @
    @ the branch table must immediately follow this code
    @
    and     lr, lr, #0x0f           @ lr holds the spsr, so there are 16 possible cpu modes
    mov     r0, sp                  @ pass sp as the argument
    ldr     lr, [pc, lr, lsl #2]    @ fetch the handler for this mode, e.g. __irq_usr or __irq_svc
    movs    pc, lr                  @ branch to handler in SVC mode
.endm

//================
    .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
    vector_stub irq, IRQ_MODE, 4

    .long   __irq_usr       @  0  (USR_26 / USR_32)
    .long   __irq_invalid   @  1  (FIQ_26 / FIQ_32)
    .long   __irq_invalid   @  2  (IRQ_26 / IRQ_32)
    .long   __irq_svc       @  3  (SVC_26 / SVC_32)
    .long   __irq_invalid   @  4
    .long   __irq_invalid   @  5
    .long   __irq_invalid   @  6
    .long   __irq_invalid   @  7
    .long   __irq_invalid   @  8
    .long   __irq_invalid   @  9
    .long   __irq_invalid   @  a
    .long   __irq_invalid   @  b
    .long   __irq_invalid   @  c
    .long   __irq_invalid   @  d
    .long   __irq_invalid   @  e
    .long   __irq_invalid   @  f

//================
    .globl  __vectors_start
__vectors_start:
    swi     SYS_ERROR0
    b       vector_und + stubs_offset
    ldr     pc, .LCvswi + stubs_offset
    b       vector_pabt + stubs_offset
    b       vector_dabt + stubs_offset
    b       vector_addrexcptn + stubs_offset
    b       vector_irq + stubs_offset
    b       vector_fiq + stubs_offset

//================
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
=>desc_handle_irq(irq, desc);

static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc)
{
    desc->handle_irq(irq, desc); /* invoke the handler registered for this irq number [luther.gliethttp] */
}

__irq_svc: // interrupt taken while in the kernel
__irq_usr: // interrupt taken while in user space
=>asm_do_IRQ
=>irq_exit
=>do_softirq
=>__do_softirq
=>
{
    ...
    h = softirq_vec;    /* walk the softirq vector and run every pending action */
    do {
        if (pending & 1) {
            h->action(h);
            /* if the 32768 Hz tick fired, asm_do_IRQ raised TIMER_SOFTIRQ,
             * so the run_timer_softirq action that drives the system tick
             * runs right here [luther.gliethttp] */
            rcu_bh_qsctr_inc(cpu);
        }
        h++;
        pending >>= 1;
    } while (pending);
    ...
}
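The pending >>= 1 loop is just a walk over a bitmask indexed by softirq number. A standalone C illustration of the same walk (the softirq numbers are assumptions matching 2.6-era kernels):

#include <stdio.h>

enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1, NET_TX_SOFTIRQ = 2, NET_RX_SOFTIRQ = 3 };

int main(void)
{
    unsigned long pending = (1UL << TIMER_SOFTIRQ) | (1UL << NET_RX_SOFTIRQ);
    int nr = 0;

    /* mirrors __do_softirq: test bit 0, run the action, shift right */
    do {
        if (pending & 1)
            printf("running softirq %d\n", nr);
        nr++;
        pending >>= 1;
    } while (pending);
    return 0;
}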

start_kernel
=>init_timers
=>open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);

void open_softirq(int nr, void (*action)(struct softirq_action *), void *data)
{
    softirq_vec[nr].data = data;
    softirq_vec[nr].action = action;
}

static void run_timer_softirq(struct softirq_action *h)
{
    struct tvec_base *base = __get_cpu_var(tvec_bases); /* this CPU's timer base */

    hrtimer_run_pending();

    if (time_after_eq(jiffies, base->timer_jiffies))
        __run_timers(base);
}

// running the softirq:
=>run_timer_softirq
=>__run_timers
=>
fn = timer->function;
data = timer->data;
fn(data);

static inline void __run_timers(struct tvec_base *base)
{
    ...
    spin_lock_irq(&base->lock);   /* disable interrupts */
    ...
    fn = timer->function;
    data = timer->data;
    fn(data);
    ...
    set_running_timer(base, NULL);
    spin_unlock_irq(&base->lock); /* re-enable interrupts */
}

mod_timer
=>__mod_timer

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
    struct tvec_base *base, *new_base;
    unsigned long flags;
    int ret = 0;

    timer_stats_timer_set_start_info(timer);
    BUG_ON(!timer->function);

    base = lock_timer_base(timer, &flags);

    if (timer_pending(timer)) {
        detach_timer(timer, 0);
        ret = 1;
    }

    new_base = __get_cpu_var(tvec_bases); /* this CPU's timer base */

    if (base != new_base) {
        /*
         * We are trying to schedule the timer on the local CPU.
         * However we can't change timer's base while it is running,
         * otherwise del_timer_sync() can't detect that the timer's
         * handler yet has not finished. This also guarantees that
         * the timer is serialized wrt itself.
         */
        if (likely(base->running_timer != timer)) {
            /* See the comment in lock_timer_base() */
            timer_set_base(timer, NULL);
            spin_unlock(&base->lock);
            base = new_base;
            spin_lock(&base->lock);
            timer_set_base(timer, base);
        }
    }

    timer->expires = expires;
    internal_add_timer(base, timer);
    /* hooks the timer onto the base's list; when it expires,
     * run_timer_softirq => __run_timers calls the handler [luther.gliethttp] */
    spin_unlock_irqrestore(&base->lock, flags);

    return ret;
}
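As a usage note: the ret value propagated back through mod_timer tells the caller whether the timer was already pending. A small hedged example:

int ret;

/* arm (or re-arm) the timer one second from now */
ret = mod_timer(&wlan_check_tx_flow_timer.timer, jiffies + HZ);
if (ret)
    pr_debug("timer was pending and has been re-armed\n");
else
    pr_debug("timer was inactive and is now armed\n");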
