The sync_cache path
Aug4 13:51:49 suse11 kernel: [ 26.353680] Pid: 3429, comm: lsb_release Tainted: G N 2.6.32.12-0.7-taco #16
Aug4 13:51:49 suse11 kernel: [ 26.353685] Call Trace:
Aug4 13:51:49 suse11 kernel: [ 26.353691][<ffffffff8100612c>] dump_trace+0x6c/0x2d0
Aug4 13:51:49 suse11 kernel: [ 26.353697][<ffffffff813c4b18>] dump_stack+0x69/0x71
Aug4 13:51:49 suse11 kernel: [ 26.353704][<ffffffffa0093a35>] mptscsih_qcmd+0x25/0x830
Aug4 13:51:49 suse11 kernel: [ 26.353715][<ffffffffa001dc55>] scsi_dispatch_cmd+0x115/0x2a0
Aug4 13:51:49 suse11 kernel: [ 26.353726][<ffffffffa0024a91>] scsi_request_fn+0x3f1/0x550
Aug4 13:51:49 suse11 kernel: [ 26.353738][<ffffffff811e2ba4>] generic_unplug_device+0x24/0x40
Aug4 13:51:49 suse11 kernel: [ 26.353744][<ffffffff81148dd6>] sync_buffer+0x36/0x50
Aug4 13:51:49 suse11 kernel: [ 26.353749][<ffffffff813c6350>] __wait_on_bit+0x50/0x80
Aug4 13:51:49 suse11 kernel: [ 26.353754][<ffffffff813c63f9>] out_of_line_wait_on_bit+0x79/0xa0
---------------------------- The frames below are the filesystem layer; not analyzed for now
Aug4 13:51:49 suse11 kernel: [ 26.353763][<ffffffffa00d03a2>] ext3_bread+0x52/0x80
Aug4 13:51:49 suse11 kernel: [ 26.353774][<ffffffffa00d2e33>] htree_dirblock_to_tree+0x33/0x1b0
Aug4 13:51:49 suse11 kernel: [ 26.353787][<ffffffffa00d5db0>] ext3_htree_fill_tree+0xa0/0x260
Aug4 13:51:49 suse11 kernel: [ 26.353799][<ffffffffa00cb7e6>] ext3_readdir+0x496/0x620
Aug4 13:51:49 suse11 kernel: [ 26.353806][<ffffffff811319b7>] vfs_readdir+0xc7/0xe0
Aug4 13:51:49 suse11 kernel: [ 26.353811][<ffffffff81131a54>] sys_getdents64+0x84/0xe0
Aug4 13:51:49 suse11 kernel: [ 26.353817][<ffffffff81002efb>] system_call_fastpath+0x16/0x1b
Aug4 13:51:49 suse11 kernel: [ 26.353822][<00007f61387b923a>] 0x7f61387b923a
The __wait_on_bit in the trace is entered from the function below, and it ends up executing sync_buffer:
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
This waits on the BH_Lock flag of the ->b_state field in the buffer_head: as long as the bit is still set, sync_buffer is called to do the waiting; the wait ends once the bit is cleared.
The internals of the wait_on_bit machinery are not covered in detail here.
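Still, for reference, the core wait loop in kernel/wait.c of that era looks roughly like this (quoted from memory of the 2.6.32 source, so details may differ slightly); it shows that the action callback, sync_buffer in our case, is invoked repeatedly while the bit is still set:

int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
	      int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);	/* sync_buffer() here */
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}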
------------------------------------------------------------------------------
static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);
		/* recover the whole buffer_head from its b_state field */

	smp_mb();
	bd = bh->b_bdev;	/* get the block device, struct block_device */
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
		/* here we reach the cache structure, address_space;
		 * this call eventually ends up in the code shown below */
	io_schedule();
	return 0;
}
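A side note on container_of(): it recovers a pointer to the enclosing structure from a pointer to one of its members. A minimal, self-contained userspace sketch of the same idiom (the struct here is a toy stand-in, not the real buffer_head):

#include <stdio.h>
#include <stddef.h>

/* userspace re-implementation of the kernel's container_of() idiom */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct buffer_head_demo {		/* toy stand-in for struct buffer_head */
	unsigned long b_state;
	void *b_bdev;
};

int main(void)
{
	struct buffer_head_demo bh = { .b_state = 1, .b_bdev = NULL };
	void *word = &bh.b_state;	/* what sync_buffer() receives */
	struct buffer_head_demo *p =
		container_of(word, struct buffer_head_demo, b_state);

	printf("recovered %p, original %p\n", (void *)p, (void *)&bh);
	return 0;
}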
-----------------------------------------------------------------------------
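The bridge between sync_buffer() and the snippet below is blk_run_address_space(); in 2.6.32 it is a small inline in include/linux/blkdev.h, roughly as follows (from memory):

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}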
static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
		/* where is unplug_io_fn initialized?
		 * blk_alloc_queue_node -> q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
		 * so it is set up when the request queue is created */
}
We can see that blk_backing_dev_unplug in turn ends up calling blk_unplug() below, i.e. the queue's unplug routine gets invoked.
In our case that routine is generic_unplug_device (it is installed into ->unplug_fn when the queue is created).
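For completeness, blk_backing_dev_unplug() itself is tiny; in 2.6.32 block/blk-core.c it is roughly the following (reproduced from memory, exact qualifiers may differ). bdi->unplug_io_data is set to the queue in blk_alloc_queue_node, so it simply forwards to blk_unplug():

void blk_backing_dev_unplug(struct backing_dev_info *bdi, struct page *page)
{
	struct request_queue *q = bdi->unplug_io_data;

	blk_unplug(q);
}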
void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		trace_block_unplug_io(q);
		q->unplug_fn(q);
	}
}
-----------------------------------------------------------------------------
/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;		/* queue marked QUEUE_FLAG_STOPPED: just return */
	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
		return;		/* non-SSD disk that was not marked QUEUE_FLAG_PLUGGED: return */

	q->request_fn(q);	/* i.e. scsi_request_fn -- where is it initialized?
				 * when the queue is created, in blk_init_queue_node */
}
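As for where scsi_request_fn becomes the queue's request_fn: for SCSI devices the queue is created in drivers/scsi/scsi_lib.c, roughly along these lines in 2.6.32 (a heavily abbreviated sketch from memory; error handling and the rest of the setup are omitted):

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;

	q = blk_init_queue(request_fn, NULL);	/* blk_init_queue_node sets q->request_fn */
	/* ... rest of queue setup omitted ... */
	return q;
}

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	/* ... rest of setup omitted ... */
	return q;
}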
---------------------------------------------------------------------
There's the familiar LSI mptscsih :lol Heh, I've always felt the Linux IO path is too long; there seems to be room for optimization~~

Reply to 5# humjb_1983
The Linux IO stack is too deep, which adds a lot of io latency; there are already efforts to optimize this, e.g. NVMe.
镇水铁牛 posted on 2015-01-30 22:18:
Reply to 5# humjb_1983
The Linux IO stack is too deep, which adds a lot of io latency; there are already efforts to optimize this, e.g. NVMe.
Heh, thanks, I'll look into it when I get a chance~