Environment: VMware 7.1, Fedora 4 with the kernel upgraded to 2.6.18.
I wrote a simple block device driver:
#include <linux/module.h>
#include <linux/blkdev.h>

#define BLKDEV_DEVICEMAJOR COMPAQ_SMART2_MAJOR
#define BLKDEV_DISKNAME "blkdev"
#define BLKDEV_BYTES (16*1024*1024) /* size: 16 MB */

enum {
    RM_SIMPLE = 0,   /* request queue with a simple request function */
    RM_FULL = 1,
    RM_NOQUEUE = 2,  /* no request queue, handle bios directly */
};
static int request_mode = RM_SIMPLE; /* default mode */
module_param(request_mode, int, S_IRUGO);

static struct request_queue *blkdev_queue;
static struct gendisk *blkdev_disk;
unsigned char blkdev_data[BLKDEV_BYTES]; /* in-memory "disk" buffer */

static struct block_device_operations blkdev_fops = {
    .owner = THIS_MODULE,
};
/* RM_SIMPLE: request function called by the block layer for the request queue */
static void blkdev_do_request(struct request_queue *q)
{
    static int count = 0;
    struct request *req;

    printk("Function blkdev_do_request count=%d\n", count++);
    while ((req = elv_next_request(q)) != NULL) {
        unsigned long start = req->sector << 9;
        unsigned long len = req->current_nr_sectors << 9;

        if (start + len > BLKDEV_BYTES) {
            printk(KERN_ERR BLKDEV_DISKNAME
                   ": bad request: block=%llu, count=%u\n",
                   (unsigned long long)req->sector,
                   req->current_nr_sectors);
            end_request(req, 0);
            continue;
        }

        switch (rq_data_dir(req)) {
        case READ:
            memcpy(req->buffer, blkdev_data + start, len);
            end_request(req, 1);
            break;
        case WRITE:
            memcpy(blkdev_data + start, req->buffer, len);
            end_request(req, 1);
            break;
        default:
            break;
        }
    }
}
/* RM_NOQUEUE: make_request function, handles each bio directly */
static int blkdev_make_request(struct request_queue *queue, struct bio *bio)
{
    int i;
    static int count = 0;
    struct bio_vec *bvec;
    void *disk_mem;
    void *bvec_mem;

    printk("Function blkdev_make_request count=%d\n", count++);
    if ((bio->bi_sector << 9) + bio->bi_size > BLKDEV_BYTES) {
        bio_endio(bio, 0, -EIO);
        return 0;
    }

    /* back the bio with the same buffer used by the request-queue path */
    disk_mem = blkdev_data + (bio->bi_sector << 9);
    bio_for_each_segment(bvec, bio, i) {
        bvec_mem = kmap(bvec->bv_page) + bvec->bv_offset;
        switch (bio_data_dir(bio)) {
        case READ:
        case READA:
            memcpy(bvec_mem, disk_mem, bvec->bv_len);
            break;
        case WRITE:
            memcpy(disk_mem, bvec_mem, bvec->bv_len);
            break;
        default:
            break;
        }
        kunmap(bvec->bv_page);
        disk_mem += bvec->bv_len;
    }
    bio_endio(bio, bio->bi_size, 0);
    return 0;
}
static int __init blkdev_init(void)
{
    int ret;

    switch (request_mode) {
    case RM_SIMPLE:
        blkdev_queue = blk_init_queue(blkdev_do_request, NULL);
        if (!blkdev_queue) {
            ret = -ENOMEM; /* out of memory */
            goto err_init_queue;
        }
        break;
    case RM_NOQUEUE:
        blkdev_queue = blk_alloc_queue(GFP_KERNEL);
        if (blkdev_queue == NULL) {
            ret = -ENOMEM;
            goto err_init_queue;
        }
        blk_queue_make_request(blkdev_queue, &blkdev_make_request);
        break;
    default:
        printk(KERN_NOTICE "Bad request mode %d\n", request_mode);
        ret = -EINVAL;
        goto err_init_queue;
    }

    blkdev_disk = alloc_disk(1);
    if (!blkdev_disk) {
        ret = -ENOMEM;
        goto err_alloc_disk;
    }

    strcpy(blkdev_disk->disk_name, BLKDEV_DISKNAME);
    blkdev_disk->major = BLKDEV_DEVICEMAJOR;
    blkdev_disk->first_minor = 0;
    blkdev_disk->fops = &blkdev_fops;
    blkdev_disk->queue = blkdev_queue;
    set_capacity(blkdev_disk, BLKDEV_BYTES >> 9);
    add_disk(blkdev_disk);

    printk(KERN_INFO "the block device driver has been loaded\n");
    return 0;

err_alloc_disk:
    blk_cleanup_queue(blkdev_queue);
err_init_queue:
    return ret;
}
static void __exit blkdev_exit(void)
{
    del_gendisk(blkdev_disk);
    put_disk(blkdev_disk);
    blk_cleanup_queue(blkdev_queue);
}

module_init(blkdev_init);
module_exit(blkdev_exit);
MODULE_LICENSE("GPL");
What I want to find out is how many requests are generated when I read/write the block device with dd, so I defined a static counter in each handler function. The module builds fine, then:
insmod blkdev.ko
and I check the messages with dmesg.
First I back up a 512-byte binary file from the MBR:
dd if=/dev/sda of=/boot/write bs=512 count=1
Then I write it to the block device:
dd if=/boot/write of=/dev/blkdev bs=512 count=1
The dmesg output at this point shows that this single dd produced 2 requests in the kernel. Next I do a read:
dd if=/dev/blkdev of=/boot/read bs=512 count=1
This time the dmesg output shows only one request, and this is where I get confused!
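To dig into this further, I'm thinking of logging not just how many times the handler runs but also how much data each request carries. A rough sketch (not in the code above, just an idea using the same 2.6.18 request fields the driver already touches), to be called at the top of the while loop in blkdev_do_request():

/* Hypothetical diagnostic helper (sketch only, not part of the driver above):
 * print one request's direction, starting sector and size so dmesg shows
 * how much data each request actually covers. */
static void blkdev_dump_request(struct request *req)
{
    printk(KERN_INFO BLKDEV_DISKNAME
           ": %s request: sector=%llu nr_sectors=%lu current_nr_sectors=%u\n",
           rq_data_dir(req) == WRITE ? "write" : "read",
           (unsigned long long)req->sector,
           req->nr_sectors,
           req->current_nr_sectors);
}

With something like this, dmesg would show whether the two calls during the write really are two separate requests, which direction they have, and how many sectors each one covers.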
Sorry this is a bit messy; I would really appreciate any advice from anyone who has read this far.
Feel free to discuss any other related questions here as well. My understanding is this: using
while ((req = elv_next_request(q)) != NULL) {   /* with a request queue */
    ...
}
or
bio_for_each_segment(bvec, bio, i) {            /* without a request queue */
    ...
}
the driver fetches the requests one by one and then processes them. I assumed each request defaults to 512 bytes, so the write above should not have resulted in two calls to blkdev_do_request. Or is the kernel's default request size not 512 bytes? And can the request size be set manually (through a function, or by assigning directly to members of struct request_queue / struct bio)? Is there anyone who knows this area well? I'm a newbie and don't really understand it, so any pointers would be appreciated!
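On setting the request size through a function: what I have in mind are the queue-limit helpers declared in <linux/blkdev.h> in 2.6.18, such as blk_queue_hardsect_size(), blk_queue_max_sectors() and blk_queue_max_phys_segments(), rather than assigning to struct request_queue / struct bio members directly. A rough, untested sketch of what I mean, which would be called from blkdev_init() right after blk_init_queue() succeeds:

/* Sketch only (untested assumption): constrain how large the block layer may
 * make requests for this queue, using the 2.6.18 queue-limit helpers. */
static void blkdev_set_queue_limits(struct request_queue *q)
{
    blk_queue_hardsect_size(q, 512);    /* sector size the driver works in */
    blk_queue_max_sectors(q, 8);        /* at most 8 sectors (4 KB) per request */
    blk_queue_max_phys_segments(q, 1);  /* at most one segment per request */
}

Whether this would actually make each 512-byte dd show up as exactly one call to blkdev_do_request is the part I'm not sure about, so corrections are welcome.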