Thread starter: sisi8408

Kernel Bug-Vulnerability-Comment library

Posted on 2008-07-27 18:07

  1. 6, sched
  2.    =====

  3. after requests are queued, it is time to schedule the queue, based upon the basic concept of `plug` (a usage sketch follows the listing below)

  4. void blk_plug_device(struct request_queue *q)
  5. {
  6.         WARN_ON(!irqs_disabled());

  7.         /*
  8.          * don't plug a stopped queue,
  9.          * it must be paired with blk_start_queue()
  10.          * which will restart the queueing
  11.          */
  12.         if (blk_queue_stopped(q))
  13.                 return;
  14.         /* 1,
  15.          * like qdisc in a net device, this is also a controller,
  16.          * but a two-state machine is used: stopped and plugged
  17.          */
  18.         if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
  19.                 /* 2,
  20.                  * further, register regular timer
  21.                  */
  22.                 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
  23.                 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
  24.         }
  25. }

  26. /*
  27. * though a nice concept, it is uglified by its name,
  28. * how about blk_unplug_device?
  29. */
  30. int blk_remove_plug(struct request_queue *q)
  31. {
  32.         WARN_ON(!irqs_disabled());

  33.         if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
  34.                 return 0;

  35.         del_timer(&q->unplug_timer);
  36.         return 1;
  37. }

  38. void __generic_unplug_device(struct request_queue *q)
  39. {
  40.         if (unlikely(blk_queue_stopped(q)))
  41.                 return;

  42.         if (!blk_remove_plug(q)) /* already !PLUGGED */
  43.                 return;

  44.         q->request_fn(q);
  45. }
  46. /*
  47. * if legal, simply run the request_fn registered by the driver
  48. */
  49. void generic_unplug_device(struct request_queue *q)
  50. {
  51.         if (blk_queue_plugged(q)) {
  52.                 spin_lock_irq(q->queue_lock);
  53.                 __generic_unplug_device(q);
  54.                 spin_unlock_irq(q->queue_lock);
  55.         }
  56. }

  57. void blk_unplug_timeout(unsigned long data)
  58. {
  59.         struct request_queue *q = (struct request_queue *)data;

  60.         blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
  61.                                 q->rq.count[READ] + q->rq.count[WRITE]);

  62.         kblockd_schedule_work(&q->unplug_work);
  63. }
  64. /*
  65. * the timer is executed in interrupt context,
  66. * so simply schedule the work over to process context, nice design?
  67. */
  68. void blk_unplug_work(struct work_struct *work)
  69. {
  70.         struct request_queue *q =
  71.                 container_of(work, struct request_queue, unplug_work);

  72.         blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
  73.                                 q->rq.count[READ] + q->rq.count[WRITE]);
  74.         /* safe enough? */
  75.         q->unplug_fn(q);

  76. /* should be
  77.         struct request_queue *q =
  78.                 container_of(work, struct request_queue, unplug_work);
  79.         blk_unplug(q);
  80. */
  81. }

  82. /*
  83. * too much work; like you, the controller is not a slave
  84. */
  85. void blk_stop_queue(struct request_queue *q)
  86. {
  87.         queue_flag_set(QUEUE_FLAG_STOPPED, q);
  88.         blk_remove_plug(q);
  89. }
  90. /*
  91. * after bread + salt + water, what a lifestyle, are you a puritan?
  92. */
  93. void blk_start_queue(struct request_queue *q)
  94. {
  95.         WARN_ON(!irqs_disabled());

  96.         queue_flag_clear(QUEUE_FLAG_STOPPED, q);

  97.         /*
  98.          * one level of recursion is ok and is much faster than kicking
  99.          * the unplug handling
  100.          */
  101.         if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
  102.                 //queue_flag_set(QUEUE_FLAG_REENTER, q);
  103.                 q->request_fn(q);
  104.                 queue_flag_clear(QUEUE_FLAG_REENTER, q);
  105.         } else {
  106.                 blk_plug_device(q);
  107.                 kblockd_schedule_work(&q->unplug_work);
  108.         }
  109. }

  110. /*
  111. * against async completion, something like
  112. *
  113. static void uptodate(rw_lock_t *lock)
  114. {
  115.         write_lock(lock);
  116.         write_unlock(lock);
  117. }
  118. *
  119. */
  120. void blk_sync_queue(struct request_queue *q)
  121. {
  122.         del_timer_sync(&q->unplug_timer);
  123.         kblockd_flush_work(&q->unplug_work);
  124. }

  125. /*
  126. * run the queue in a direct way if possible
  127. */
  128. void blk_run_queue(struct request_queue *q)
  129. {
  130.         unsigned long flags;

  131.         spin_lock_irqsave(q->queue_lock, flags);
  132.         __blk_run_queue(q);
  133.         spin_unlock_irqrestore(q->queue_lock, flags);
  134. }
  135. void blk_start_queueing(struct request_queue *q)
  136. {
  137.         if (!blk_queue_plugged(q))
  138.                 q->request_fn(q);
  139.         else
  140.                 __generic_unplug_device(q);
  141. }
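To make the plug concept concrete, here is a minimal sketch (not from the original post) of how a 2.6.26-era submit path and a synchronous waiter would drive the functions above; my_submit_path() and my_wait_for_io() are hypothetical names invented for illustration.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* hypothetical submit side: queue a request, then plug to batch more work */
static void my_submit_path(struct request_queue *q, struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);   /* blk_plug_device() wants irqs off */
        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
        blk_plug_device(q);             /* arms q->unplug_timer when newly plugged */
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/* hypothetical synchronous reader: do not wait for the timer, kick the queue now */
static void my_wait_for_io(struct request_queue *q)
{
        generic_unplug_device(q);       /* blk_remove_plug() + q->request_fn() */
}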

Posted on 2008-07-27 21:26

  1. 5, enqueue method
  2.    ==============

  3. void blk_insert_request(struct request_queue *q, struct request *rq,
  4.                         int at_head, void *data)
  5. {
  6.         int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
  7.         unsigned long flags;

  8.         /*
  9.          * tell I/O scheduler that this isn't a regular read/write (ie it
  10.          * must not attempt merges on this) and that it acts as a soft
  11.          * barrier
  12.          */
  13.         rq->cmd_type = REQ_TYPE_SPECIAL;
  14.         rq->cmd_flags |= REQ_SOFTBARRIER;

  15.         rq->special = data;

  16.         spin_lock_irqsave(q->queue_lock, flags);

  17.         /*
  18.          * If command is tagged, release the tag
  19.          */
  20.         if (blk_rq_tagged(rq))
  21.                 blk_queue_end_tag(q, rq);

  22.         drive_stat_acct(rq, 1);
  23.        
  24.         __elv_add_request(q, rq, where, 0);
  25.         /* 1,
  26.          * ask the elevator to add it, then deliver the special request to the controller
  27.          */
  28.         blk_start_queueing(q);
  29.        
  30.         spin_unlock_irqrestore(q->queue_lock, flags);
  31. }
  32. static inline void add_request(struct request_queue *q, struct request *req)
  33. {
  34.         drive_stat_acct(req, 1);

  35.         /*
  36.          * elevator indicated where it wants this request to be
  37.          * inserted at elevator_merge time
  38.          */
  39.         __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
  40. }

  41. void blk_requeue_request(struct request_queue *q, struct request *rq)
  42. {
  43.         blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

  44.         if (blk_rq_tagged(rq))
  45.                 blk_queue_end_tag(q, rq);

  46.         elv_requeue_request(q, rq);
  47. }
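A minimal sketch (not from the original post) of how a driver might push a "special" command through blk_insert_request() above; struct my_drive_cmd and my_send_drive_cmd() are hypothetical names, and the request is assumed to come from the 2.6.26-era blk_get_request().

#include <linux/blkdev.h>

struct my_drive_cmd {                   /* hypothetical per-command payload */
        int opcode;
};

static int my_send_drive_cmd(struct request_queue *q, struct my_drive_cmd *cmd)
{
        struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

        if (!rq)
                return -ENOMEM;

        /*
         * blk_insert_request() marks the request REQ_TYPE_SPECIAL, stores
         * cmd in rq->special, inserts it at the head and starts queueing;
         * the driver's request_fn later picks it up and must put it when done
         */
        blk_insert_request(q, rq, 1, cmd);
        return 0;
}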

Posted on 2008-07-27 21:27

  1. 7, return from driver
  2.    ==================

  3. execute the bio_endio method defined by each bio issuer,
  4. since a request may collect bios from different issuers (a sketch of such an issuer callback follows the listing below).

  5. static void req_bio_endio(struct request *rq, struct bio *bio,
  6.                           unsigned int nbytes, int error)
  7. {
  8.         struct request_queue *q = rq->q;

  9.         if (&q->bar_rq != rq) {
  10.                 if (error)
  11.                         clear_bit(BIO_UPTODATE, &bio->bi_flags);
  12.                 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
  13.                         error = -EIO;

  14.                 if (unlikely(nbytes > bio->bi_size)) {
  15.                         printk(KERN_ERR "%s: want %u bytes done, %u left\n",
  16.                                __func__, nbytes, bio->bi_size);
  17.                         nbytes = bio->bi_size;
  18.                 }

  19.                 bio->bi_size -= nbytes;
  20.                 bio->bi_sector += (nbytes >> 9);
  21.                
  22.                 if (bio->bi_size == 0)
  23.                         bio_endio(bio, error);
  24.         } else {
  25.                 /*
  26.                  * Okay, this is the barrier request in progress, just
  27.                  * record the error;
  28.                  */
  29.                 if (error && !q->orderr)
  30.                         q->orderr = error;
  31.         }
  32. }

  33. /*
  34. * after the bios in the request have been processed
  35. */
  36. void __blk_put_request(struct request_queue *q, struct request *req)
  37. {
  38.         if (unlikely(!q))
  39.                 return;
  40.         if (unlikely(--req->ref_count))
  41.                 return;

  42.         elv_completed_request(q, req);

  43.         /*
  44.          * Request may not have originated from ll_rw_blk. if not,
  45.          * it didn't come out of our reserved rq pools
  46.          */
  47.         if (req->cmd_flags & REQ_ALLOCED) {
  48.                 int rw = rq_data_dir(req);
  49.                 int priv = req->cmd_flags & REQ_ELVPRIV;

  50.                 BUG_ON(!list_empty(&req->queuelist));
  51.                 BUG_ON(!hlist_unhashed(&req->hash));

  52.                 blk_free_request(q, req);
  53.                 freed_request(q, rw, priv);
  54.         }
  55. }

  56. /*
  57. * bios are processed one after another
  58. */
  59. static int __end_that_request_first(struct request *req, int error, int nr_bytes)
  60. {
  61.         int total_bytes, bio_nbytes, next_idx = 0;
  62.         struct bio *bio;

  63.         blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);

  64.         /*
  65.          * for a REQ_BLOCK_PC request, we want to carry any eventual
  66.          * sense key with us all the way through
  67.          */
  68.         if (!blk_pc_request(req))
  69.                 req->errors = 0;

  70.         if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
  71.                 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
  72.                                 req->rq_disk ? req->rq_disk->disk_name : "?",
  73.                                 (unsigned long long)req->sector);
  74.         }

  75.         if (blk_fs_request(req) && req->rq_disk) {
  76.                 struct hd_struct *part = get_part(req->rq_disk, req->sector);
  77.                 const int rw = rq_data_dir(req);

  78.                 all_stat_add(req->rq_disk, part, sectors[rw],
  79.                                 nr_bytes >> 9, req->sector);
  80.         }

  81.         total_bytes = bio_nbytes = 0;
  82.        
  83.         while ((bio = req->bio) != NULL) {
  84.                 int nbytes;

  85.                 /*
  86.                  * For an empty barrier request, the low level driver must
  87.                  * store a potential error location in ->sector. We pass
  88.                  * that back up in ->bi_sector.
  89.                  */
  90.                 if (blk_empty_barrier(req))
  91.                         bio->bi_sector = req->sector;

  92.                 if (nr_bytes >= bio->bi_size) {
  93.                         /* 1,
  94.                          * the bio completes as a whole
  95.                          */
  96.                         req->bio = bio->bi_next;
  97.                        
  98.                         nbytes = bio->bi_size;
  99.                         /* 2,
  100.                          * call endio method
  101.                          */
  102.                         req_bio_endio(req, bio, nbytes, error);
  103.                        
  104.                         next_idx = 0;
  105.                         bio_nbytes = 0;
  106.                 } else {
  107.                         int idx = bio->bi_idx + next_idx;
  108.                         /* 3,
  109.                          * check vectors in bio
  110.                          */
  111.                         if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
  112.                                 blk_dump_rq_flags(req, "__end_that");
  113.                                 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
  114.                                        __func__, bio->bi_idx, bio->bi_vcnt);
  115.                                 break;
  116.                         }

  117.                         nbytes = bio_iovec_idx(bio, idx)->bv_len;
  118.                         BIO_BUG_ON(nbytes > bio->bi_size);

  119.                         /*
  120.                          * not a complete bvec done
  121.                          */
  122.                         if (unlikely(nbytes > nr_bytes)) {
  123.                                 bio_nbytes += nr_bytes;
  124.                                 total_bytes += nr_bytes;
  125.                                 break;
  126.                         }

  127.                         /*
  128.                          * advance to the next vector
  129.                          */
  130.                         next_idx++;
  131.                         bio_nbytes += nbytes;
  132.                 }

  133.                 total_bytes += nbytes;
  134.                 nr_bytes -= nbytes;

  135.                 bio = req->bio;
  136.                 if (bio) {
  137.                         /*
  138.                          * end more in this run, or just return 'not-done'
  139.                          */
  140.                         if (unlikely(nr_bytes <= 0))
  141.                                 break;
  142.                 }
  143.         }

  144.         /*
  145.          * completely done
  146.          */
  147.         if (!req->bio)
  148.                 return 0;

  149.         /*
  150.          * if the request wasn't completed, update state
  151.          */
  152.         if (bio_nbytes) {
  153.                 req_bio_endio(req, bio, bio_nbytes, error);
  154.                
  155.                 bio->bi_idx += next_idx;
  156.                 bio_iovec(bio)->bv_offset += nr_bytes;
  157.                 bio_iovec(bio)->bv_len -= nr_bytes;
  158.         }
  159.         /* 4,
  160.          * prepare request for next round
  161.          */
  162.         blk_recalc_rq_sectors(req, total_bytes >> 9);
  163.         blk_recalc_rq_segments(req);
  164.         return 1;
  165. }

  166. static void end_that_request_last(struct request *req, int error)
  167. {
  168.         struct gendisk *disk = req->rq_disk;

  169.         if (blk_rq_tagged(req))
  170.                 blk_queue_end_tag(req->q, req);

  171.         if (blk_queued_rq(req))
  172.                 blkdev_dequeue_request(req);

  173.         if (unlikely(laptop_mode) && blk_fs_request(req))
  174.                 laptop_io_completion();

  175.         /*
  176.          * Account IO completion.  bar_rq isn't accounted as a normal
  177.          * IO on queueing nor completion.  Accounting the containing
  178.          * request is enough.
  179.          */
  180.         if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
  181.                 unsigned long duration = jiffies - req->start_time;
  182.                 const int rw = rq_data_dir(req);
  183.                 struct hd_struct *part = get_part(disk, req->sector);

  184.                 __all_stat_inc(disk, part, ios[rw], req->sector);
  185.                 __all_stat_add(disk, part, ticks[rw], duration, req->sector);
  186.                 disk_round_stats(disk);

  187.                 disk->in_flight--;
  188.                
  189.                 if (part) {
  190.                         part_round_stats(part);
  191.                         part->in_flight--;
  192.                 }
  193.         }

  194.         if (req->end_io)
  195.                 req->end_io(req, error);
  196.         else {
  197.                 if (blk_bidi_rq(req))
  198.                         __blk_put_request(req->next_rq->q, req->next_rq);

  199.                 __blk_put_request(req->q, req);
  200.         }
  201. }

  202. static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
  203.                       unsigned int bidi_bytes,
  204.                       int (drv_callback)(struct request *))
  205. {
  206.         struct request_queue *q = rq->q;
  207.         unsigned long flags = 0UL;

  208.         if (blk_fs_request(rq) ||
  209.             blk_pc_request(rq)) {
  210.                 if (__end_that_request_first(rq, error, nr_bytes))
  211.                         return 1;

  212.                 /* Bidi request must be completed as a whole */
  213.                 if (blk_bidi_rq(rq) &&
  214.                     __end_that_request_first(rq->next_rq, error, bidi_bytes))
  215.                         return 1;
  216.         }

  217.         /* Special feature for tricky drivers */
  218.         if (drv_callback && drv_callback(rq))
  219.                 return 1;

  220.         add_disk_randomness(rq->rq_disk);

  221.         spin_lock_irqsave(q->queue_lock, flags);
  222.         end_that_request_last(rq, error);
  223.         spin_unlock_irqrestore(q->queue_lock, flags);

  224.         return 0;
  225. }

  226. int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
  227. {
  228.         return blk_end_io(rq, error, nr_bytes, 0, NULL);
  229. }

  230. int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
  231. {
  232.         if (blk_fs_request(rq) ||
  233.             blk_pc_request(rq)) {
  234.                 if (__end_that_request_first(rq, error, nr_bytes))
  235.                         return 1;
  236.         }

  237.         add_disk_randomness(rq->rq_disk);

  238.         end_that_request_last(rq, error);
  239.         return 0;
  240. }


  241. /*
  242. * feed completed request to softirq
  243. */
  244. void blk_complete_request(struct request *req)
  245. {
  246.         struct list_head *cpu_list;
  247.         unsigned long flags;

  248.         BUG_ON(!req->q->softirq_done_fn);

  249.         local_irq_save(flags);

  250.         cpu_list = &__get_cpu_var(blk_cpu_done);
  251.         list_add_tail(&req->donelist, cpu_list);
  252.        
  253.         raise_softirq_irqoff(BLOCK_SOFTIRQ);

  254.         local_irq_restore(flags);
  255. }
  256. /*
  257. * standard softirq method for completed request
  258. */
  259. static void blk_done_softirq(struct softirq_action *h)
  260. {
  261.         struct list_head *cpu_list, local_list;

  262.         local_irq_disable();
  263.         cpu_list = &__get_cpu_var(blk_cpu_done);
  264.         list_replace_init(cpu_list, &local_list);
  265.         local_irq_enable();

  266.         while (!list_empty(&local_list)) {
  267.                 struct request *rq;

  268.                 rq = list_entry(local_list.next, struct request, donelist);
  269.                
  270.                 list_del_init(&rq->donelist);
  271.                 rq->q->softirq_done_fn(rq);
  272.         }
  273. }
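As noted at the top of this post, req_bio_endio() ends up in bio_endio(), which calls the bi_end_io hook the issuer attached to the bio. A minimal sketch of such an issuer callback (not from the original post; struct my_io and my_read_done() are hypothetical, written against the 2.6.26-era bi_end_io signature):

#include <linux/bio.h>
#include <linux/completion.h>

struct my_io {                          /* hypothetical per-bio context */
        struct completion done;
        int error;
};

/* called from bio_endio() once req_bio_endio() has shrunk bi_size to 0 */
static void my_read_done(struct bio *bio, int error)
{
        struct my_io *io = bio->bi_private;

        io->error = error;
        complete(&io->done);
        bio_put(bio);
}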

Posted on 2008-08-02 19:15

  1. struct kfifo {
  2.         unsigned char *buffer;        /* the buffer holding the data */
  3.         unsigned int size;        /* the size of the allocated buffer */
  4.         unsigned int in;        /* data is added at offset (in % size) */
  5.         unsigned int out;        /* data is extracted from off. (out % size) */
  6.         spinlock_t *lock, __lock; /* protects concurrent modifications */
  7. };
  8. /* .26 */
  9. struct kfifo * kfifo_init(unsigned char *buffer, unsigned int size,
  10.                                 gfp_t gfp_mask, spinlock_t *lock)
  11. {
  12.         struct kfifo *fifo;

  13.         /* size must be a power of 2 */
  14.         BUG_ON(!is_power_of_2(size));

  15.         fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
  16.         if (!fifo)
  17.                 return ERR_PTR(-ENOMEM);

  18.         fifo->buffer = buffer;
  19.         fifo->size = size;
  20.         fifo->in = fifo->out = 0;
  21.         if (lock)
  22.                 fifo->lock = lock;
  23.         else {
  24.                 spin_lock_init(&fifo->__lock);
  25.                 fifo->lock = &fifo->__lock;
  26.         }
  27.         return fifo;
  28. }
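For context, a minimal usage sketch of the 2.6.26-era kfifo API that the (modified) kfifo_init() above belongs to; my_fifo_demo() is a hypothetical function, and the external-lock variant is shown.

#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_fifo_lock);

static int my_fifo_demo(void)
{
        unsigned char in[4] = { 1, 2, 3, 4 }, out[4];
        struct kfifo *fifo;
        unsigned int n;

        /* kfifo_alloc() allocates the buffer itself; size must be a power of 2 */
        fifo = kfifo_alloc(64, GFP_KERNEL, &my_fifo_lock);
        if (IS_ERR(fifo))
                return PTR_ERR(fifo);

        kfifo_put(fifo, in, sizeof(in));        /* copies in at (in % size)  */
        n = kfifo_get(fifo, out, sizeof(out));  /* copies out at (out % size) */

        kfifo_free(fifo);
        return n == sizeof(in) ? 0 : -EIO;
}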

Posted on 2008-08-03 13:27

  1. /*
  2. * A simple `mbuf` FIFO implementation based upon kfifo
  3. */

  4. #define mbuf_fifo_ENTRY_SIZE        2048
  5. #define mbuf_fifo_NR_ENTRY        256

  6. typedef struct {
  7.         unsigned char *buf;
  8.         __be16 offset, len;
  9. } mbuf_desc_t;

  10. typedef union {
  11.         mbuf_desc_t mbuf;
  12.         unsigned char size[mbuf_fifo_ENTRY_SIZE];
  13. } mbuf_entry_t;

  14. struct mbuf_fifo {
  15.         int reader, writer;
  16.         spinlock_t *lock, __lock;
  17.         mbuf_entry_t *entries;
  18. };

  19. static inline void __mbuf_fifo_reset(struct mbuf_fifo *fifo)
  20. {
  21.         fifo->writer = fifo->reader = 0;
  22. }

  23. static inline void mbuf_fifo_reset(struct mbuf_fifo *fifo)
  24. {
  25.         unsigned long flags;

  26.         spin_lock_irqsave(fifo->lock, flags);
  27.         __mbuf_fifo_reset(fifo);
  28.         spin_unlock_irqrestore(fifo->lock, flags);
  29. }

  30. static inline int __mbuf_fifo_len(struct mbuf_fifo *fifo)
  31. {
  32.         if (fifo->writer >= fifo->reader)
  33.                 return fifo->writer - fifo->reader;
  34.         else
  35.                 return mbuf_fifo_NR_ENTRY + fifo->writer - fifo->reader;
  36. }

  37. static inline int mbuf_fifo_len(struct mbuf_fifo *fifo)
  38. {
  39.         unsigned long flags;
  40.         int ret;

  41.         spin_lock_irqsave(fifo->lock, flags);
  42.         ret = __mbuf_fifo_len(fifo);
  43.         spin_unlock_irqrestore(fifo->lock, flags);
  44.         return ret;
  45. }

  46. static inline int __mbuf_fifo_full(struct mbuf_fifo *fifo)
  47. {
  48.         return __mbuf_fifo_len(fifo) == mbuf_fifo_NR_ENTRY - 1;
  49. }

  50. static inline int mbuf_fifo_full(struct mbuf_fifo *fifo)
  51. {
  52.         unsigned long flags;
  53.         int ret;

  54.         spin_lock_irqsave(fifo->lock, flags);
  55.         ret = __mbuf_fifo_full(fifo);
  56.         spin_unlock_irqrestore(fifo->lock, flags);
  57.         return ret;
  58. }

  59. static int __mbuf_fifo_read(struct sk_buff *skb, struct mbuf_fifo *fifo)
  60. {
  61.         mbuf_desc_t *desc;

  62.         if (!__mbuf_fifo_len(fifo))
  63.                 return 0;

  64.         desc = (mbuf_desc_t *)(fifo->entries + fifo->reader);
  65.         if (ntohs(desc->len) > skb_tailroom(skb))
  66.                 return 0;
  67.         memcpy(skb_put(skb, ntohs(desc->len)),
  68.                 desc +1, ntohs(desc->len));
  69.         ++fifo->reader;
  70.         fifo->reader %= mbuf_fifo_NR_ENTRY;
  71.         return 1;
  72. }

  73. static int mbuf_fifo_read(struct sk_buff *skb, struct mbuf_fifo *fifo)
  74. {
  75.         unsigned long flags;
  76.         int rc = 0;

  77.         spin_lock_irqsave(fifo->lock, flags);
  78.         rc = __mbuf_fifo_read(skb, fifo);
  79.         spin_unlock_irqrestore(fifo->lock, flags);
  80.         return rc;
  81. }

  82. #define NETDEV_TX_KILL -2

  83. static int __mbuf_fifo_write(struct sk_buff *skb, struct mbuf_fifo *fifo)
  84. {
  85.         mbuf_desc_t *desc;
  86.        
  87.         if (__mbuf_fifo_full(fifo))
  88.                 return NETDEV_TX_BUSY;

  89.         desc = (mbuf_desc_t *)(fifo->entries + fifo->writer);
  90.         desc->offset = htons(sizeof(mbuf_desc_t));
  91.         desc->len = htons(skb->len);
  92.         /* skb_copy_bits is nicer */
  93.         memcpy(desc +1, skb->data, skb->len);
  94.         ++fifo->writer;
  95.         fifo->writer %= mbuf_fifo_NR_ENTRY;
  96.         return NETDEV_TX_OK;
  97. }

  98. static int mbuf_fifo_write(struct sk_buff *skb, struct mbuf_fifo *fifo)
  99. {
  100.         unsigned long flags;
  101.         int rc = NETDEV_TX_KILL;
  102.        
  103.         if (skb->len > mbuf_fifo_ENTRY_SIZE - sizeof(mbuf_desc_t))
  104.                 return rc;
  105.        
  106.         spin_lock_irqsave(fifo->lock, flags);
  107.         rc = __mbuf_fifo_write(skb, fifo);
  108.         spin_unlock_irqrestore(fifo->lock, flags);

  109.         return rc;
  110. }

  111. #define mbuf_fifo_PHY_START 0xeffe00ff

  112. static unsigned long mbuf_fifo_phy_start = mbuf_fifo_PHY_START;
  113. module_param(mbuf_fifo_phy_start, ulong, S_IRUGO | S_IWUSR);

  114. static int mbuf_fifo_inited = 0;
  115. module_param(mbuf_fifo_inited, int, S_IRUGO | S_IWUSR);

  116. static struct mbuf_fifo * mbuf_fifo_alloc(spinlock_t *lock)
  117. {
  118.         struct mbuf_fifo *fifo;
  119.         void *start;
  120. /*       
  121.         if (mbuf_fifo_inited)
  122.                 return ERR_PTR(-EBUSY);
  123. */
  124.         if (mbuf_fifo_phy_start == mbuf_fifo_PHY_START)
  125.                 return ERR_PTR(-ENOMEM);
  126.
  127.         if (!mbuf_fifo_phy_start)
  128.                 return ERR_PTR(-EINVAL);
  129.
  130.         if (mbuf_fifo_phy_start & (PAGE_SIZE-1))
  131.                 return ERR_PTR(-EINVAL);
  132.
  133.         /* ok, input through the module parameter / sysfs */
  134.         start = phys_to_virt(mbuf_fifo_phy_start);

  135.         /* mapped in kernel ? */
  136.         if (!mbuf_fifo_inited) {
  137.                 memset(start, 0,
  138.                         (mbuf_fifo_ENTRY_SIZE -1) * mbuf_fifo_NR_ENTRY);
  139.         } else {
  140.                 char c =
  141.                 (*(char *)(start + (mbuf_fifo_ENTRY_SIZE -1)
  142.                                  * mbuf_fifo_NR_ENTRY -1));
  143.         }
  144.        
  145.         fifo = kmalloc(sizeof(struct mbuf_fifo), GFP_KERNEL);
  146.         if (!fifo)
  147.                 return ERR_PTR(-ENOMEM);
  148.         memset(fifo, 0, sizeof(struct mbuf_fifo));

  149.         if (!mbuf_fifo_inited)
  150.                 mbuf_fifo_inited = 1;
  151.        
  152.         if (lock) {
  153.                 fifo->lock = lock;
  154.         } else {
  155.                 spin_lock_init(&fifo->__lock);
  156.                 fifo->lock = &fifo->__lock;
  157.         }
  158.         fifo->entries = start;
  159.         return fifo;
  160. }

  161. static void mbuf_fifo_free(struct mbuf_fifo *fifo)
  162. {
  163.         if (fifo)
  164.                 kfree(fifo);
  165. }

  166. struct mbuf_fifo_ops {
  167.         void (*free)(struct mbuf_fifo *fifo);
  168.         struct mbuf_fifo * (*alloc)(spinlock_t *lock);
  169.         int (*write)(struct sk_buff *skb, struct mbuf_fifo *fifo);
  170.         int (*read)(struct sk_buff *skb, struct mbuf_fifo *fifo);
  171. };

  172. static struct mbuf_fifo_ops ops = {
  173.         .free        = mbuf_fifo_free,
  174.         .alloc        = mbuf_fifo_alloc,
  175.         .read        = mbuf_fifo_read,
  176.         .write        = mbuf_fifo_write,
  177. };

  178. extern atomic_t mbuf_fifo_ready;
  179. extern struct mbuf_fifo_ops *mbuf_fifo_methods;

  180. static int mbuf_fifo_init(void)
  181. {
  182.         if (mbuf_fifo_methods)
  183.                 return -EBUSY;
  184.                
  185.         mbuf_fifo_methods = &ops;
  186.         atomic_set(&mbuf_fifo_ready, 1);
  187.         return 0;
  188. }

  189. static void mbuf_fifo_exit(void)
  190. {
  191.         atomic_set(&mbuf_fifo_ready, 0);
  192.         mbuf_fifo_methods = NULL;
  193. }

  194. module_init(mbuf_fifo_init);
  195. module_exit(mbuf_fifo_exit);
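A minimal sketch (not from the original post) of how a net driver's transmit handler might feed the mbuf fifo above; my_dev_xmit() is a hypothetical hard_start_xmit implementation and tx_fifo is assumed to have been set up with mbuf_fifo_alloc() at probe time.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct mbuf_fifo *tx_fifo;       /* assumed: set up in probe via mbuf_fifo_alloc() */

static int my_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int rc = mbuf_fifo_write(skb, tx_fifo);

        if (rc == NETDEV_TX_BUSY)
                return NETDEV_TX_BUSY;  /* fifo full: let the stack retry later */

        /* NETDEV_TX_OK: payload was copied; NETDEV_TX_KILL: frame too large, drop it */
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}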

Posted on 2008-08-08 16:45
i huck u, Rusty Russell, for lguest.

Posted on 2008-08-14 12:42
Originally posted by rtable on 2008-8-8 16:45:
i huck u, Rusty Russell, for lguest.

lguest is a bit over 6K lines of code, and I estimate comments plus blank lines make up about half of it; the code is all essence, truly admirable.
I have been reading it for a few days; studying it thoroughly is somewhat difficult, though the few virtio drivers are a bit easier.
No matter how much time it takes, I am determined to work through it.

Posted on 2008-08-16 18:51

  1.         lg = kzalloc(sizeof(*lg), GFP_KERNEL);
  2.         if (!lg) {
  3.                 err = -ENOMEM;
  4.                 goto unlock;
  5.         }

  6.         /* Populate the easy fields of our guest */
  7.         lg->mem_base = (void __user *)args[0];
  8.         lg->pfn_limit = args[1];

  9.         /* .26
  10.          *L:020
  11.          * The initialization write supplies 4 pointer sized (32 or 64 bit)
  12.          * values (in addition to the LHREQ_INITIALIZE value).
  13.          *
  14.          * This is the first cpu (cpu 0) and it will start booting at args[3]
  15.          */
  16.         err = lg_cpu_start(&lg->cpus[0], 0, args[3]);
  17.         if (err)
  18.                 goto release_guest;

  19. /*L:025                This actually initializes a CPU */

  20. static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
  21. {
  22.         /*
  23.          * We have a limited number of CPUs in the lguest struct.
  24.          *
  25.          * but the cpu is not told which guest it serves
  26.          */
  27.         if (id >= ARRAY_SIZE(cpu->lg->cpus))
  28.                 return -EINVAL;

  29.         /* Set up this CPU's id, and pointer back to the lguest struct. */
  30.         cpu->id = id;
  31.         cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
  32.         cpu->lg->nr_cpus++;

  33.         ...
  34. }

  35. /* and it seems it should be more like */

  36. static int lg_cpu_start(struct lguest *guest, unsigned id, unsigned long start_ip)
  37. {
  38.         struct lg_cpu *cpu;
  39.        
  40.         /* We have a limited number of CPUs in the lguest struct. */
  41.         if (id >= ARRAY_SIZE(guest->cpus))
  42.                 return -EINVAL;
  43.        
  44.         cpu = &guest->cpus[id];
  45.        
  46.         /* Set up this CPU's id, and pointer back to the lguest struct. */
  47.         cpu->id = id;
  48.         cpu->lg = guest; //container_of((cpu - id), struct lguest, cpus[0]);
  49.         cpu->lg->nr_cpus++;
  50.        
  51.         ...
  52. }

Posted on 2008-08-16 23:19
int run_guest(struct lg_cpu *cpu, unsigned long __user *user);

Posted on 2008-08-17 11:03
write(lguest, whoiam); //then
read(lguest, result);
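My reading of that write-then-read conversation, as a user-space sketch (not from the original post): the launcher first writes an LHREQ_INITIALIZE message telling the kernel "who I am", then read() on the same fd is what actually runs the guest (run_guest() quoted above) until it needs service. my_launch() and its parameters are hypothetical; the args[] layout follows the L:020 comment quoted earlier, the third value is left unnamed (the quoted snippet only shows args[0], args[1] and args[3]), and error handling is omitted.

#include <fcntl.h>
#include <unistd.h>
#include <linux/lguest_launcher.h>      /* LHREQ_INITIALIZE */

static int my_launch(unsigned long mem_base, unsigned long pfn_limit,
                     unsigned long arg2, unsigned long start_ip)
{
        /* "who I am": the 4 pointer-sized values described in the L:020 comment */
        unsigned long args[] = { LHREQ_INITIALIZE,
                                 mem_base, pfn_limit, arg2, start_ip };
        unsigned long notify;
        int lguest = open("/dev/lguest", O_RDWR);

        write(lguest, args, sizeof(args));                      /* initialize the guest */
        while (read(lguest, &notify, sizeof(notify)) > 0)       /* run the guest */
                ;       /* service the guest's notification here */
        return 0;
}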