- 论坛徽章:
- 0
|
请大家贴测试结果
- diff -upr linux-2.6.20.7/drivers/net/e1000/e1000_main.c linux-2.6.20.7-devQ/drivers/net/e1000/e1000_main.c
- --- linux-2.6.20.7/drivers/net/e1000/e1000_main.c 2007-05-28 10:50:46.000000000 +0800
- +++ linux-2.6.20.7-devQ/drivers/net/e1000/e1000_main.c 2007-05-28 11:29:11.000000000 +0800
- @@ -944,7 +944,7 @@ e1000_probe(struct pci_dev *pdev,
- netdev->watchdog_timeo = 5 * HZ;
- #ifdef CONFIG_E1000_NAPI
- netdev->poll = &e1000_clean;
- - netdev->weight = 64;
- + netdev->weight = 16;
- #endif
- netdev->vlan_rx_register = e1000_vlan_rx_register;
- netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
- @@ -1333,7 +1333,7 @@ e1000_sw_init(struct e1000_adapter *adap
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->polling_netdev[i].priv = adapter;
- adapter->polling_netdev[i].poll = &e1000_clean;
- - adapter->polling_netdev[i].weight = 64;
- + adapter->polling_netdev[i].weight = 16;
- dev_hold(&adapter->polling_netdev[i]);
- set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
- }
- @@ -3435,11 +3435,21 @@ e1000_xmit_frame(struct sk_buff *skb, st
- max_per_txd, nr_frags, mss));
-
- netdev->trans_start = jiffies;
- -
- - /* Make sure there is space in the ring for the next send. */
- - e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
- -
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
- + /* Make sure there is space in the ring for the next send. */
- + //e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
- + if (8 +2 > E1000_DESC_UNUSED(tx_ring)) {
- + int tx_cleaned = 0;
- + if (spin_trylock(&adapter->tx_queue_lock)) {
- + if (8 +2 > E1000_DESC_UNUSED(tx_ring))
- + tx_cleaned = e1000_clean_tx_irq(adapter, tx_ring);
- + else
- + tx_cleaned = 1;
- + spin_unlock(&adapter->tx_queue_lock);
- + }
- + if (!tx_cleaned)
- + e1000_maybe_stop_tx(netdev, tx_ring, 8 +2);
- + }
- return NETDEV_TX_OK;
- }
-
- @@ -3977,8 +3987,11 @@ e1000_clean(struct net_device *poll_dev,
- * simultaneously. A failure obtaining the lock means
- * tx_ring[0] is currently being cleaned anyway. */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- + if (8 +2 > E1000_DESC_UNUSED(&adapter->tx_ring[0]))
- tx_cleaned = e1000_clean_tx_irq(adapter,
- &adapter->tx_ring[0]);
- + else
- + tx_cleaned = 1;
- spin_unlock(&adapter->tx_queue_lock);
- }
-
- @@ -4056,7 +4069,7 @@ e1000_clean_tx_irq(struct e1000_adapter
-
- tx_ring->next_to_clean = i;
-
- -#define TX_WAKE_THRESHOLD 32
- +#define TX_WAKE_THRESHOLD 4 //32
- if (unlikely(cleaned && netif_carrier_ok(netdev) &&
- E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
- /* Make sure that anybody stopping the queue after this
- diff -upr linux-2.6.20.7/include/linux/netdevice.h linux-2.6.20.7-devQ/include/linux/netdevice.h
- --- linux-2.6.20.7/include/linux/netdevice.h 2007-05-28 10:43:47.000000000 +0800
- +++ linux-2.6.20.7-devQ/include/linux/netdevice.h 2007-06-27 11:55:19.000000000 +0800
- @@ -619,7 +619,7 @@ struct softnet_data
- struct sk_buff_head input_pkt_queue;
- struct list_head poll_list;
- struct sk_buff *completion_queue;
- -
- + atomic_t poll_nr;
- struct net_device backlog_dev; /* Sorry. 8) */
- #ifdef CONFIG_NET_DMA
- struct dma_chan *net_dma;
- @@ -872,7 +872,11 @@ static inline int netif_rx_reschedule(st
- dev->quota += undo;
-
- local_irq_save(flags);
- - list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
- + list_add_tail(&dev->poll_list,
- + &__get_cpu_var(softnet_data).poll_list);
- + atomic_inc(&__get_cpu_var(softnet_data).poll_nr);
- + /* make the poll_nr increment visible to other CPUs before the softirq runs */
- + smp_mb__after_atomic_inc();
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
- local_irq_restore(flags);
- return 1;
- @@ -894,6 +898,8 @@ static inline void netif_rx_complete(str
- list_del(&dev->poll_list);
- smp_mb__before_clear_bit();
- clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
- + /* smp_mb__before_atomic_dec(); */
- + atomic_dec(&per_cpu(softnet_data, smp_processor_id()).poll_nr);
- local_irq_restore(flags);
- }
-
- @@ -919,6 +925,8 @@ static inline void __netif_rx_complete(s
- list_del(&dev->poll_list);
- smp_mb__before_clear_bit();
- clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
- + /* smp_mb__before_atomic_dec(); */
- + atomic_dec(&per_cpu(softnet_data, smp_processor_id()).poll_nr);
- }
-
- static inline void netif_tx_lock(struct net_device *dev)
- diff -upr linux-2.6.20.7/include/linux/skbuff.h linux-2.6.20.7-devQ/include/linux/skbuff.h
- --- linux-2.6.20.7/include/linux/skbuff.h 2007-05-28 10:42:57.000000000 +0800
- +++ linux-2.6.20.7-devQ/include/linux/skbuff.h 2007-06-27 11:21:59.000000000 +0800
- @@ -114,6 +114,14 @@ struct sk_buff_head {
- spinlock_t lock;
- };
-
- +struct sk_buff_queue {
- + struct sk_buff *next;
- + struct sk_buff *prev;
- + __u32 qlen;
- + unsigned long last;
- +};
- +extern __u32 __sk_buff_thresh;
- +extern int __sk_buff_nics[8];
- struct sk_buff;
-
- /* To allow 64K frame to be packed as single skb without frag_list */
- diff -upr linux-2.6.20.7/net/core/dev.c linux-2.6.20.7-devQ/net/core/dev.c
- --- linux-2.6.20.7/net/core/dev.c 2007-05-28 10:45:11.000000000 +0800
- +++ linux-2.6.20.7-devQ/net/core/dev.c 2007-06-27 11:59:03.000000000 +0800
- @@ -1123,6 +1123,8 @@ void __netif_rx_schedule(struct net_devi
- local_irq_save(flags);
- dev_hold(dev);
- list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
- + atomic_inc(&__get_cpu_var(softnet_data).poll_nr);
- + smp_mb__after_atomic_inc();
- if (dev->quota < 0)
- dev->quota += dev->weight;
- else
- @@ -3506,6 +3508,7 @@ static int __init net_dev_init(void)
- skb_queue_head_init(&queue->input_pkt_queue);
- queue->completion_queue = NULL;
- INIT_LIST_HEAD(&queue->poll_list);
- + atomic_set(&queue->poll_nr, 0);
- set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
- queue->backlog_dev.weight = weight_p;
- queue->backlog_dev.poll = process_backlog;
- diff -upr linux-2.6.20.7/net/core/skbuff.c linux-2.6.20.7-devQ/net/core/skbuff.c
- --- linux-2.6.20.7/net/core/skbuff.c 2007-05-28 10:44:47.000000000 +0800
- +++ linux-2.6.20.7-devQ/net/core/skbuff.c 2007-06-27 11:24:19.000000000 +0800
- @@ -70,7 +70,8 @@
-
- static struct kmem_cache *skbuff_head_cache __read_mostly;
- static struct kmem_cache *skbuff_fclone_cache __read_mostly;
- -
- +__u32 __sk_buff_thresh = 31;
- +int __sk_buff_nics[8] = { 0, 0, 0, 0, /**/ 0, 0, 0, 0 };
- /*
- * Keep out-of-line to prevent kernel bloat.
- * __builtin_return_address is not used because it is not always
- diff -upr linux-2.6.20.7/net/ipv4/ip_input.c linux-2.6.20.7-devQ/net/ipv4/ip_input.c
- --- linux-2.6.20.7/net/ipv4/ip_input.c 2007-05-28 10:45:48.000000000 +0800
- +++ linux-2.6.20.7-devQ/net/ipv4/ip_input.c 2007-06-27 14:09:24.000000000 +0800
- @@ -328,7 +328,7 @@ drop:
- return -1;
- }
-
- -static inline int ip_rcv_finish(struct sk_buff *skb)
- +static inline int ip_rcv_finish2(struct sk_buff *skb)
- {
- struct iphdr *iph = skb->nh.iph;
-
- @@ -367,6 +367,100 @@ drop:
- return NET_RX_DROP;
- }
-
- +static void __tfn(unsigned long data)
- +{
- + unsigned long now = jiffies, cnt = 0;
- + struct sk_buff * head = (void *) data;
- + while (head) {
- + struct sk_buff * skb = head;
- + ++cnt;
- + head = head->next;
- + skb->prev =
- + skb->next = NULL;
- + ip_rcv_finish2(skb);
- + }
- + if (net_ratelimit())
- + printk(KERN_INFO "skbQ tfn by %d jiff<%lu %lu> cnt %lu\n",
- + smp_processor_id(), now, jiffies, cnt);
- +}
- +#define HERE_ARE 32
- +static struct timer_list buff_ts[HERE_ARE] cacheline_aligned_in_smp;
- +static struct sk_buff_queue buff_qs[HERE_ARE] cacheline_aligned_in_smp;
- +
- +static inline int __skb__nic_hit(const struct sk_buff *skb)
- +{
- + int j, k;
- + if (!skb || !skb->iif)
- + return 0;
- + k = sizeof(__sk_buff_nics) >> 2;
- + for (j = 0; j < k; j++) {
- + if (__sk_buff_nics[j] > 0 &&
- + __sk_buff_nics[j] == skb->iif)
- + return skb->iif;
- + }
- + return 0;
- +}
- +
- +static inline int ip_rcv_finish(struct sk_buff * skb)
- +{
- +#ifdef CONFIG_SMP
- + unsigned long flags, now = jiffies;
- + int cpu = smp_processor_id();
- + int j = cpu;
- + struct timer_list *t = &buff_ts[cpu];
- + struct sk_buff_queue *q = &buff_qs[cpu];
- + if (!__skb__nic_hit(skb))
- + return ip_rcv_finish2(skb);
- + if (!q->next) {
- + q->next =
- + q->prev = (struct sk_buff *)q;
- + q->qlen = 0;
- + q->last = now;
- + init_timer(t);
- + }
- + local_irq_save(flags);
- + __skb_queue_tail((struct sk_buff_head *)q, skb);
- + local_irq_restore(flags);
- + if (q->qlen > __sk_buff_thresh
- + || now > 4 + q->last) {
- + int best =256, ok = HERE_ARE -1;
- +
- + if (timer_pending(t))
- + return 0;
- + for_each_online_cpu(cpu) {
- + int pnr =
- + atomic_read(&per_cpu(softnet_data, cpu).poll_nr);
- + if (!pnr) {
- + ok = cpu;
- + break;
- + } else if (best > pnr) {
- + ok = cpu;
- + best = pnr;
- + }
- + }
- + local_irq_save(flags);
- + t->data = (unsigned long) q->next;
- + q->prev->next = NULL;
- + q->next =
- + q->prev = (struct sk_buff *)q;
- + q->qlen = 0;
- + q->last = now;
- + local_irq_restore(flags);
- +
- + t->function = __tfn;
- + t->expires = now = jiffies;
- + add_timer_on(t, ok);
- + if (net_ratelimit())
- + printk(KERN_INFO "skbQ sched<%d %d> jiff %lu\n",
- + j, ok, now);
- + }
- + return 0;
- +#else /* CONFIG_SMP */
- + return ip_rcv_finish2(skb);
- +#endif
- +}
- +#undef HERE_ARE
- +
- /*
- * Main IP Receive routine.
- */
- diff -upr linux-2.6.20.7/net/ipv4/sysctl_net_ipv4.c linux-2.6.20.7-devQ/net/ipv4/sysctl_net_ipv4.c
- --- linux-2.6.20.7/net/ipv4/sysctl_net_ipv4.c 2007-05-28 10:46:27.000000000 +0800
- +++ linux-2.6.20.7-devQ/net/ipv4/sysctl_net_ipv4.c 2007-06-27 11:31:15.000000000 +0800
- @@ -186,6 +186,9 @@ static int strategy_allowed_congestion_c
-
- }
-
- +extern __u32 __sk_buff_thresh;
- +extern int __sk_buff_nics[8];
- +
- ctl_table ipv4_table[] = {
- {
- .ctl_name = NET_IPV4_TCP_TIMESTAMPS,
- @@ -211,7 +214,23 @@ ctl_table ipv4_table[] = {
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- - {
- + {
- + .ctl_name = 11,
- + .procname = "skbuff_thresh",
- + .data = &__sk_buff_thresh,
- + .maxlen = 4,
- + .mode = 0644,
- + .proc_handler = &proc_dointvec
- + },
- + {
- + .ctl_name = 12,
- + .procname = "skbuff_nics",
- + .data = &__sk_buff_nics,
- + .maxlen = sizeof(__sk_buff_nics),
- + .mode = 0644,
- + .proc_handler = &proc_dointvec
- + },
- + {
- .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE,
- .procname = "tcp_retrans_collapse",
- .data = &sysctl_tcp_retrans_collapse,
复制代码
[ 本帖最后由 sisi8408 于 2007-8-22 11:07 编辑 ] |
|