Forum badges: 0
tcp_sendmsg() code after applying the patch
- int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size)
- {
- struct sock *sk = sock->sk;
- struct iovec *iov;
- struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb;
- int iovlen, flags;
- int mss_now, size_goal;
- int err, copied;
- long timeo;
- int atomic; /* is atomic write? johnye. Feb 2, 2010 */
- lock_sock(sk);
- TCP_CHECK_TIMER(sk);
- flags = msg->msg_flags;
- timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /* Wait for a connection to finish. */
- if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
- if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
- goto out_err;
- /* This should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
- /* Ok commence sending. */
- iovlen = msg->msg_iovlen;
- iov = msg->msg_iov;
- copied = 0;
- err = -EPIPE;
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto do_error;
- /* for multi-seg data or too big chunk, no atomic. johnye. */
- atomic = tp->atomic_size;
- if(iovlen > 1 || iov->iov_len > atomic) atomic = 0;
- while (--iovlen >= 0) {
- int seglen = iov->iov_len;
- unsigned char __user *from = iov->iov_base;
- iov++;
- while (seglen > 0) {
- int copy;
- skb = tcp_write_queue_tail(sk);
- if (!tcp_send_head(sk) ||
- (copy = size_goal - skb->len) <= 0) {
- new_segment:
- /* Allocate new segment. If the interface is SG,
- * allocate skb fitting to single page.
- */
- if (!sk_stream_memory_free(sk))
- goto wait_for_sndbuf;
- skb = sk_stream_alloc_skb(sk, select_size(sk),
- sk->sk_allocation);
- if (!skb)
- goto wait_for_memory;
- /*
- * Check whether we can use HW checksum.
- */
- if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb);
- copy = size_goal;
- }
- /* Try to append data to the end of skb. */
- if (copy > seglen)
- copy = seglen;
- /* if atomic write. johnye */
- if (atomic)
- copy = seglen;
- /* Where to copy to? */
- if (skb_tailroom(skb) > 0) {
- /* We have some space in skb head. Superb! */
- /* consider atomic write, johnye */
- if (copy > skb_tailroom(skb)) {
- if(atomic)
- goto skb_page_start; /* q mark yet, johnye */
- copy = skb_tailroom(skb);
- }
- if ((err = skb_add_data(skb, from, copy)) != 0)
- goto do_fault;
- goto skb_page_done;
- //} else {
- }
- skb_page_start:
- {
- int merge = 0;
- int i = skb_shinfo(skb)->nr_frags;
- struct page *page = TCP_PAGE(sk);
- int off = TCP_OFF(sk);
- if (skb_can_coalesce(skb, i, page, off) &&
- off != PAGE_SIZE) {
- /* We can extend the last page
- * fragment. */
- merge = 1;
- } else if (i == MAX_SKB_FRAGS ||
- (!i &&
- !(sk->sk_route_caps & NETIF_F_SG))) {
- /* Need to add new fragment and cannot
- * do this because interface is non-SG,
- * or because all the page slots are
- * busy. */
- tcp_mark_push(tp, skb);
- goto new_segment;
- } else if (page) {
- if (off == PAGE_SIZE) {
- put_page(page);
- TCP_PAGE(sk) = page = NULL;
- off = 0;
- }
- } else
- off = 0;
- /* consider atomic write, johnye */
- if (copy > PAGE_SIZE - off) {
- if (atomic && page) {
- put_page(page);
- TCP_PAGE(sk) = page = NULL;
- off = 0;
- merge = 0;
- } else {
- copy = PAGE_SIZE - off;
- }
- }
- if (!sk_wmem_schedule(sk, copy))
- goto wait_for_memory;
- if (!page) {
- /* Allocate new cache page. */
- if (!(page = sk_stream_alloc_page(sk)))
- goto wait_for_memory;
- }
- /* Time to copy data. We are close to
- * the end! */
- err = skb_copy_to_page(sk, from, skb, page,
- off, copy);
- if (err) {
- /* If this page was new, give it to the
- * socket so it does not get leaked.
- */
- if (!TCP_PAGE(sk)) {
- TCP_PAGE(sk) = page;
- TCP_OFF(sk) = 0;
- }
- goto do_error;
- }
- /* Update the skb. */
- if (merge) {
- skb_shinfo(skb)->frags[i - 1].size +=
- copy;
- } else {
- skb_fill_page_desc(skb, i, page, off, copy);
- if (TCP_PAGE(sk)) {
- get_page(page);
- } else if (off + copy < PAGE_SIZE) {
- get_page(page);
- TCP_PAGE(sk) = page;
- }
- }
- TCP_OFF(sk) = off + copy;
- }
- skb_page_done:
- if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
- tp->write_seq += copy;
- TCP_SKB_CB(skb)->end_seq += copy;
- skb_shinfo(skb)->gso_segs = 0;
- from += copy;
- copied += copy;
- if ((seglen -= copy) == 0 && iovlen == 0)
- goto out;
- if (skb->len < size_goal || (flags & MSG_OOB))
- continue;
- if (forced_push(tp)) {
- tcp_mark_push(tp, skb);
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == tcp_send_head(sk))
- tcp_push_one(sk, mss_now);
- continue;
- wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- wait_for_memory:
- if (copied)
- tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
- if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
- goto do_error;
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
- }
- }
- out:
- if (copied)
- tcp_push(sk, flags, mss_now, tp->nonagle);
- TCP_CHECK_TIMER(sk);
- release_sock(sk);
- return copied;
- do_fault:
- if (!skb->len) {
- tcp_unlink_write_queue(skb, sk);
- /* It is the one place in all of TCP, except connection
- * reset, where we can be unlinking the send_head.
- */
- tcp_check_send_head(sk, skb);
- sk_wmem_free_skb(sk, skb);
- }
- do_error:
- if (copied)
- goto out;
- out_err:
- err = sk_stream_error(sk, flags, err);
- TCP_CHECK_TIMER(sk);
- release_sock(sk);
- return err;
- }
Copy code