IP层实现2--gro

来源:互联网 发布:千牛mac电脑官方下载 编辑:程序博客网 时间:2024/06/05 05:31

GRO(generic receive offload)。这是为了提高接收速度的一种机制。原理就是在接收端通过把多个相关的报文(比如TCP分段报文)组装成一个大的报文后再传送给协议栈进行处理。因为内核协议栈对报文的处理都是对报文头部进行处理,如果相关的多个报文合并后只有一个报文头,这样就减少了协议栈处理报文个数,加快协议栈对报文的处理速度。但如果包是要被转发的,就不需要使用GRO,这时使用GRO功能反而会降低处理速度。GRO功能只是针对NAPI类型的驱动。

当支持NAPI时,网卡接收到数据后,通过调用napi_gro_receive,将数据交给上层协议,而在此过程中,会在每层协议中调用不同的回调函数。

实现时,是使用了分片,新的包被当作分片插入到旧包当中。

要支持GRO,首先要初始化napi_struct对象,并向设备进行注册:

[ net/core/dev.c ]

/* Initialise a napi_struct and register it with the given net_device.
 * @dev:    device the NAPI context belongs to
 * @napi:   NAPI context to initialise
 * @poll:   driver poll callback invoked by the NAPI softirq
 * @weight: poll budget (packets per poll invocation)
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);	/* not yet scheduled on any poll list */
	napi->gro_count = 0;			/* no packets held for GRO yet */
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;			/* function run when NAPI polls */
	if (weight > NAPI_POLL_WEIGHT)		/* NAPI_POLL_WEIGHT == 64 */
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	/* Link the NAPI context into the device's list so it is reachable
	 * from the net_device. */
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;			/* back-pointer to the owning device */
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	/* Start in SCHED state so the context is disabled until the driver
	 * explicitly enables it (napi_enable clears this bit). */
	set_bit(NAPI_STATE_SCHED, &napi->state);
}

GRO要跨过很多个协议层,在跨越的时候,sk_buff中的数据偏移(如data等)不能改变,所以使用sk_buff->cb,设计了一个napi_gro_cb用来记录和GRO相关的数据:
[ include/linux/netdevice.h ]

/* Per-skb GRO state, kept in skb->cb so that skb->data and friends need
 * not be modified while the packet traverses the GRO layers. */
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data.
	 * Offset of the data currently being processed; skb->data itself
	 * must stay unchanged during GRO, so the offset lives here. */
	int data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated into this packet. */
	u16	count;

	/* This is non-zero if the packet may be of the same flow.
	 * Each layer's gro_receive updates this flag: a newly received skb
	 * is matched against the packets held on napi->gro_list, and upper
	 * layers only need to consider entries still marked 1 by the layer
	 * below. */
	u8	same_flow;

	/* Free the skb? */
	u8	free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() */
	u16	proto;

	/* Used in udp_gro_receive */
	u16	udp_mark;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

GRO的入口在驱动中调用,如对于8139cp驱动,在cp_rx_skb函数中进行调用,调用的函数如下:

[ net/core/dev.c ]

/* GRO entry point called from NAPI drivers: run the skb through GRO and
 * then finish it (deliver, hold, or free) according to the result. */
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t gro_ret;

	trace_napi_gro_receive_entry(skb);

	gro_ret = dev_gro_receive(napi, skb);
	return napi_skb_finish(gro_ret, skb);
}
EXPORT_SYMBOL(napi_gro_receive);

先看napi_skb_finish,它根据dev_gro_receive的返回值,作一些处理:

[ net/core/dev.c ]

/* Act on the result of dev_gro_receive(): deliver, free, or leave the skb
 * depending on whether it was merged, held, or rejected by GRO. */
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		/* Not handled by GRO: hand the skb to the stack directly. */
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		/* Rejected: drop the skb. */
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		/* skb's data was merged into another packet, so the skb
		 * itself can be freed.  STOLEN_HEAD means the head buffer
		 * was taken over as a fragment, so only the sk_buff struct
		 * is returned to its cache. */
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:	/* skb was queued on gro_list */
	case GRO_MERGED:	/* merge complete, nothing more to do */
		break;
	}

	return ret;
}
对GRO的处理主要是通过dev_gro_receive来进行的:

[ net/core/dev.c ]

/* Core GRO dispatch: try to merge @skb with packets already held on
 * napi->gro_list via the registered packet_offload callbacks.
 * Returns GRO_NORMAL (bypass), GRO_HELD (queued), GRO_MERGED,
 * GRO_MERGED_FREE, or falls back to normal delivery. */
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;		/* network-layer protocol */
	/* Each packet type registers a packet_offload on this list with its
	 * GRO callbacks. */
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;

	/* Skip GRO if the device does not advertise NETIF_F_GRO, or netpoll
	 * is active on it (netpoll_rx_on() is always false without
	 * CONFIG_NETPOLL). */
	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	/* Already aggregated (GSO) or carrying a frag list: do not re-GRO. */
	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	skb_gro_reset_offset(skb);	/* initialise per-skb GRO state */
	gro_list_prepare(napi, skb);	/* mark which held packets may match */
	NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		/* Skip entries of a different type or without a gro_receive
		 * callback. */
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));	/* offset 0 here */
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;	/* no flow match yet */
		NAPI_GRO_CB(skb)->flush = 0;		/* mergeable so far */
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;

		/* Hand the skb to the protocol's GRO receive function. */
		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	/* No matching offload entry found: deliver normally. */
	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	/* If the skb's data was stolen it must be freed, otherwise it was
	 * merged in place. */
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		/* The protocol completed a merged packet: unlink it from
		 * napi->gro_list and flush it up the stack. */
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)		/* skb was merged into an existing flow */
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)	/* skb must not be aggregated */
		goto normal;

	/* skb starts a new flow: hold it on napi->gro_list. */
	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {	/* list full (8) */
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		/* Evict the oldest held packet and flush it up the stack. */
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;		/* first segment of this flow */
	NAPI_GRO_CB(skb)->age = jiffies;	/* timestamp for flushing */
	NAPI_GRO_CB(skb)->last = skb;		/* frag_list tail starts at skb */
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);	/* payload size */
	skb->next = napi->gro_list;		/* push onto gro_list */
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	/* If the GRO offset runs past the linear head, the headers live in
	 * frag0: pull them into the linear area so fragments hold payload
	 * only. */
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		/* Copy the header bytes from the first fragment into the
		 * linear buffer. */
		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;		/* linear area grows */
		skb->data_len -= grow;		/* paged data shrinks */

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			/* First fragment fully consumed: drop its page ref
			 * and shift the remaining fragments down. */
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

其中调用到的函数:

[ net/core/dev.c ]
/* Initialise the GRO control block for a freshly received skb, and set up
 * the frag0 fast path when the packet's data lives entirely in the first
 * fragment. */
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];	/* first fragment */

	NAPI_GRO_CB(skb)->data_offset = 0;	/* start at skb->data */
	NAPI_GRO_CB(skb)->frag0 = NULL;		/* no fast-path frag yet */
	NAPI_GRO_CB(skb)->frag0_len = 0;

	/* Use the frag0 fast path only when:
	 *  - the linear area holds just the MAC header (tail == mac header),
	 *  - there is at least one fragment, and
	 *  - that fragment is not in highmem (so it can be addressed
	 *    directly). */
	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}
[ net/core/dev.c ]
/* Walk napi->gro_list and mark, for each held packet, whether it could
 * belong to the same flow as @skb (same hash, device, VLAN tag and MAC
 * header).  Upper layers then only compare against entries still marked
 * same_flow. */
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;	/* link header len */
	u32 hash = skb_get_hash_raw(skb);			/* flow hash */

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;	/* assume mergeable */

		if (hash != skb_get_hash_raw(p)) {
			/* Different flow hash: cannot be the same flow. */
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Compare device, VLAN tag, then the link-layer header. */
		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)		/* Ethernet fast path (14 bytes) */
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);

		/* same_flow iff device, VLAN tag and MAC header all match. */
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

最后还要调用napi_gro_complete函数:

[ net/core/dev.c ]

/* Finalise a GRO packet (fix up headers via the protocol's gro_complete
 * callback) and hand it to the stack. */
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;		/* network-layer protocol */
	struct list_head *head = &offload_base;	/* registered offload handlers */
	int err = -ENOENT;

	/* The GRO control block must fit inside skb->cb. */
	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		/* Single segment: nothing was merged, no GSO metadata needed. */
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		/* Skip entries of a different type or without a gro_complete
		 * callback. */
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);	/* deliver to the stack */
}
[ net/core/dev.c ]
/* Deliver an skb to the protocol stack, optionally steering it to another
 * CPU's backlog via RPS first. */
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);	/* rx timestamp */

	if (skb_defer_rx_timestamp(skb))	/* returns false here unless
						 * PHY timestamping applies */
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		/* Pick a target CPU for this flow; enqueue remotely if one
		 * was chosen. */
		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}

/* Wrapper around __netif_receive_skb_core() that restricts PFMEMALLOC skbs
 * (emergency-reserve memory) to sockets allowed to consume them. */
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}
[ net/core/dev.c ]
/* Main receive path: run the skb through taps, VLAN handling, the device's
 * rx_handler (e.g. bridge/bond), and finally the protocol handler matching
 * skb->protocol.
 * @pfmemalloc: true when the skb was allocated from emergency reserves, in
 *              which case taps are skipped and only pfmemalloc-safe
 *              protocols may receive it. */
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);	/* rx timestamp */

	trace_netif_receive_skb(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		goto out;

	orig_dev = skb->dev;	/* remember the original receiving device */

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;	/* ifindex of device we arrived on */

	/* softnet_data is per-CPU; count this packet as processed. */
	__this_cpu_inc(softnet_data.processed);

	/* Strip a VLAN tag embedded in the frame, if any. */
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)		/* emergency skbs bypass the taps */
		goto skip_taps;

	/* Deliver to all protocol-agnostic taps (ptype_all, e.g. packet
	 * sockets).  pt_prev delays each delivery by one iteration so the
	 * final handler can consume the skb without an extra clone. */
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);	/* ingress qdisc */
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {		/* hardware-accelerated VLAN tag */
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))	/* no-op without VLAN support */
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	/* Let a registered rx_handler (bridge, bonding, macvlan, ...) claim
	 * or redirect the packet. */
	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fallthrough */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(vlan_tx_tag_present(skb))) {
		if (vlan_tx_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	/* Deliver to the protocol handlers hashed by skb->protocol
	 * (e.g. ip_rcv for ETH_P_IP). */
	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			/* Final delivery, consuming the skb. */
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
out:
	return ret;
}
[ net/core/dev.c ]
/* Deliver @skb to one packet_type handler, taking an extra reference so
 * the caller can keep using the skb afterwards.
 * Returns -ENOMEM if the skb's fragments could not be orphaned, otherwise
 * whatever the handler's func callback returns. */
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	int orphan_err;

	/* Detach user-space fragment ownership before sharing the skb. */
	orphan_err = skb_orphan_frags(skb, GFP_ATOMIC);
	if (unlikely(orphan_err))
		return -ENOMEM;

	/* The handler consumes one reference; keep ours alive. */
	atomic_inc(&skb->users);

	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}


下面是IP层对应的GRO函数

[ net/ipv4/af_inet.c ]
static struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb){const struct net_offload *ops;struct sk_buff **pp = NULL;struct sk_buff *p;const struct iphdr *iph;unsigned int hlen;unsigned int off;unsigned int id;int flush = 1;int proto;off = skb_gro_offset(skb);// 当前的数据偏移,此时指向IP头部hlen = off + sizeof(*iph);// 当前的数据偏移加IP头的长度iph = skb_gro_header_fast(skb, off);// IP头部,在有分片的情况下可得到if (skb_gro_header_hard(skb, hlen)) {// 分片长度是否小于hleniph = skb_gro_header_slow(skb, hlen, off);// IP头部if (unlikely(!iph))goto out;}proto = iph->protocol;// IP上层协议 rcu_read_lock();ops = rcu_dereference(inet_offloads[proto]);// 得到上层协议的net_offload if (!ops || !ops->callbacks.gro_receive)// 上层协议相应的GRO函数是否为空goto out_unlock;if (*(u8 *)iph != 0x45)// IP头是否合法goto out_unlock;if (unlikely(ip_fast_csum((u8 *)iph, 5)))// IP头校验goto out_unlock;id = ntohl(*(__be32 *)&iph->id);// IP IDflush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));// 是否为分片包id >>= 16;/* 对gro_list列表中所有的包循环 * 看列表中的包是否和skb是同一个流,是否可以和skb进行合并 */for (p = *head; p; p = p->next) {struct iphdr *iph2;if (!NAPI_GRO_CB(p)->same_flow)// 如果不是同一个流的直接下一个continue;iph2 = (struct iphdr *)(p->data + off)// 得到IP头部/* The above works because, with the exception of the top * (inner most) layer, we only aggregate pkts with the same * hdr length so all the hdrs we'll need to verify will start * at the same offset. * 头部协议不同,源地址不同,目的地址不同,则不在同一个流 */if ((iph->protocol ^ iph2->protocol) |    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {NAPI_GRO_CB(p)->same_flow = 0;continue;}/* All fields must match except length and checksum.  * IP头部的各项要相同,不同则不能合并 */NAPI_GRO_CB(p)->flush |=(iph->ttl ^ iph2->ttl) |(iph->tos ^ iph2->tos) |((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));/* Save the IP ID check to be included later when we get to * the transport layer so only the inner most IP ID is checked. 
* This is because some GSO/TSO implementations do not * correctly increment the IP ID for the outer hdrs.                 * 包的ID是否正确,ID是否连续                 */NAPI_GRO_CB(p)->flush_id =    ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);NAPI_GRO_CB(p)->flush |= flush;// flush不为0说明不能合并} // forNAPI_GRO_CB(skb)->flush |= flush; // flush不为0说明不能合并skb_set_network_header(skb, off);//  设置网络层头部/* The above will be needed by the transport layer if there is one * immediately following this IP hdr. */skb_gro_pull(skb, sizeof(*iph));// GRO的数据偏移到IP头后面skb_set_transport_header(skb, skb_gro_offset(skb));// 设置传输层头部偏移pp = ops->callbacks.gro_receive(head, skb);// 调用上层的GRO函数out_unlock:rcu_read_unlock();out:NAPI_GRO_CB(skb)->flush |= flush;// flush不为0说明不能合并return pp;}



/* IPv4 gro_complete: rewrite tot_len and the header checksum for the
 * merged packet, then invoke the transport layer's gro_complete.
 * @nhoff: offset of the IP header from skb->data (0 at the outermost layer). */
static int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
	__be16 newlen = htons(skb->len - nhoff);	/* merged total length */
	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	int proto = iph->protocol;	/* transport protocol */
	int err = -ENOSYS;

	if (skb->encapsulation)		/* tunnelled packet */
		skb_set_inner_network_header(skb, nhoff);

	/* Incrementally patch the header checksum for the new tot_len. */
	csum_replace2(&iph->check, iph->tot_len, newlen);
	iph->tot_len = newlen;

	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	/* Only need to add sizeof(*iph) to get to the next hdr below
	 * because any hdr with option will have been flushed in
	 * inet_gro_receive().
	 */
	err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));

out_unlock:
	rcu_read_unlock();

	return err;
}


下面是TCP层对应的GRO函数

[ net/ipv4/tcp_offload.c ]

/* TCP-over-IPv4 gro_receive: verify the TCP checksum (using the IPv4
 * pseudo-header), then defer to the generic tcp_gro_receive(). */
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Use the IP hdr immediately proceeding for this transport */
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip_csum;

	wsum = NAPI_GRO_CB(skb)->csum;	/* checksum accumulated so far */

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		/* No hardware checksum: compute it over the GRO data. */
		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
				    0);

		/* fall through */

	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  wsum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;	/* verified */
			break;
		}

		NAPI_GRO_CB(skb)->flush = 1;	/* bad checksum: do not merge */
		return NULL;
	}

skip_csum:
	return tcp_gro_receive(head, skb);
}

/* TCP-over-IPv4 gro_complete: prime th->check with the pseudo-header
 * checksum for the merged length, tag the packet as TCPv4 GSO, and finish
 * via tcp_gro_complete().
 * @thoff: offset of the TCP header from skb->data. */
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	/* Pseudo-header checksum over the new total TCP length; the rest is
	 * completed later (CHECKSUM_PARTIAL). */
	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

/* Generic TCP gro_complete: set up partial-checksum bookkeeping for the
 * merged packet, record how many segments were aggregated, and propagate
 * the ECN CWR flag into the GSO type. */
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	/* Offset from skb->head where checksumming should start: the start
	 * of the TCP header. */
	skb->csum_start = (unsigned char *)th - skb->head;
	/* Offset from csum_start where the checksum should be stored: the
	 * position of tcphdr.check. */
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;	/* finish checksum later */

	/* Number of segments merged by GRO, for GSO resegmentation. */
	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)	/* congestion-window-reduced flag => ECN GSO type */
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

/* Generic TCP gro_receive: locate a held packet of the same flow (same
 * source/destination ports), check that the segments are contiguous and
 * their headers compatible, and merge via skb_gro_receive().
 * Returns the gro_list slot of a packet that must be flushed, or NULL. */
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);	/* current offset: start of TCP header */
	hlen = off + sizeof(*th);	/* offset past the minimal header */
	th = skb_gro_header_fast(skb, off);	/* frag0 fast path */
	if (skb_gro_header_hard(skb, hlen)) {	/* frag0 too short */
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;		/* actual header length incl. options */
	if (thlen < sizeof(*th))	/* malformed header */
		goto out;

	hlen = off + thlen;		/* offset past the full header */
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);	/* advance past the TCP header */

	len = skb_gro_len(skb);		/* payload length */
	flags = tcp_flag_word(th);	/* TCP flags word */

	/* Find the held packet of the same flow (matching port pair). */
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		/* Compare source+dest ports as one 32-bit word. */
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;	/* p is the candidate to merge into */
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr.
	 * Any non-zero flush forbids merging. */
	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
	flush |= (__force int)(flags & TCP_FLAG_CWR);	/* congestion signal */
	/* Flags other than CWR/FIN/PSH must be identical. */
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);	/* same ACK */
	for (i = sizeof(*th); i < thlen; i += 4)	/* identical options */
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);	/* segment size of the held packet */

	flush |= (len - 1) >= mss;	/* oversized segment */
	/* New segment must start exactly where the held data ends. */
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	/* Merge unless flush is set; skb_gro_receive() returns non-zero on
	 * failure, which also forces a flush below. */
	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;	/* ensure "len < mss" below evaluates false */
		goto out_check_final;
	}

	/* head now refers to the merged packet. */
	p = *head;
	th2 = tcp_hdr(p);
	/* Propagate FIN/PSH from the merged-in segment. */
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* A short segment or any of these flags ends the aggregation. */
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;	/* tell the caller to flush this packet */

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}
[ net/core/skbuff.c ]
/* Merge @skb into the held packet *@head.  Three strategies, in order of
 * preference:
 *  1. the skb's payload is entirely in page fragments -> move the frags
 *     into the tail packet's frag array;
 *  2. the skb's head is page-backed (head_frag) -> turn the linear data
 *     into an extra fragment and append the rest;
 *  3. otherwise chain the skb onto the packet's frag_list (allocating a
 *     fresh header skb the first time).
 * On success the merged packet is reachable via *@head; returns 0, or a
 * negative errno when the merge is impossible. */
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);	/* payload offset in skb */
	unsigned int headlen = skb_headlen(skb);	/* linear data length */
	struct sk_buff *nskb, *lp, *p = *head;		/* p: merge target */
	unsigned int len = skb_gro_len(skb);	/* payload length (incl. frags) */
	unsigned int delta_truesize;
	unsigned int headroom;

	/* Combined packet must stay under 64KiB (IP length limit). */
	if (unlikely(p->len + len >= 65536))
		return -E2BIG;

	/* Packets already merged into p hang off its frag_list;
	 * NAPI_GRO_CB(p)->last is the tail of that list — the skb is merged
	 * into there. */
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		/* Case 1: all payload lives in page fragments. */
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;	/* combined frag count */

		if (nr_frags > MAX_SKB_FRAGS)	/* no room: fall back to chaining */
			goto merge;

		offset -= headlen;	/* payload offset within frag0 */
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;	/* frags now owned by the target */

		/* Copy skb's frag descriptors to the end of the target's
		 * frag array, back to front. */
		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		/* frag now points at the first copied fragment; skip the
		 * protocol headers so only payload remains. */
		frag->page_offset += offset;
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		delta_truesize = skb->truesize -
				 SKB_TRUESIZE(skb_end_offset(skb));

		skb->truesize -= skb->data_len;	/* frag data now accounted on p */
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;	/* skb can be freed */
		goto done;
	} else if (skb->head_frag) {
		/* Case 2: linear head is page-backed — expose it as one
		 * extra fragment plus skb's own fragments. */
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;	/* linear payload */
		unsigned int first_offset;

		/* +1 slot for the head-derived fragment. */
		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		/* Offset of the payload within the head's page. */
		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		/* This fragment describes the skb's linear payload. */
		frag->page.p	  = page;
		frag->page_offset = first_offset;
		skb_frag_size_set(frag, first_size);

		/* Append skb's own fragment descriptors after it. */
		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We dont need to clear skbinfo->nr_frags here */

		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
		/* The head buffer was stolen: only the sk_buff struct is
		 * freed later. */
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

	/* Case 3: chain via frag_list. */
	if (pinfo->frag_list)	/* tail already has a frag_list: just append */
		goto merge;
	if (skb_gro_len(p) != pinfo->gso_size)	/* inconsistent segment size */
		return -E2BIG;

	/* First chained merge: allocate a new header-only skb that takes
	 * over p's headers, with p demoted to the first frag_list entry. */
	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);	/* clone p's metadata */
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));	/* room for the headers */

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	/* Strip p down to payload and copy its headers into nskb. */
	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	skb_shinfo(nskb)->frag_list = p;	/* p becomes the first chained skb */
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	pinfo->gso_size = 0;
	skb_header_release(p);	/* p's header area is no longer writable */
	NAPI_GRO_CB(nskb)->last = p;

	nskb->data_len += p->len;
	nskb->truesize += p->truesize;
	nskb->len += p->len;

	/* nskb replaces p in the gro_list. */
	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;	/* continue merging into the new head */

merge:
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		/* Payload starts inside the fragments: trim the header
		 * bytes out of frag0 first. */
		unsigned int eat = offset - headlen;

		skbinfo->frags[0].page_offset += eat;
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);	/* skb->data now points at payload */

	/* Append skb to p's frag_list (last tracks the tail). */
	if (NAPI_GRO_CB(p)->last == p)		/* frag_list still empty */
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	skb_header_release(skb);
	lp = p;

done:
	/* Account the merged payload on the head packet (and on the chained
	 * tail when different). */
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;	/* mark skb as merged */
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);


0 0
原创粉丝点击