
[Bug 1521053] Re: Network Performance dropping between vms on different location in Azure

 

@timg-tpi

I reverted it on the latest ubuntu-vivid, but it has variable
dependencies on the other related commits, so I patched it as below.

It is better, but the dropping is still there:

original kernel: 100 MB/s -> 0.3 MB/s
patch below:     100 MB/s -> 15~20 MB/s

Thanks.

####################################################################

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index bf2604b..ad73121 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -132,8 +132,6 @@ struct hv_netvsc_packet {
        struct hv_device *device;
        bool is_data_pkt;
        bool xmit_more; /* from skb */
-       bool cp_partial; /* partial copy into send buffer */
-
        u16 vlan_tci;

        u16 q_idx;
@@ -148,9 +146,6 @@ struct hv_netvsc_packet {
        /* This points to the memory after page_buf */
        struct rndis_message *rndis_msg;

-       u32 rmsg_size; /* RNDIS header and PPI size */
-       u32 rmsg_pgcnt; /* page count of RNDIS header and PPI */
-
        u32 total_data_buflen;
        /* Points to the send/receive buffer where the ethernet frame is */
        void *data;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index b15041b..20102cd 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -703,18 +703,15 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
        u32 msg_size = 0;
        u32 padding = 0;
        u32 remain = packet->total_data_buflen % net_device->pkt_align;
-       u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
-               packet->page_buf_cnt;

        /* Add padding */
-       if (packet->is_data_pkt && packet->xmit_more && remain &&
-           !packet->cp_partial) {
+       if (packet->is_data_pkt && packet->xmit_more && remain) {
                padding = net_device->pkt_align - remain;
                packet->rndis_msg->msg_len += padding;
                packet->total_data_buflen += padding;
        }

-       for (i = 0; i < page_count; i++) {
+       for (i = 0; i < packet->page_buf_cnt; i++) {
                char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
                u32 offset = packet->page_buf[i].offset;
                u32 len = packet->page_buf[i].len;
@@ -742,7 +739,6 @@ static inline int netvsc_send_pkt(
        struct net_device *ndev = net_device->ndev;
        u64 req_id;
        int ret;
-       struct hv_page_buffer *pgbuf;
        u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

        nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
@@ -781,10 +777,8 @@ static inline int netvsc_send_pkt(
                packet->xmit_more = false;

        if (packet->page_buf_cnt) {
-               pgbuf = packet->cp_partial ? packet->page_buf +
-                       packet->rmsg_pgcnt : packet->page_buf;
                ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
-                                                     pgbuf,
+                                                     packet->page_buf,
                                                      packet->page_buf_cnt,
                                                      &nvmsg,
                                                      sizeof(struct nvsp_message),
@@ -841,7 +835,6 @@ int netvsc_send(struct hv_device *device,
        unsigned long flag;
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
-       bool try_batch;

        net_device = get_outbound_net_device(device);
        if (!net_device)
@@ -855,7 +848,6 @@ int netvsc_send(struct hv_device *device,
        }
        packet->channel = out_channel;
        packet->send_buf_index = NETVSC_INVALID_INDEX;
-       packet->cp_partial = false;

        msdp = &net_device->msd[q_idx];
@@ -864,18 +856,11 @@ int netvsc_send(struct hv_device *device,
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;

-       try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
-                   net_device->max_pkt;
-
-       if (try_batch && msd_len + pktlen + net_device->pkt_align <
+       if (packet->is_data_pkt && msd_len > 0 &&
+           msdp->count < net_device->max_pkt &&
+           msd_len + pktlen + net_device->pkt_align <
            net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;
-
-       } else if (try_batch && msd_len + packet->rmsg_size <
-                  net_device->send_section_size) {
-               section_index = msdp->pkt->send_buf_index;
-               packet->cp_partial = true;
-
        } else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
@@ -893,19 +878,13 @@ int netvsc_send(struct hv_device *device,
                                        packet);

                packet->send_buf_index = section_index;
-
-               if (packet->cp_partial) {
-                       packet->page_buf_cnt -= packet->rmsg_pgcnt;
-                       packet->total_data_buflen = msd_len + packet->rmsg_size;
-               } else {
-                       packet->page_buf_cnt = 0;
-                       packet->total_data_buflen += msd_len;
-               }
+               packet->page_buf_cnt = 0;
+               packet->total_data_buflen += msd_len;

                if (msdp->pkt)
                        netvsc_xmit_completion(msdp->pkt);

-               if (packet->xmit_more && !packet->cp_partial) {
+               if (packet->xmit_more) {
                        msdp->pkt = packet;
                        msdp->count++;
                } else {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d06f1bc..1d1649c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -315,16 +315,15 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
 }

 static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
-                          struct hv_netvsc_packet *packet)
+                          struct hv_page_buffer *pb)
 {
-       struct hv_page_buffer *pb = packet->page_buf;
        u32 slots_used = 0;
        char *data = skb->data;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;

        /* The packet is laid out thus:
-        * 1. hdr: RNDIS header and PPI
+        * 1. hdr
         * 2. skb linear data
         * 3. skb fragment data
         */
@@ -333,9 +332,6 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                                        offset_in_page(hdr),
                                        len, &pb[slots_used]);

-       packet->rmsg_size = len;
-       packet->rmsg_pgcnt = slots_used;
-
        slots_used += fill_pg_buf(virt_to_page(data),
                                offset_in_page(data),
                                skb_headlen(skb), &pb[slots_used]);
@@ -609,7 +605,7 @@ do_send:
        rndis_msg->msg_len += rndis_msg_size;
        packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
-                                              skb, packet);
+                                              skb, &page_buf[0]);

        ret = netvsc_send(net_device_ctx->device_ctx, packet);

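For anyone who wants to try this, the module can be rebuilt with the patch roughly as follows. This is only a sketch: the patch file name is made up, and it assumes an ubuntu-vivid source tree plus build headers for the running kernel.

 # inside the ubuntu-vivid kernel tree, with the patch above saved as
 # revert-partial-copy.patch (hypothetical name)
 patch -p1 < revert-partial-copy.patch

 # rebuild only the hv_netvsc module against the running kernel's build tree
 make -C /lib/modules/$(uname -r)/build M=$PWD/drivers/net/hyperv modules

 # reload the module (this briefly takes the synthetic NIC down)
 sudo rmmod hv_netvsc
 sudo insmod drivers/net/hyperv/hv_netvsc.ko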
https://bugs.launchpad.net/bugs/1521053

Title:
  Network Performance dropping between vms on different location in
  Azure

Status in linux package in Ubuntu:
  Invalid
Status in linux source package in Vivid:
  In Progress

Bug description:
  [Impact]

  Ubuntu VMs in different Azure locations, North Europe and West Europe in this case, have a network performance issue.
  Throughput between them should be around 100 MB/s, but it drops to around 0.3 MB/s when the problem occurs.

  [Fix]

  Upstream commit
  0d158852a8089099a6959ae235b20f230871982f ("hv_netvsc: Clean up two unused variables")

  Kernels above 3.19.0-28-generic (ubuntu-vivid) are affected.
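
  A quick way to check whether a given kernel git tree already contains this commit (a sketch; run it inside whichever kernel tree you are building from):

   # succeeds if the cited commit is an ancestor of the current HEAD
   git merge-base --is-ancestor 0d158852a8089099a6959ae235b20f230871982f HEAD \
       && echo "commit present" || echo "commit missing"

   # on a running VM, compare the kernel version against the affected range
   uname -r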

  [Testcase]

  Create two VMs, one in North Europe and one in West Europe, then run the test scripts below.

  NE VM

  - netcat & nload
   while true; do netcat -l 8080 < /dev/zero; done;
   nload -u M eth0 (requires the nload package)

  - iperf
   iperf -s -f M

  WE VM

  - netcat
   for i in {1..1000}
   do
    timeout 30s nc NE_HOST 8080 > /dev/null
   done

  - iperf
   iperf -c NE_HOST -f M

  Network performance dropping can be seen frequently.
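
  To make the drops easier to spot over time, a loop like the one below can be left running on the WE VM (a sketch, assuming iperf is installed on both ends and NE_HOST resolves to the North Europe VM):

   # log iperf throughput every 30 seconds; drops show up as sudden low MB/s values
   while true; do
       date
       iperf -c NE_HOST -f M -t 30 | tail -n 1
       sleep 30
   done | tee iperf-throughput.log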

  More Tests
  http://pastebin.ubuntu.com/13657083/


