[c/azure][PATCH 0/8] Upstream Commits Needed for DPDK on Azure


Marcelo Henrique Cerri
BugLink: http://bugs.launchpad.net/bugs/1812123

Dexuan Cui (1):
  vmbus: fix subchannel removal

Stephen Hemminger (7):
  vmbus: keep pointer to ring buffer page
  uio: introduce UIO_MEM_IOVA
  hv_uio_generic: map ringbuffer phys addr
  uio_hv_generic: defer opening vmbus until first use
  uio_hv_generic: set callbacks on open
  vmbus: pass channel to hv_process_channel_removal
  vmbus: split ring buffer allocation from open

 drivers/hv/channel.c         | 270 +++++++++++++++++++----------------
 drivers/hv/channel_mgmt.c    |  17 +--
 drivers/hv/ring_buffer.c     |   1 +
 drivers/hv/vmbus_drv.c       |   3 +-
 drivers/uio/uio.c            |  24 ++--
 drivers/uio/uio_hv_generic.c | 110 +++++++++-----
 include/linux/hyperv.h       |  13 +-
 include/linux/uio_driver.h   |   1 +
 8 files changed, 258 insertions(+), 181 deletions(-)

--
2.17.1



[c/azure][PATCH 1/8] vmbus: keep pointer to ring buffer page

Marcelo Henrique Cerri
From: Stephen Hemminger <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

Avoid converting from struct page to virtual address (and back) by
keeping a pointer to the allocated page instead of the virtual address.
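
For illustration only (not part of the upstream commit), a minimal
sketch of the resulting usage pattern, assuming a channel whose ring
buffer has already been allocated:

  #include <linux/hyperv.h>
  #include <linux/mm.h>   /* page_address() */
  #include <linux/io.h>   /* virt_to_phys() */

  /* Sketch: derive addresses from the stored page only where needed,
   * e.g. for vm_iomap_memory() or a UIO mem resource.
   */
  static phys_addr_t example_ring_phys(struct vmbus_channel *channel)
  {
          void *ring_buffer = page_address(channel->ringbuffer_page);

          return virt_to_phys(ring_buffer);
  }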

Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
(cherry picked from commit 52a42c2a90226dc61c99bbd0cb096deeb52c334b)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/hv/channel.c         | 20 +++++++++-----------
 drivers/uio/uio_hv_generic.c |  5 +++--
 include/linux/hyperv.h       |  2 +-
 3 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ebbe8981ccc3..c247bfae0284 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -90,11 +90,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
  unsigned long flags;
  int ret, err = 0;
  struct page *page;
+ unsigned int order;
 
  if (send_ringbuffer_size % PAGE_SIZE ||
     recv_ringbuffer_size % PAGE_SIZE)
  return -EINVAL;
 
+ order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+
  spin_lock_irqsave(&newchannel->lock, flags);
  if (newchannel->state == CHANNEL_OPEN_STATE) {
  newchannel->state = CHANNEL_OPENING_STATE;
@@ -109,21 +112,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
  /* Allocate the ring buffer */
  page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
- GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size +
- recv_ringbuffer_size));
+ GFP_KERNEL|__GFP_ZERO, order);
 
  if (!page)
- page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
-   get_order(send_ringbuffer_size +
-     recv_ringbuffer_size));
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
  if (!page) {
  err = -ENOMEM;
  goto error_set_chnstate;
  }
 
- newchannel->ringbuffer_pages = page_address(page);
+ newchannel->ringbuffer_page = page;
  newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
    recv_ringbuffer_size) >> PAGE_SHIFT;
 
@@ -238,8 +237,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 error_free_pages:
  hv_ringbuffer_cleanup(&newchannel->outbound);
  hv_ringbuffer_cleanup(&newchannel->inbound);
- __free_pages(page,
-     get_order(send_ringbuffer_size + recv_ringbuffer_size));
+ __free_pages(page, order);
 error_set_chnstate:
  newchannel->state = CHANNEL_OPEN_STATE;
  return err;
@@ -666,8 +664,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
  hv_ringbuffer_cleanup(&channel->outbound);
  hv_ringbuffer_cleanup(&channel->inbound);
 
- free_pages((unsigned long)channel->ringbuffer_pages,
- get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+ __free_pages(channel->ringbuffer_page,
+     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
 
 out:
  return ret;
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index c690d100adcd..0e1e8cf89f34 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -133,11 +133,12 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
  = container_of(kobj, struct vmbus_channel, kobj);
  struct hv_device *dev = channel->primary_channel->device_obj;
  u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ void *ring_buffer = page_address(channel->ringbuffer_page);
 
  dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
  q_idx, vma_pages(vma), vma->vm_pgoff);
 
- return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+ return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
        channel->ringbuffer_pagecount << PAGE_SHIFT);
 }
 
@@ -226,7 +227,7 @@ hv_uio_probe(struct hv_device *dev,
  /* mem resources */
  pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
  pdata->info.mem[TXRX_RING_MAP].addr
- = (uintptr_t)dev->channel->ringbuffer_pages;
+ = (uintptr_t)page_address(dev->channel->ringbuffer_page);
  pdata->info.mem[TXRX_RING_MAP].size
  = dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
  pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f8e63faf3230..e39b7113b8e4 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -724,7 +724,7 @@ struct vmbus_channel {
  u32 ringbuffer_gpadlhandle;
 
  /* Allocated memory for ring buffer */
- void *ringbuffer_pages;
+ struct page *ringbuffer_page;
  u32 ringbuffer_pagecount;
  struct hv_ring_buffer_info outbound; /* send to parent */
  struct hv_ring_buffer_info inbound; /* receive from parent */
--
2.17.1



[c/azure][PATCH 2/8] uio: introduce UIO_MEM_IOVA

Marcelo Henrique Cerri
From: Stephen Hemminger <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

Introduce the concept of mapping physical memory locations that
are normal memory. The new type UIO_MEM_IOVA is similar to the
existing UIO_MEM_PHYS, but the backing memory is not marked as uncached.

Also, re-indent the related switch statement to the currently used style.
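
As a sketch only (not part of this commit; the function and parameter
names are illustrative), a driver would describe such a cached,
physically contiguous region roughly like this:

  #include <linux/uio_driver.h>
  #include <linux/io.h>           /* virt_to_phys() */

  /* Register one normal-memory region that userspace can mmap. */
  static void example_set_iova_region(struct uio_info *info,
                                      void *ring_buffer, size_t ring_size)
  {
          info->mem[0].name    = "rings";
          info->mem[0].addr    = (uintptr_t)virt_to_phys(ring_buffer);
          info->mem[0].size    = ring_size;
          /* Mapped like UIO_MEM_PHYS, but without pgprot_noncached(). */
          info->mem[0].memtype = UIO_MEM_IOVA;
  }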

Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
(cherry picked from commit bfddabfa230452cea32aae82f9cd85ab22601acf)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/uio/uio.c          | 24 +++++++++++++-----------
 include/linux/uio_driver.h |  1 +
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index d5b2efae82fc..5cff6cb84fd4 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -739,7 +739,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
  return -EINVAL;
 
  vma->vm_ops = &uio_physical_vm_ops;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
  /*
  * We cannot use the vm_iomap_memory() helper here,
@@ -796,18 +797,19 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
  }
 
  switch (idev->info->mem[mi].memtype) {
- case UIO_MEM_PHYS:
- ret = uio_mmap_physical(vma);
- break;
- case UIO_MEM_LOGICAL:
- case UIO_MEM_VIRTUAL:
- ret = uio_mmap_logical(vma);
- break;
- default:
- ret = -EINVAL;
+ case UIO_MEM_IOVA:
+ case UIO_MEM_PHYS:
+ ret = uio_mmap_physical(vma);
+ break;
+ case UIO_MEM_LOGICAL:
+ case UIO_MEM_VIRTUAL:
+ ret = uio_mmap_logical(vma);
+ break;
+ default:
+ ret = -EINVAL;
  }
 
-out:
+ out:
  mutex_unlock(&idev->info_lock);
  return ret;
 }
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6f8b68cd460f..a3cd7cb67a69 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -133,6 +133,7 @@ extern void uio_event_notify(struct uio_info *info);
 #define UIO_MEM_PHYS 1
 #define UIO_MEM_LOGICAL 2
 #define UIO_MEM_VIRTUAL 3
+#define UIO_MEM_IOVA 4
 
 /* defines for uio_port->porttype */
 #define UIO_PORT_NONE 0
--
2.17.1



[c/azure][PATCH 3/8] hv_uio_generic: map ringbuffer phys addr

Marcelo Henrique Cerri
From: Stephen Hemminger <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

The ring buffer is a contiguous IOVA region and is mapped via its
physical address for the sysfs file. Use the same method for the UIO
mapping.
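
For context, a userspace consumer (for example DPDK's netvsc PMD) maps
UIO memory region N of a uio device at file offset N * page_size, per
the standard UIO convention; a minimal sketch, assuming the device node
path is known:

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  /* Map UIO memory region 'idx' of the given uio device node. */
  static void *map_uio_region(const char *dev_path, int idx, size_t len)
  {
          int fd = open(dev_path, O_RDWR);
          void *p;

          if (fd < 0)
                  return NULL;
          p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, (off_t)idx * sysconf(_SC_PAGESIZE));
          close(fd);
          return p == MAP_FAILED ? NULL : p;
  }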

Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
(cherry picked from commit 9da197f1df40c838f0f06abf94cd23b4ed81e522)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/uio/uio_hv_generic.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 0e1e8cf89f34..da5c1496082c 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -227,10 +227,10 @@ hv_uio_probe(struct hv_device *dev,
  /* mem resources */
  pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
  pdata->info.mem[TXRX_RING_MAP].addr
- = (uintptr_t)page_address(dev->channel->ringbuffer_page);
+ = (uintptr_t)virt_to_phys(page_address(dev->channel->ringbuffer_page));
  pdata->info.mem[TXRX_RING_MAP].size
  = dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
- pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
+ pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;
 
  pdata->info.mem[INT_PAGE_MAP].name = "int_page";
  pdata->info.mem[INT_PAGE_MAP].addr
--
2.17.1



[c/azure][PATCH 4/8] uio_hv_generic: defer opening vmbus until first use

Marcelo Henrique Cerri
From: Stephen Hemminger <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

This fixes two design flaws in hv_uio_generic.

Since hv_uio_probe is called from vmbus_probe with a lock held,
it can potentially sleep in an atomic section because
vmbus_open waits for a response from the host.

The hv_uio_generic driver could not handle applications
exiting and restarting because the vmbus channel was
persistent. Change the semantics so that the buffers are
allocated on probe, but not attached to the host until the
device is opened.
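
The guard used for this is a first-open/last-close reference count; a
minimal sketch of that idiom (the helpers are placeholders standing in
for vmbus_connect_ring()/vmbus_disconnect_ring()):

  #include <linux/atomic.h>

  static atomic_t users = ATOMIC_INIT(0);

  static int attach_to_host(void)    { return 0; }   /* placeholder */
  static void detach_from_host(void) { }             /* placeholder */

  static int example_open(void)
  {
          int ret;

          /* Only the first opener attaches the ring to the host. */
          if (atomic_inc_return(&users) != 1)
                  return 0;

          ret = attach_to_host();
          if (ret)
                  atomic_dec(&users);   /* undo on failure */
          return ret;
  }

  static void example_release(void)
  {
          /* Only the last closer detaches it again. */
          if (atomic_dec_and_test(&users))
                  detach_from_host();
  }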

Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
(cherry picked from commit cdfa835c6e5e87d145f9f632b58843de97509f2b)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/uio/uio_hv_generic.c | 104 +++++++++++++++++++++++++----------
 1 file changed, 74 insertions(+), 30 deletions(-)

diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index da5c1496082c..418e3cbbb869 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -58,6 +58,7 @@ enum hv_uio_map {
 struct hv_uio_private_data {
  struct uio_info info;
  struct hv_device *device;
+ atomic_t refcnt;
 
  void *recv_buf;
  u32 recv_gpadl;
@@ -131,12 +132,10 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
 {
  struct vmbus_channel *channel
  = container_of(kobj, struct vmbus_channel, kobj);
- struct hv_device *dev = channel->primary_channel->device_obj;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
  void *ring_buffer = page_address(channel->ringbuffer_page);
 
- dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
- q_idx, vma_pages(vma), vma->vm_pgoff);
+ if (channel->state != CHANNEL_OPENED_STATE)
+ return -ENODEV;
 
  return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
        channel->ringbuffer_pagecount << PAGE_SHIFT);
@@ -179,57 +178,103 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
  }
 }
 
+/* free the reserved buffers for send and receive */
 static void
 hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
 {
- if (pdata->send_gpadl)
+ if (pdata->send_gpadl) {
  vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
- vfree(pdata->send_buf);
+ pdata->send_gpadl = 0;
+ vfree(pdata->send_buf);
+ }
 
- if (pdata->recv_gpadl)
+ if (pdata->recv_gpadl) {
  vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
- vfree(pdata->recv_buf);
+ pdata->recv_gpadl = 0;
+ vfree(pdata->recv_buf);
+ }
+}
+
+/* VMBus primary channel is opened on first use */
+static int
+hv_uio_open(struct uio_info *info, struct inode *inode)
+{
+ struct hv_uio_private_data *pdata
+ = container_of(info, struct hv_uio_private_data, info);
+ struct hv_device *dev = pdata->device;
+ int ret;
+
+ if (atomic_inc_return(&pdata->refcnt) != 1)
+ return 0;
+
+ ret = vmbus_connect_ring(dev->channel,
+ hv_uio_channel_cb, dev->channel);
+
+ if (ret == 0)
+ dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ else
+ atomic_dec(&pdata->refcnt);
+
+ return ret;
+}
+
+/* VMBus primary channel is closed on last close */
+static int
+hv_uio_release(struct uio_info *info, struct inode *inode)
+{
+ struct hv_uio_private_data *pdata
+ = container_of(info, struct hv_uio_private_data, info);
+ struct hv_device *dev = pdata->device;
+ int ret = 0;
+
+ if (atomic_dec_and_test(&pdata->refcnt))
+ ret = vmbus_disconnect_ring(dev->channel);
+
+ return ret;
 }
 
 static int
 hv_uio_probe(struct hv_device *dev,
      const struct hv_vmbus_device_id *dev_id)
 {
+ struct vmbus_channel *channel = dev->channel;
  struct hv_uio_private_data *pdata;
+ void *ring_buffer;
  int ret;
 
+ /* Communicating with host has to be via shared memory not hypercall */
+ if (!channel->offermsg.monitor_allocated) {
+ dev_err(&dev->device, "vmbus channel requires hypercall\n");
+ return -ENOTSUPP;
+ }
+
  pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
  if (!pdata)
  return -ENOMEM;
 
- ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
- HV_RING_SIZE * PAGE_SIZE, NULL, 0,
- hv_uio_channel_cb, dev->channel);
+ ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
+       HV_RING_SIZE * PAGE_SIZE);
  if (ret)
  goto fail;
 
- /* Communicating with host has to be via shared memory not hypercall */
- if (!dev->channel->offermsg.monitor_allocated) {
- dev_err(&dev->device, "vmbus channel requires hypercall\n");
- ret = -ENOTSUPP;
- goto fail_close;
- }
-
- dev->channel->inbound.ring_buffer->interrupt_mask = 1;
- set_channel_read_mode(dev->channel, HV_CALL_ISR);
+ set_channel_read_mode(channel, HV_CALL_ISR);
 
  /* Fill general uio info */
  pdata->info.name = "uio_hv_generic";
  pdata->info.version = DRIVER_VERSION;
  pdata->info.irqcontrol = hv_uio_irqcontrol;
+ pdata->info.open = hv_uio_open;
+ pdata->info.release = hv_uio_release;
  pdata->info.irq = UIO_IRQ_CUSTOM;
+ atomic_set(&pdata->refcnt, 0);
 
  /* mem resources */
  pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
+ ring_buffer = page_address(channel->ringbuffer_page);
  pdata->info.mem[TXRX_RING_MAP].addr
- = (uintptr_t)virt_to_phys(page_address(dev->channel->ringbuffer_page));
+ = (uintptr_t)virt_to_phys(ring_buffer);
  pdata->info.mem[TXRX_RING_MAP].size
- = dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
+ = channel->ringbuffer_pagecount << PAGE_SHIFT;
  pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;
 
  pdata->info.mem[INT_PAGE_MAP].name = "int_page";
@@ -250,7 +295,7 @@ hv_uio_probe(struct hv_device *dev,
  goto fail_close;
  }
 
- ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf,
+ ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
     RECV_BUFFER_SIZE, &pdata->recv_gpadl);
  if (ret)
  goto fail_close;
@@ -264,14 +309,13 @@ hv_uio_probe(struct hv_device *dev,
  pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
  pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
 
-
  pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
  if (pdata->send_buf == NULL) {
  ret = -ENOMEM;
  goto fail_close;
  }
 
- ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf,
+ ret = vmbus_establish_gpadl(channel, pdata->send_buf,
     SEND_BUFFER_SIZE, &pdata->send_gpadl);
  if (ret)
  goto fail_close;
@@ -293,10 +337,10 @@ hv_uio_probe(struct hv_device *dev,
  goto fail_close;
  }
 
- vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
- vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+ vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
+ vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
 
- ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
+ ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
  if (ret)
  dev_notice(&dev->device,
    "sysfs create ring bin file failed; %d\n", ret);
@@ -307,7 +351,6 @@ hv_uio_probe(struct hv_device *dev,
 
 fail_close:
  hv_uio_cleanup(dev, pdata);
- vmbus_close(dev->channel);
 fail:
  kfree(pdata);
 
@@ -325,7 +368,8 @@ hv_uio_remove(struct hv_device *dev)
  uio_unregister_device(&pdata->info);
  hv_uio_cleanup(dev, pdata);
  hv_set_drvdata(dev, NULL);
- vmbus_close(dev->channel);
+
+ vmbus_free_ring(dev->channel);
  kfree(pdata);
  return 0;
 }
--
2.17.1



[c/azure][PATCH 5/8] uio_hv_generic: set callbacks on open

Marcelo Henrique Cerri
From: Stephen Hemminger <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

This fixes the problem where a uio application was unable to
use multiple queues on restart. The root cause is that the callbacks
are cleared on disconnect. Change to setting up the callbacks
every time in open.

Fixes: cdfa835c6e5e ("uio_hv_generic: defer opening vmbus until first use")
Reported-by: Mohammed Gamal <[hidden email]>
Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
(cherry picked from commit 5e3c420dcca53766dec57d5bf4df8eecdb953c03)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/uio/uio_hv_generic.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 418e3cbbb869..84f3d418694c 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -207,9 +207,11 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
  if (atomic_inc_return(&pdata->refcnt) != 1)
  return 0;
 
+ vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
+ vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+
  ret = vmbus_connect_ring(dev->channel,
  hv_uio_channel_cb, dev->channel);
-
  if (ret == 0)
  dev->channel->inbound.ring_buffer->interrupt_mask = 1;
  else
@@ -337,9 +339,6 @@ hv_uio_probe(struct hv_device *dev,
  goto fail_close;
  }
 
- vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
- vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
-
  ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
  if (ret)
  dev_notice(&dev->device,
--
2.17.1



[c/azure][PATCH 6/8] vmbus: pass channel to hv_process_channel_removal

Marcelo Henrique Cerri
From: Stephen Hemminger <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

Rather than passing the relid and then looking up the channel,
pass the channel directly, since the caller already knows it.

Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
(cherry picked from commit 800b932969c53c4044ff9f9fd1ee793a87fa8ef0)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/hv/channel.c      |  3 +--
 drivers/hv/channel_mgmt.c | 17 +++++------------
 drivers/hv/vmbus_drv.c    |  3 +--
 include/linux/hyperv.h    |  2 +-
 4 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index c247bfae0284..00ca3008d412 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -696,8 +696,7 @@ void vmbus_close(struct vmbus_channel *channel)
  wait_for_completion(&cur_channel->rescind_event);
  mutex_lock(&vmbus_connection.channel_mutex);
  vmbus_close_internal(cur_channel);
- hv_process_channel_removal(
-   cur_channel->offermsg.child_relid);
+ hv_process_channel_removal(cur_channel);
  } else {
  mutex_lock(&vmbus_connection.channel_mutex);
  vmbus_close_internal(cur_channel);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 16eb9b3f1cb1..ab2d9f28ebe7 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -385,21 +385,14 @@ static void vmbus_release_relid(u32 relid)
  trace_vmbus_release_relid(&msg, ret);
 }
 
-void hv_process_channel_removal(u32 relid)
+void hv_process_channel_removal(struct vmbus_channel *channel)
 {
+ struct vmbus_channel *primary_channel;
  unsigned long flags;
- struct vmbus_channel *primary_channel, *channel;
 
  BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
-
- /*
- * Make sure channel is valid as we may have raced.
- */
- channel = relid2channel(relid);
- if (!channel)
- return;
-
  BUG_ON(!channel->rescind);
+
  if (channel->target_cpu != get_cpu()) {
  put_cpu();
  smp_call_function_single(channel->target_cpu,
@@ -429,7 +422,7 @@ void hv_process_channel_removal(u32 relid)
  cpumask_clear_cpu(channel->target_cpu,
   &primary_channel->alloced_cpus_in_node);
 
- vmbus_release_relid(relid);
+ vmbus_release_relid(channel->offermsg.child_relid);
 
  free_channel(channel);
 }
@@ -1010,7 +1003,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
  * The channel is currently not open;
  * it is safe for us to cleanup the channel.
  */
- hv_process_channel_removal(rescind->child_relid);
+ hv_process_channel_removal(channel);
  } else {
  complete(&channel->rescind_event);
  }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c9a466be7709..28c6f00d9d2c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -768,10 +768,9 @@ static void vmbus_device_release(struct device *device)
  struct vmbus_channel *channel = hv_dev->channel;
 
  mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel->offermsg.child_relid);
+ hv_process_channel_removal(channel);
  mutex_unlock(&vmbus_connection.channel_mutex);
  kfree(hv_dev);
-
 }
 
 /* The one and only one */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index e39b7113b8e4..815b0a0102da 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1434,7 +1434,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
  const int *srv_version, int srv_vercnt,
  int *nego_fw_version, int *nego_srv_version);
 
-void hv_process_channel_removal(u32 relid);
+void hv_process_channel_removal(struct vmbus_channel *channel);
 
 void vmbus_setevent(struct vmbus_channel *channel);
 /*
--
2.17.1



[c/azure][PATCH 7/8] vmbus: split ring buffer allocation from open

Marcelo Henrique Cerri
From: Stephen Hemminger <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

The UIO driver needs the ring buffer to be persistent (reused)
across open/close. Split the allocation and setup of the ring buffer
out of vmbus_open. For normal vmbus_open/vmbus_close usage there
are no changes; this only impacts uio_hv_generic, which needs to keep
the ring buffer memory and reuse it when the application restarts.
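
Combined with the earlier uio_hv_generic changes, the intended lifecycle
for a driver that keeps its ring buffer across application restarts is,
as a sketch (error handling omitted; my_channel_cb is a placeholder):

  /* probe: reserve the ring buffer once */
  ret = vmbus_alloc_ring(channel, send_size, recv_size);

  /* userspace open: attach the existing ring buffer to the host */
  ret = vmbus_connect_ring(channel, my_channel_cb, channel);

  /* userspace close: detach from the host, memory stays allocated */
  ret = vmbus_disconnect_ring(channel);

  /* remove: finally release the ring buffer pages */
  vmbus_free_ring(channel);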

Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
(cherry picked from commit ae6935ed7d424ffa74d634da00767e7b03c98fd3)
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/hv/channel.c     | 267 ++++++++++++++++++++++-----------------
 drivers/hv/ring_buffer.c |   1 +
 include/linux/hyperv.h   |   9 ++
 3 files changed, 162 insertions(+), 115 deletions(-)

diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 00ca3008d412..0ecbdcf3ad15 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -78,84 +78,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
 }
 EXPORT_SYMBOL_GPL(vmbus_setevent);
 
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
-     u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
-     void (*onchannelcallback)(void *context), void *context)
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
 {
- struct vmbus_channel_open_channel *open_msg;
- struct vmbus_channel_msginfo *open_info = NULL;
- unsigned long flags;
- int ret, err = 0;
- struct page *page;
- unsigned int order;
+ hv_ringbuffer_cleanup(&channel->outbound);
+ hv_ringbuffer_cleanup(&channel->inbound);
 
- if (send_ringbuffer_size % PAGE_SIZE ||
-    recv_ringbuffer_size % PAGE_SIZE)
- return -EINVAL;
+ if (channel->ringbuffer_page) {
+ __free_pages(channel->ringbuffer_page,
+     get_order(channel->ringbuffer_pagecount
+       << PAGE_SHIFT));
+ channel->ringbuffer_page = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
 
- order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+     u32 send_size, u32 recv_size)
+{
+ struct page *page;
+ int order;
 
- spin_lock_irqsave(&newchannel->lock, flags);
- if (newchannel->state == CHANNEL_OPEN_STATE) {
- newchannel->state = CHANNEL_OPENING_STATE;
- } else {
- spin_unlock_irqrestore(&newchannel->lock, flags);
+ if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
  return -EINVAL;
- }
- spin_unlock_irqrestore(&newchannel->lock, flags);
-
- newchannel->onchannel_callback = onchannelcallback;
- newchannel->channel_callback_context = context;
 
  /* Allocate the ring buffer */
+ order = get_order(send_size + recv_size);
  page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
  GFP_KERNEL|__GFP_ZERO, order);
 
  if (!page)
  page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
- if (!page) {
- err = -ENOMEM;
- goto error_set_chnstate;
- }
+ if (!page)
+ return -ENOMEM;
 
  newchannel->ringbuffer_page = page;
- newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
-   recv_ringbuffer_size) >> PAGE_SHIFT;
+ newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+ newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
 
- ret = hv_ringbuffer_init(&newchannel->outbound, page,
- send_ringbuffer_size >> PAGE_SHIFT);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
+static int __vmbus_open(struct vmbus_channel *newchannel,
+       void *userdata, u32 userdatalen,
+       void (*onchannelcallback)(void *context), void *context)
+{
+ struct vmbus_channel_open_channel *open_msg;
+ struct vmbus_channel_msginfo *open_info = NULL;
+ struct page *page = newchannel->ringbuffer_page;
+ u32 send_pages, recv_pages;
+ unsigned long flags;
+ int err;
 
- ret = hv_ringbuffer_init(&newchannel->inbound,
- &page[send_ringbuffer_size >> PAGE_SHIFT],
- recv_ringbuffer_size >> PAGE_SHIFT);
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
+ if (userdatalen > MAX_USER_DEFINED_BYTES)
+ return -EINVAL;
+
+ send_pages = newchannel->ringbuffer_send_offset;
+ recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+ spin_lock_irqsave(&newchannel->lock, flags);
+ if (newchannel->state != CHANNEL_OPEN_STATE) {
+ spin_unlock_irqrestore(&newchannel->lock, flags);
+ return -EINVAL;
  }
+ spin_unlock_irqrestore(&newchannel->lock, flags);
 
+ newchannel->state = CHANNEL_OPENING_STATE;
+ newchannel->onchannel_callback = onchannelcallback;
+ newchannel->channel_callback_context = context;
+
+ err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+ if (err)
+ goto error_clean_ring;
+
+ err = hv_ringbuffer_init(&newchannel->inbound,
+ &page[send_pages], recv_pages);
+ if (err)
+ goto error_clean_ring;
 
  /* Establish the gpadl for the ring buffer */
  newchannel->ringbuffer_gpadlhandle = 0;
 
- ret = vmbus_establish_gpadl(newchannel,
-    page_address(page),
-    send_ringbuffer_size +
-    recv_ringbuffer_size,
+ err = vmbus_establish_gpadl(newchannel,
+    page_address(newchannel->ringbuffer_page),
+    (send_pages + recv_pages) << PAGE_SHIFT,
     &newchannel->ringbuffer_gpadlhandle);
-
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
+ if (err)
+ goto error_clean_ring;
 
  /* Create and init the channel open message */
  open_info = kmalloc(sizeof(*open_info) +
@@ -174,15 +186,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
  open_msg->openid = newchannel->offermsg.child_relid;
  open_msg->child_relid = newchannel->offermsg.child_relid;
  open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
- open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
-  PAGE_SHIFT;
+ open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
  open_msg->target_vp = newchannel->target_vp;
 
- if (userdatalen > MAX_USER_DEFINED_BYTES) {
- err = -EINVAL;
- goto error_free_gpadl;
- }
-
  if (userdatalen)
  memcpy(open_msg->userdata, userdata, userdatalen);
 
@@ -193,18 +199,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
  if (newchannel->rescind) {
  err = -ENODEV;
- goto error_free_gpadl;
+ goto error_free_info;
  }
 
- ret = vmbus_post_msg(open_msg,
+ err = vmbus_post_msg(open_msg,
      sizeof(struct vmbus_channel_open_channel), true);
 
- trace_vmbus_open(open_msg, ret);
+ trace_vmbus_open(open_msg, err);
 
- if (ret != 0) {
- err = ret;
+ if (err != 0)
  goto error_clean_msglist;
- }
 
  wait_for_completion(&open_info->waitevent);
 
@@ -214,12 +218,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
  if (newchannel->rescind) {
  err = -ENODEV;
- goto error_free_gpadl;
+ goto error_free_info;
  }
 
  if (open_info->response.open_result.status) {
  err = -EAGAIN;
- goto error_free_gpadl;
+ goto error_free_info;
  }
 
  newchannel->state = CHANNEL_OPENED_STATE;
@@ -230,18 +234,50 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
  spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
  list_del(&open_info->msglistentry);
  spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
+error_free_info:
+ kfree(open_info);
 error_free_gpadl:
  vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
- kfree(open_info);
-error_free_pages:
+ newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
  hv_ringbuffer_cleanup(&newchannel->outbound);
  hv_ringbuffer_cleanup(&newchannel->inbound);
- __free_pages(page, order);
-error_set_chnstate:
  newchannel->state = CHANNEL_OPEN_STATE;
  return err;
 }
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+       void (*onchannelcallback)(void *context), void *context)
+{
+ return  __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+       void *userdata, u32 userdatalen,
+       void (*onchannelcallback)(void *context), void *context)
+{
+ int err;
+
+ err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+       recv_ringbuffer_size);
+ if (err)
+ return err;
+
+ err = __vmbus_open(newchannel, userdata, userdatalen,
+   onchannelcallback, context);
+ if (err)
+ vmbus_free_ring(newchannel);
+
+ return err;
+}
 EXPORT_SYMBOL_GPL(vmbus_open);
 
 /* Used for Hyper-V Socket: a guest client's connect() to the host */
@@ -618,10 +654,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
  * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
  * here we should skip most of the below cleanup work.
  */
- if (channel->state != CHANNEL_OPENED_STATE) {
- ret = -EINVAL;
- goto out;
- }
+ if (channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
 
  channel->state = CHANNEL_OPEN_STATE;
 
@@ -643,11 +677,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
  * If we failed to post the close msg,
  * it is perhaps better to leak memory.
  */
- goto out;
  }
 
  /* Tear down the gpadl for the channel's ring buffer */
- if (channel->ringbuffer_gpadlhandle) {
+ else if (channel->ringbuffer_gpadlhandle) {
  ret = vmbus_teardown_gpadl(channel,
    channel->ringbuffer_gpadlhandle);
  if (ret) {
@@ -656,59 +689,63 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
  * If we failed to teardown gpadl,
  * it is perhaps better to leak memory.
  */
- goto out;
  }
- }
-
- /* Cleanup the ring buffers for this channel */
- hv_ringbuffer_cleanup(&channel->outbound);
- hv_ringbuffer_cleanup(&channel->inbound);
 
- __free_pages(channel->ringbuffer_page,
-     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
+ channel->ringbuffer_gpadlhandle = 0;
+ }
 
-out:
  return ret;
 }
 
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+/* disconnect ring - close all channels */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
 {
- struct list_head *cur, *tmp;
- struct vmbus_channel *cur_channel;
+ struct vmbus_channel *cur_channel, *tmp;
+ unsigned long flags;
+ LIST_HEAD(list);
+ int ret;
 
- if (channel->primary_channel != NULL) {
- /*
- * We will only close sub-channels when
- * the primary is closed.
- */
- return;
- }
- /*
- * Close all the sub-channels first and then close the
- * primary channel.
- */
- list_for_each_safe(cur, tmp, &channel->sc_list) {
- cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
- if (cur_channel->rescind) {
+ if (channel->primary_channel != NULL)
+ return -EINVAL;
+
+ /* Snapshot the list of subchannels */
+ spin_lock_irqsave(&channel->lock, flags);
+ list_splice_init(&channel->sc_list, &list);
+ channel->num_sc = 0;
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+ if (cur_channel->rescind)
  wait_for_completion(&cur_channel->rescind_event);
- mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(cur_channel);
- hv_process_channel_removal(cur_channel);
- } else {
- mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(cur_channel);
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ if (vmbus_close_internal(cur_channel) == 0) {
+ vmbus_free_ring(cur_channel);
+
+ if (cur_channel->rescind)
+ hv_process_channel_removal(cur_channel);
  }
  mutex_unlock(&vmbus_connection.channel_mutex);
  }
+
  /*
  * Now close the primary.
  */
  mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(channel);
+ ret = vmbus_close_internal(channel);
  mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+ if (vmbus_disconnect_ring(channel) == 0)
+ vmbus_free_ring(channel);
 }
 EXPORT_SYMBOL_GPL(vmbus_close);
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index be3c8b10b84a..240774e2bc2e 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -241,6 +241,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
  vunmap(ring_info->ring_buffer);
+ ring_info->ring_buffer = NULL;
 }
 
 /* Write to the ring buffer. */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 815b0a0102da..89d09b4064a4 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -726,6 +726,7 @@ struct vmbus_channel {
  /* Allocated memory for ring buffer */
  struct page *ringbuffer_page;
  u32 ringbuffer_pagecount;
+ u32 ringbuffer_send_offset;
  struct hv_ring_buffer_info outbound; /* send to parent */
  struct hv_ring_buffer_info inbound; /* receive from parent */
 
@@ -1013,6 +1014,14 @@ struct vmbus_packet_mpb_array {
  struct hv_mpb_array range;
 } __packed;
 
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+     u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+       void (*onchannel_callback)(void *context),
+       void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
 
 extern int vmbus_open(struct vmbus_channel *channel,
     u32 send_ringbuffersize,
--
2.17.1



[c/azure][PATCH 8/8] vmbus: fix subchannel removal

Marcelo Henrique Cerri
From: Dexuan Cui <[hidden email]>

BugLink: http://bugs.launchpad.net/bugs/1812123

The changes to split ring allocation from open/close broke
the cleanup of subchannels. This resulted in problems using
uio on network devices because the subchannel was left behind
when the network device was unbound.

The cause was in the disconnect logic, which used list_splice
to move the subchannel list into a local variable. This won't
work because the subchannel list is needed later during the
processing of the rescind messages (relid2channel).

The fix is to just leave the subchannel list in place,
which is what the original code did. The list is cleaned
up later when the host rescind is processed.

Without the fix, we have a lot of "hang" issues in netvsc when we
try to change the NIC's MTU, set the number of channels, etc.

Fixes: ae6935ed7d42 ("vmbus: split ring buffer allocation from open")
Cc: [hidden email]
Signed-off-by: Stephen Hemminger <[hidden email]>
Signed-off-by: Dexuan Cui <[hidden email]>
Signed-off-by: Sasha Levin <[hidden email]>
(backported from commit b5679cebf780c6f1c2451a73bf1842a4409840e7)
[marcelo.cerri: keep `channel->num_sc = 0;` to avoid cherry picking
 4d3c5c69191f ("Drivers: hv: vmbus: Remove the useless API
 vmbus_get_outgoing_channel()")]
Signed-off-by: Marcelo Henrique Cerri <[hidden email]>
---
 drivers/hv/channel.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 0ecbdcf3ad15..dbc3a20bdb33 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -701,20 +701,14 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 int vmbus_disconnect_ring(struct vmbus_channel *channel)
 {
  struct vmbus_channel *cur_channel, *tmp;
- unsigned long flags;
- LIST_HEAD(list);
  int ret;
 
  if (channel->primary_channel != NULL)
  return -EINVAL;
 
- /* Snapshot the list of subchannels */
- spin_lock_irqsave(&channel->lock, flags);
- list_splice_init(&channel->sc_list, &list);
  channel->num_sc = 0;
- spin_unlock_irqrestore(&channel->lock, flags);
 
- list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+ list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
  if (cur_channel->rescind)
  wait_for_completion(&cur_channel->rescind_event);
 
--
2.17.1

