[Disco 00/10] CVE-2019-3016

Thadeu Lima de Souza Cascardo-3
This backports the guest mapping API and some followup fixes.

It has been build-tested on all platforms.

Boris Ostrovsky (5):
  UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
  UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
  UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
  UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
    missed
  UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure

Christian Borntraeger (1):
  kvm: fix compile on s390 part 2

KarimAllah Ahmed (2):
  KVM: Introduce a new guest mapping API
  KVM: Properly check if "page" is valid in kvm_vcpu_unmap

Paolo Bonzini (2):
  kvm: fix compilation on aarch64
  kvm: fix compilation on s390

 arch/x86/include/asm/kvm_host.h |   4 +-
 arch/x86/kvm/x86.c              |  67 ++++++++------
 include/linux/kvm_host.h        |  33 +++++++
 include/linux/kvm_types.h       |   9 +-
 virt/kvm/kvm_main.c             | 149 +++++++++++++++++++++++++++++++-
 5 files changed, 233 insertions(+), 29 deletions(-)

--
2.24.0



[Disco 01/10] KVM: Introduce a new guest mapping API

Thadeu Lima de Souza Cascardo-3
From: KarimAllah Ahmed <[hidden email]>

CVE-2019-3016
CVE-2020-3016

In KVM, especially for nested guests, there is a dominant pattern of:

        => map guest memory -> do_something -> unmap guest memory

Besides the unnecessary noise that this boilerplate adds to the code,
most of the time the mapping function does not properly handle memory
that is not backed by "struct page". This new guest mapping API
encapsulates most of that boilerplate and also handles guest memory that
is not backed by "struct page".

The current implementation of this API uses memremap for memory that is
not backed by a "struct page", which would lead to a huge slow-down if it
were used for high-frequency mapping operations. The API has no effect on
current setups where guest memory is backed by a "struct page".
Further patches will also introduce a pfn cache, which should
significantly improve the performance of the memremap case.
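
A minimal sketch of the intended calling pattern (the gpa, data and len
values here are hypothetical and error handling is abbreviated; only the
kvm_vcpu_map()/kvm_vcpu_unmap() calls come from this patch):

	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		return -EFAULT;

	/* map.hva is valid whether or not the pfn is backed by a struct page */
	memcpy(map.hva + offset_in_page(gpa), data, len);

	kvm_vcpu_unmap(vcpu, &map, true);	/* true: mark the page dirty */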

Signed-off-by: KarimAllah Ahmed <[hidden email]>
Reviewed-by: Konrad Rzeszutek Wilk <[hidden email]>
Signed-off-by: Paolo Bonzini <[hidden email]>
(cherry picked from commit e45adf665a53df0db37f784ed87c6b57ddd81885)
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 include/linux/kvm_host.h | 28 ++++++++++++++++++
 virt/kvm/kvm_main.c      | 64 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 92 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5a578bb6787d..1d780c2eef36 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -206,6 +206,32 @@ enum {
  READING_SHADOW_PAGE_TABLES,
 };
 
+#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
+
+struct kvm_host_map {
+	/*
+	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there
+	 * is a 'struct page' for it; when using the mem= kernel parameter,
+	 * some memory can be used as guest memory without being managed by
+	 * the host kernel).
+	 * If 'pfn' is not managed by the host kernel, this field is
+	 * initialized to KVM_UNMAPPED_PAGE.
+	 */
+	struct page *page;
+	void *hva;
+	kvm_pfn_t pfn;
+	kvm_pfn_t gfn;
+};
+
+/*
+ * Used to check whether a mapping is valid. Never inspect 'kvm_host_map'
+ * fields directly for that.
+ */
+static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
+{
+	return !!map->hva;
+}
+
 /*
  * Sometimes a large or cross-page mmio needs to be broken up into separate
  * exits for userspace servicing.
@@ -712,7 +738,9 @@ struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3f24108d9714..0753da6e03d3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,6 +1792,70 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+			 struct kvm_host_map *map)
+{
+	kvm_pfn_t pfn;
+	void *hva = NULL;
+	struct page *page = KVM_UNMAPPED_PAGE;
+
+	if (!map)
+		return -EINVAL;
+
+	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (is_error_noslot_pfn(pfn))
+		return -EINVAL;
+
+	if (pfn_valid(pfn)) {
+		page = pfn_to_page(pfn);
+		hva = kmap(page);
+	} else {
+		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	}
+
+	if (!hva)
+		return -EFAULT;
+
+	map->page = page;
+	map->hva = hva;
+	map->pfn = pfn;
+	map->gfn = gfn;
+
+	return 0;
+}
+
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_map);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		    bool dirty)
+{
+	if (!map)
+		return;
+
+	if (!map->hva)
+		return;
+
+	if (map->page)
+		kunmap(map->page);
+	else
+		memunmap(map->hva);
+
+	if (dirty) {
+		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+		kvm_release_pfn_dirty(map->pfn);
+	} else {
+		kvm_release_pfn_clean(map->pfn);
+	}
+
+	map->hva = NULL;
+	map->page = NULL;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
+
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
  kvm_pfn_t pfn;
--
2.24.0



[Disco 02/10] kvm: fix compilation on aarch64

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Paolo Bonzini <[hidden email]>

CVE-2019-3016
CVE-2020-3016

Commit e45adf665a53 ("KVM: Introduce a new guest mapping API", 2019-01-31)
introduced a build failure on aarch64 defconfig:

$ make -j$(nproc) ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- O=out defconfig \
                Image.gz
...
../arch/arm64/kvm/../../../virt/kvm/kvm_main.c:
    In function '__kvm_map_gfn':
../arch/arm64/kvm/../../../virt/kvm/kvm_main.c:1763:9: error:
    implicit declaration of function 'memremap'; did you mean 'memset_p'?
../arch/arm64/kvm/../../../virt/kvm/kvm_main.c:1763:46: error:
    'MEMREMAP_WB' undeclared (first use in this function)
../arch/arm64/kvm/../../../virt/kvm/kvm_main.c:
    In function 'kvm_vcpu_unmap':
../arch/arm64/kvm/../../../virt/kvm/kvm_main.c:1795:3: error:
    implicit declaration of function 'memunmap'; did you mean 'vm_munmap'?

because these functions are declared in <linux/io.h> rather than <asm/io.h>,
and the former was being pulled in already on x86 but not on aarch64.

Reported-by: Nathan Chancellor <[hidden email]>
Signed-off-by: Paolo Bonzini <[hidden email]>
(backported from commit c011d23ba046826ccf8c4a4a6c1d01c9ccaa1403)
[cascardo: only context fixup]
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 virt/kvm/kvm_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0753da6e03d3..c23b84dfe37a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -52,9 +52,9 @@
 #include <linux/sort.h>
 #include <linux/bsearch.h>
 #include <linux/kthread.h>
+#include <linux/io.h>
 
 #include <asm/processor.h>
-#include <asm/io.h>
 #include <asm/ioctl.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
--
2.24.0



[Disco 03/10] kvm: fix compilation on s390

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Paolo Bonzini <[hidden email]>

CVE-2019-3016
CVE-2020-3016

s390 does not have memremap, even though in this particular case it
would be useful.

Signed-off-by: Paolo Bonzini <[hidden email]>
(cherry picked from commit d30b214d1d0addb7b2c9c78178d1501cd39a01fb)
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 virt/kvm/kvm_main.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c23b84dfe37a..f4d59133dce6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1809,8 +1809,10 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
 		hva = kmap(page);
+#ifdef CONFIG_HAS_IOMEM
 	} else {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+#endif
 	}
 
 	if (!hva)
--
2.24.0



[Disco 04/10] kvm: fix compile on s390 part 2

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Christian Borntraeger <[hidden email]>

CVE-2019-3016
CVE-2020-3016

We also need to fence the memunmap part.

Fixes: e45adf665a53 ("KVM: Introduce a new guest mapping API")
Fixes: d30b214d1d0a ("kvm: fix compilation on s390")
Cc: Michal Kubecek <[hidden email]>
Cc: KarimAllah Ahmed <[hidden email]>
Signed-off-by: Christian Borntraeger <[hidden email]>
(cherry picked from commit eb1f2f387db8c0d084581fb26e7faffde700bc8e)
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 virt/kvm/kvm_main.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f4d59133dce6..8d7585b99499 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1843,8 +1843,10 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 
 	if (map->page)
 		kunmap(map->page);
+#ifdef CONFIG_HAS_IOMEM
 	else
 		memunmap(map->hva);
+#endif
 
 	if (dirty) {
 		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
--
2.24.0



[Disco 05/10] KVM: Properly check if "page" is valid in kvm_vcpu_unmap

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: KarimAllah Ahmed <[hidden email]>

CVE-2019-3016
CVE-2020-3016

The field "page" is initialized to KVM_UNMAPPED_PAGE when it is not used
(i.e. when the memory lives outside kernel control). So this check will
always end up using kunmap even for memremap regions.
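
Schematically (a sketch for illustration, not code from the patch):

	if (map->page)				/* also true for KVM_UNMAPPED_PAGE */
		kunmap(map->page);		/* wrong path for memremap'ed memory */

	if (map->page != KVM_UNMAPPED_PAGE)	/* correct discriminator */
		kunmap(map->page);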

Fixes: e45adf665a53 ("KVM: Introduce a new guest mapping API")
Cc: [hidden email]
Signed-off-by: KarimAllah Ahmed <[hidden email]>
Signed-off-by: Paolo Bonzini <[hidden email]>
(cherry picked from commit b614c6027896ff9ad6757122e84760d938cab15e)
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 virt/kvm/kvm_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8d7585b99499..d40c8d70fd4e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1841,7 +1841,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	if (!map->hva)
 		return;
 
-	if (map->page)
+	if (map->page != KVM_UNMAPPED_PAGE)
 		kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
 	else
--
2.24.0



[Disco 06/10] UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

kvm_steal_time_set_preempted() may accidentally clear the KVM_VCPU_FLUSH_TLB
bit if it is called more than once while the VCPU is preempted.

This is part of CVE-2019-3016.

(This bug was also independently discovered by Jim Mattson
<[hidden email]>)
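
Schematically, with both flag bits living in the same 'preempted' byte of
the guest's kvm_steal_time (a sketch of the race, not code from the patch):

	/*
	 * host:  writes preempted = KVM_VCPU_PREEMPTED	(first preemption)
	 * guest: sets KVM_VCPU_FLUSH_TLB in the same byte
	 * host:  writes preempted = KVM_VCPU_PREEMPTED	(second preemption,
	 *						 FLUSH_TLB is lost)
	 */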

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/kvm/x86.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43e9f01229e4..19461f2de496 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3330,6 +3330,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
+	if (vcpu->arch.st.steal.preempted)
+		return;
+
 	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
 
 	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
--
2.24.0



[Disco 07/10] UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

kvm_vcpu_(un)map operates on gfns in the vCPU's current address space.
In certain cases we want to make sure we are not mapping SMRAM, and for
that we can use the kvm_(un)map_gfn() helpers that this patch introduces.
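
The distinction, as a rough sketch (on x86 the vCPU's current address
space can be the SMM one):

	/* resolves the gfn in the vCPU's current address space (may be SMRAM) */
	slots = kvm_vcpu_memslots(vcpu);

	/* always resolves the gfn in the default address space */
	slots = kvm_memslots(vcpu->kvm);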

This is part of CVE-2019-3016.

Signed-off-by: Paolo Bonzini <[hidden email]>
Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      | 29 ++++++++++++++++++++++++-----
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1d780c2eef36..2aa24a82b9c3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -739,8 +739,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d40c8d70fd4e..7d0fddd33519 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,12 +1792,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 			 struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 
 	if (!map)
 		return -EINVAL;
@@ -1826,14 +1827,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
 	return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+			    struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -1849,7 +1856,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 #endif
 
 	if (dirty) {
-		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+		mark_page_dirty_in_slot(memslot, map->gfn);
 		kvm_release_pfn_dirty(map->pfn);
 	} else {
 		kvm_release_pfn_clean(map->pfn);
@@ -1858,6 +1865,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	map->hva = NULL;
 	map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
--
2.24.0



[Disco 08/10] UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
* relatively expensive
* not callable in certain cases (such as from atomic context)

Stashing the gfn-to-pfn mapping should help with both cases.
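
The core of the lookup implemented below, as a sketch (the stashed pfn is
reused only for the same gfn and an unchanged memslots generation):

	if (!cache->pfn || cache->gfn != gfn || cache->generation != gen) {
		if (atomic)
			return -EAGAIN;	/* refilling may sleep */
		kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);	/* refill */
	}
	pfn = cache->pfn;	/* hit: no new gfn_to_pfn_memslot() call */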

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c              | 10 ++++
 include/linux/kvm_host.h        |  7 ++-
 include/linux/kvm_types.h       |  9 ++-
 virt/kvm/kvm_main.c             | 98 ++++++++++++++++++++++++++-------
 5 files changed, 103 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9cd74ed85574..438a957d0440 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 
  u64 tsc_offset;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 19461f2de496..0395e86e7eda 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8833,6 +8833,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
 
 	kvmclock_reset(vcpu);
 
@@ -9494,11 +9497,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+	/* Force re-initialization of steal_time cache */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2aa24a82b9c3..ea149807db3e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -709,6 +709,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
  int len);
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -739,10 +740,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 8bf259dae9f6..a38729c8296f 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -32,7 +32,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -63,4 +63,11 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+	u64 generation;
+	gfn_t gfn;
+	kvm_pfn_t pfn;
+	bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7d0fddd33519..d377ab09a227 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+	if (pfn == 0)
+		return;
+
+	if (cache)
+		cache->pfn = cache->gfn = 0;
+
+	if (dirty)
+		kvm_release_pfn_dirty(pfn);
+	else
+		kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+				 struct gfn_to_pfn_cache *cache, u64 gen)
+{
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+	cache->gfn = gfn;
+	cache->dirty = false;
+	cache->generation = gen;
+}
+
 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map)
+			 struct kvm_host_map *map,
+			 struct gfn_to_pfn_cache *cache,
+			 bool atomic)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
 	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (cache) {
+		if (!cache->pfn || cache->gfn != gfn ||
+		    cache->generation != gen) {
+			if (atomic)
+				return -EAGAIN;
+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+		}
+		pfn = cache->pfn;
+	} else {
+		if (atomic)
+			return -EAGAIN;
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+	}
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		hva = kmap(page);
+		if (atomic)
+			hva = kmap_atomic(page);
+		else
+			hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else {
+	} else if (!atomic) {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	} else {
+		return -EINVAL;
 #endif
 	}
 
@@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 	return 0;
 }
 
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic)
 {
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+			cache, atomic);
 }
 EXPORT_SYMBOL_GPL(kvm_map_gfn);
 
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+			NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-			    struct kvm_host_map *map, bool dirty)
+			    struct kvm_host_map *map,
+			    struct gfn_to_pfn_cache *cache,
+			    bool dirty, bool atomic)
 {
 	if (!map)
 		return;
@@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE)
-		kunmap(map->page);
+	if (map->page != KVM_UNMAPPED_PAGE) {
+		if (atomic)
+			kunmap_atomic(map->hva);
+		else
+			kunmap(map->page);
+	}
 #ifdef CONFIG_HAS_IOMEM
-	else
+	else if (!atomic)
 		memunmap(map->hva);
+	else
+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-	if (dirty) {
+	if (dirty)
 		mark_page_dirty_in_slot(memslot, map->gfn);
-		kvm_release_pfn_dirty(map->pfn);
-	} else {
-		kvm_release_pfn_clean(map->pfn);
-	}
+
+	if (cache)
+		cache->dirty |= dirty;
+	else
+		kvm_release_pfn(map->pfn, dirty, NULL);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
 
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
 {
-	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+			cache, dirty, atomic);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
 
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
-	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+			dirty, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
--
2.24.0



[Disco 09/10] UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

There is a potential race in record_steal_time() between setting
host-local vcpu->arch.st.steal.preempted to zero (i.e. clearing
KVM_VCPU_PREEMPTED) and propagating this value to the guest with
kvm_write_guest_cached(). Between those two events the guest may
still see KVM_VCPU_PREEMPTED in its copy of kvm_steal_time, set
KVM_VCPU_FLUSH_TLB and assume that the hypervisor will do the right
thing. Which it won't.

Instead of copying, we should map kvm_steal_time; that guarantees
atomicity of accesses to @preempted.
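
The window, schematically (a sketch of the race, not code from the patch):

	/*
	 * host:  vcpu->arch.st.steal.preempted = 0;	(local copy only)
	 * guest: still sees KVM_VCPU_PREEMPTED in memory,
	 *	  sets KVM_VCPU_FLUSH_TLB and expects a flush
	 * host:  kvm_write_guest_cached(...);		(overwrites the byte,
	 *						 FLUSH_TLB is lost)
	 */

With the structure mapped, the flag is consumed atomically via
xchg(&st->preempted, 0), as done below.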

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0395e86e7eda..85b3e49e1b66 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2452,43 +2452,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+	/* -EAGAIN is returned in atomic context so we can just return. */
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+			&map, &vcpu->arch.st.cache, false))
 		return;
 
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
 	/*
 	 * Doing a TLB flush here, on the guest's behalf, can avoid
 	 * expensive IPIs.
 	 */
-	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
 		kvm_vcpu_flush_tlb(vcpu, false);
 
-	if (vcpu->arch.st.steal.version & 1)
-		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+	vcpu->arch.st.steal.preempted = 0;
 
-	vcpu->arch.st.steal.version += 1;
+	if (st->version & 1)
+		st->version += 1;  /* first time write, random junk */
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	st->version += 1;
 
 	smp_wmb();
 
-	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+	st->steal += current->sched_info.run_delay -
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-
 	smp_wmb();
 
-	vcpu->arch.st.steal.version += 1;
+	st->version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3327,18 +3329,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
 	if (vcpu->arch.st.steal.preempted)
 		return;
 
-	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+			&vcpu->arch.st.cache, true))
+		return;
+
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
+	st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
 
-	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal.preempted,
-		offsetof(struct kvm_steal_time, preempted),
-		sizeof(vcpu->arch.st.steal.preempted));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
--
2.24.0



[Disco 10/10] UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

Now that we are mapping kvm_steal_time from the guest directly we
no longer need to keep a copy of it in kvm_vcpu_arch.st. The same is
true for the stime field.

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/include/asm/kvm_host.h |  3 +--
 arch/x86/kvm/x86.c              | 11 +++--------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 438a957d0440..6b6061b5de95 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -674,10 +674,9 @@ struct kvm_vcpu_arch {
 	bool pvclock_set_guest_stopped_request;
 
 	struct {
+		u8 preempted;
 		u64 msr_val;
 		u64 last_steal;
-		struct gfn_to_hva_cache stime;
-		struct kvm_steal_time steal;
 		struct gfn_to_pfn_cache cache;
 	} st;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 85b3e49e1b66..23c27e90e551 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2473,7 +2473,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
 		kvm_vcpu_flush_tlb(vcpu, false);
 
-	vcpu->arch.st.steal.preempted = 0;
+	vcpu->arch.st.preempted = 0;
 
 	if (st->version & 1)
 		st->version += 1;  /* first time write, random junk */
@@ -2632,11 +2632,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	if (data & KVM_STEAL_RESERVED_MASK)
 		return 1;
 
-	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-		data & KVM_STEAL_VALID_BITS,
-		sizeof(struct kvm_steal_time)))
-		return 1;
-
 	vcpu->arch.st.msr_val = data;
 
 	if (!(data & KVM_MSR_ENABLED))
@@ -3335,7 +3330,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (vcpu->arch.st.steal.preempted)
+	if (vcpu->arch.st.preempted)
 		return;
 
 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
@@ -3345,7 +3340,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	st = map.hva +
 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
 
-	st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
--
2.24.0



CMNT: [Disco 00/10] CVE-2019-3016

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
Tyler brought to my attention that CVE-2020-3016 is not a valid ID.

I submitted patches with both CVE-2020-3016 and CVE-2019-3016, assuming both
were assigned.

Please remove any references to CVE-2020-3016 from the commit messages.

Running the following before applying should work:

sed -i /CVE-2020-3016/d *{patch,mbox}

Or after applying, before pushing:

rm -rf .git/refs/original/
git filter-branch --msg-filter 'sed /CVE-2020-3016/d' origin/master-next..HEAD



ACK: [Disco 00/10] CVE-2019-3016

Sultan Alsawaf
In reply to this post by Thadeu Lima de Souza Cascardo-3
On Fri, Jan 31, 2020 at 08:10:25AM -0300, Thadeu Lima de Souza Cascardo wrote:

> This backports the guest mapping API and some followup fixes.
>
> It has been built-tested on all platforms.
>
> Boris Ostrovsky (5):
>   UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
>   UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
>   UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
>   UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
>     missed
>   UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure
>
> Christian Borntraeger (1):
>   kvm: fix compile on s390 part 2
>
> KarimAllah Ahmed (2):
>   KVM: Introduce a new guest mapping API
>   KVM: Properly check if "page" is valid in kvm_vcpu_unmap
>
> Paolo Bonzini (2):
>   kvm: fix compilation on aarch64
>   kvm: fix compilation on s390
>
>  arch/x86/include/asm/kvm_host.h |   4 +-
>  arch/x86/kvm/x86.c              |  67 ++++++++------
>  include/linux/kvm_host.h        |  33 +++++++
>  include/linux/kvm_types.h       |   9 +-
>  virt/kvm/kvm_main.c             | 149 +++++++++++++++++++++++++++++++-
>  5 files changed, 233 insertions(+), 29 deletions(-)
>
> --
> 2.24.0

Acked-by: Sultan Alsawaf <[hidden email]>


ACK/Cmnt: [Disco 00/10] CVE-2019-3016

Stefan Bader-2
In reply to this post by Thadeu Lima de Souza Cascardo-3
On 31.01.20 12:10, Thadeu Lima de Souza Cascardo wrote:

> This backports the guest mapping API and some followup fixes.
>
> It has been built-tested on all platforms.
>
> Boris Ostrovsky (5):
>   UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
>   UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
>   UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
>   UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
>     missed
>   UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure
>
> Christian Borntraeger (1):
>   kvm: fix compile on s390 part 2
>
> KarimAllah Ahmed (2):
>   KVM: Introduce a new guest mapping API
>   KVM: Properly check if "page" is valid in kvm_vcpu_unmap
>
> Paolo Bonzini (2):
>   kvm: fix compilation on aarch64
>   kvm: fix compilation on s390
>
>  arch/x86/include/asm/kvm_host.h |   4 +-
>  arch/x86/kvm/x86.c              |  67 ++++++++------
>  include/linux/kvm_host.h        |  33 +++++++
>  include/linux/kvm_types.h       |   9 +-
>  virt/kvm/kvm_main.c             | 149 +++++++++++++++++++++++++++++++-
>  5 files changed, 233 insertions(+), 29 deletions(-)
>
As with the Eoan submission, I would replace the SAUCE patches with the
upstream commits, doing proper cherry-picking / backporting, adding the
right CVE markup, and fixing it up for the pre-reqs.
We should be able to do that when applying; I should have mentioned that
in the Eoan reply, too.

Acked-by: Stefan Bader <[hidden email]>



APPLIED/cmnt: [Disco 00/10] CVE-2019-3016

Kleber Souza
In reply to this post by Thadeu Lima de Souza Cascardo-3
On 31.01.20 12:10, Thadeu Lima de Souza Cascardo wrote:

> This backports the guest mapping API and some followup fixes.
>
> It has been built-tested on all platforms.
>
> Boris Ostrovsky (5):
>   UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
>   UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
>   UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
>   UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
>     missed
>   UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure
>
> Christian Borntraeger (1):
>   kvm: fix compile on s390 part 2
>
> KarimAllah Ahmed (2):
>   KVM: Introduce a new guest mapping API
>   KVM: Properly check if "page" is valid in kvm_vcpu_unmap
>
> Paolo Bonzini (2):
>   kvm: fix compilation on aarch64
>   kvm: fix compilation on s390
>
>  arch/x86/include/asm/kvm_host.h |   4 +-
>  arch/x86/kvm/x86.c              |  67 ++++++++------
>  include/linux/kvm_host.h        |  33 +++++++
>  include/linux/kvm_types.h       |   9 +-
>  virt/kvm/kvm_main.c             | 149 +++++++++++++++++++++++++++++++-
>  5 files changed, 233 insertions(+), 29 deletions(-)
>

Applied to disco/linux, removing the CVE-2020-3016 reference and
cherry-picking/backporting the SAUCE patches as noted by Stefan.

Thanks,
Kleber

