[Eoan 0/5] CVE-2019-3016

Thadeu Lima de Souza Cascardo-3
This has been built on all platforms and smoke tested as a host on x86 and
ppc64el.

Boris Ostrovsky (5):
  UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
  UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
  UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
  UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
    missed
  UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure

 arch/x86/include/asm/kvm_host.h |   4 +-
 arch/x86/kvm/x86.c              |  67 ++++++++++++-------
 include/linux/kvm_host.h        |   5 ++
 include/linux/kvm_types.h       |   9 ++-
 virt/kvm/kvm_main.c             | 113 +++++++++++++++++++++++++++-----
 5 files changed, 153 insertions(+), 45 deletions(-)

--
2.24.0



[Eoan 1/5] UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit

Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

kvm_steal_time_set_preempted() may accidentally clear the KVM_VCPU_FLUSH_TLB
bit if it is called more than once while the VCPU is preempted.

This is part of CVE-2019-3016.

(This bug was also independently discovered by Jim Mattson
<[hidden email]>)
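
To make the clobbering concrete, an illustrative interleaving (a
simplified sketch, not the actual code path; only the two flag values
are taken from the real ABI in arch/x86/include/uapi/asm/kvm_para.h):

/* st points at the guest-visible kvm_steal_time */
#define KVM_VCPU_PREEMPTED  (1 << 0)
#define KVM_VCPU_FLUSH_TLB  (1 << 1)

st->preempted = KVM_VCPU_PREEMPTED;   /* host: first notification, 0b01  */
st->preempted |= KVM_VCPU_FLUSH_TLB;  /* guest: requests TLB flush, 0b11 */
st->preempted = KVM_VCPU_PREEMPTED;   /* host: called again while still
                                       * preempted; without the early
                                       * return added below, this store
                                       * drops the FLUSH_TLB bit (0b01) */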

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/kvm/x86.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9c45e6ca30fd..80e860bd39d5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3399,6 +3399,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
  if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  return;
 
+ if (vcpu->arch.st.steal.preempted)
+ return;
+
  vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
 
  kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
--
2.24.0



[Eoan 2/5] UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

kvm_vcpu_(un)map operates on gfns from whatever the vCPU's current
address space is. In certain cases we want to make sure we are not
mapping SMRAM, and for that we can use the kvm_(un)map_gfn() helpers
that this patch introduces.

This is part of CVE-2019-3016.
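
For illustration, a minimal hypothetical caller of the new helpers,
using the signatures as introduced in this patch (a later patch in the
series extends them with cache and atomic parameters):

#include <linux/kvm_host.h>

/* Map a gfn from the vCPU's regular address space, dirty it, unmap. */
static int touch_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_host_map map;

	if (kvm_map_gfn(vcpu, gfn, &map))
		return -EFAULT;

	memset(map.hva, 0, 8);	/* access guest memory via the host mapping */

	/* 'true' marks the page dirty when the pfn is released */
	return kvm_unmap_gfn(vcpu, &map, true);
}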

Signed-off-by: Paolo Bonzini <[hidden email]>
Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      | 29 ++++++++++++++++++++++++-----
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d41c521a39da..df4cc0ead363 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -758,8 +758,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 49ef54267061..ca08942c7846 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,12 +1792,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
  struct kvm_host_map *map)
 {
  kvm_pfn_t pfn;
  void *hva = NULL;
  struct page *page = KVM_UNMAPPED_PAGE;
+ struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 
  if (!map)
  return -EINVAL;
@@ -1826,14 +1827,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
  return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+ return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
- return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+ return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+ struct kvm_host_map *map, bool dirty)
 {
  if (!map)
  return;
@@ -1849,7 +1856,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 #endif
 
  if (dirty) {
- kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+ mark_page_dirty_in_slot(memslot, map->gfn);
  kvm_release_pfn_dirty(map->pfn);
  } else {
  kvm_release_pfn_clean(map->pfn);
@@ -1858,6 +1865,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
  map->hva = NULL;
  map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+ __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+ __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
--
2.24.0



[Eoan 3/5] UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
* relatively expensive
* not possible at all in certain cases (such as from atomic context)

Stashing the gfn-to-pfn translation should help with both cases.

This is part of CVE-2019-3016.
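
Condensed from the diff below, the fast path the cache enables when a
cache is supplied ('gen' is the current memslots generation):

if (cache->pfn && cache->gfn == gfn && cache->generation == gen) {
	pfn = cache->pfn;	/* hit: skip gfn_to_pfn_memslot() */
} else if (atomic) {
	return -EAGAIN;		/* miss: cannot refill in atomic context */
} else {
	kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);	/* slow-path refill */
	pfn = cache->pfn;
}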

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c              | 10 ++++
 include/linux/kvm_host.h        |  7 ++-
 include/linux/kvm_types.h       |  9 ++-
 virt/kvm/kvm_main.c             | 98 ++++++++++++++++++++++++++-------
 5 files changed, 103 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f68e174f452f..7c06343614a4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
  u64 last_steal;
  struct gfn_to_hva_cache stime;
  struct kvm_steal_time steal;
+ struct gfn_to_pfn_cache cache;
  } st;
 
  u64 tsc_offset;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 80e860bd39d5..cb18560b07bc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8945,6 +8945,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
  void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+ struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+ kvm_release_pfn(cache->pfn, cache->dirty, cache);
 
  kvmclock_reset(vcpu);
 
@@ -9611,11 +9614,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+ struct kvm_vcpu *vcpu;
+ int i;
+
  /*
  * memslots->generation has been incremented.
  * mmio generation may have reached its maximum value.
  */
  kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+ /* Force re-initialization of steal_time cache */
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index df4cc0ead363..abfc2fbde957 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
  int len);
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index bde5374ae021..2382cb58969d 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,7 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
  struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+ bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ca08942c7846..669475b59456 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+ if (pfn == 0)
+ return;
+
+ if (cache)
+ cache->pfn = cache->gfn = 0;
+
+ if (dirty)
+ kvm_release_pfn_dirty(pfn);
+ else
+ kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct gfn_to_pfn_cache *cache, u64 gen)
+{
+ kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+ cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+ cache->gfn = gfn;
+ cache->dirty = false;
+ cache->generation = gen;
+}
+
 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
- struct kvm_host_map *map)
+ struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache,
+ bool atomic)
 {
  kvm_pfn_t pfn;
  void *hva = NULL;
  struct page *page = KVM_UNMAPPED_PAGE;
  struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+ u64 gen = slots->generation;
 
  if (!map)
  return -EINVAL;
 
- pfn = gfn_to_pfn_memslot(slot, gfn);
+ if (cache) {
+ if (!cache->pfn || cache->gfn != gfn ||
+ cache->generation != gen) {
+ if (atomic)
+ return -EAGAIN;
+ kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+ }
+ pfn = cache->pfn;
+ } else {
+ if (atomic)
+ return -EAGAIN;
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+ }
  if (is_error_noslot_pfn(pfn))
  return -EINVAL;
 
  if (pfn_valid(pfn)) {
  page = pfn_to_page(pfn);
- hva = kmap(page);
+ if (atomic)
+ hva = kmap_atomic(page);
+ else
+ hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
- } else {
+ } else if (!atomic) {
  hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+ } else {
+ return -EINVAL;
 #endif
  }
 
@@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
  return 0;
 }
 
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool atomic)
 {
- return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+ return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+ cache, atomic);
 }
 EXPORT_SYMBOL_GPL(kvm_map_gfn);
 
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
- return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
+ return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+ NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
- struct kvm_host_map *map, bool dirty)
+ struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache,
+ bool dirty, bool atomic)
 {
  if (!map)
  return;
@@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
  if (!map->hva)
  return;
 
- if (map->page != KVM_UNMAPPED_PAGE)
- kunmap(map->page);
+ if (map->page != KVM_UNMAPPED_PAGE) {
+ if (atomic)
+ kunmap_atomic(map->hva);
+ else
+ kunmap(map->page);
+ }
 #ifdef CONFIG_HAS_IOMEM
- else
+ else if (!atomic)
  memunmap(map->hva);
+ else
+ WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
- if (dirty) {
+ if (dirty)
  mark_page_dirty_in_slot(memslot, map->gfn);
- kvm_release_pfn_dirty(map->pfn);
- } else {
- kvm_release_pfn_clean(map->pfn);
- }
+
+ if (cache)
+ cache->dirty |= dirty;
+ else
+ kvm_release_pfn(map->pfn, dirty, NULL);
 
  map->hva = NULL;
  map->page = NULL;
 }
 
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
 {
- __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+ __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+ cache, dirty, atomic);
  return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
 
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
- __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+ __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+ dirty, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
--
2.24.0



[Eoan 4/5] UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

There is a potential race in record_steal_time() between setting
the host-local vcpu->arch.st.steal.preempted to zero (i.e. clearing
KVM_VCPU_PREEMPTED) and propagating this value to the guest with
kvm_write_guest_cached(). Between those two events the guest may
still see KVM_VCPU_PREEMPTED in its copy of kvm_steal_time, set
KVM_VCPU_FLUSH_TLB and assume that the hypervisor will do the right
thing, which it won't.

Instead of copying, we should map kvm_steal_time; that guarantees
atomicity of accesses to @preempted.

This is part of CVE-2019-3016.
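
The lost-update window, sketched against the pre-patch code (an
illustrative interleaving; both calls below are ones this patch
removes):

/* host, record_steal_time(): clears only the host-local copy */
xchg(&vcpu->arch.st.steal.preempted, 0);
/* guest, meanwhile: its copy still shows KVM_VCPU_PREEMPTED, so it
 * sets KVM_VCPU_FLUSH_TLB and skips sending flush IPIs ...         */
/* host: ... then writing back the stale host-local copy overwrites
 * the guest's flag, and the requested flush never happens          */
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		       &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));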

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cb18560b07bc..f63fa5846f08 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2488,43 +2488,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+ struct kvm_host_map map;
+ struct kvm_steal_time *st;
+
  if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  return;
 
- if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+ /* -EAGAIN is returned in atomic context so we can just return. */
+ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+ &map, &vcpu->arch.st.cache, false))
  return;
 
+ st = map.hva +
+ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
  /*
  * Doing a TLB flush here, on the guest's behalf, can avoid
  * expensive IPIs.
  */
- if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
  kvm_vcpu_flush_tlb(vcpu, false);
 
- if (vcpu->arch.st.steal.version & 1)
- vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+ vcpu->arch.st.steal.preempted = 0;
 
- vcpu->arch.st.steal.version += 1;
+ if (st->version & 1)
+ st->version += 1;  /* first time write, random junk */
 
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+ st->version += 1;
 
  smp_wmb();
 
- vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+ st->steal += current->sched_info.run_delay -
  vcpu->arch.st.last_steal;
  vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-
  smp_wmb();
 
- vcpu->arch.st.steal.version += 1;
+ st->version += 1;
 
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3396,18 +3398,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
+ struct kvm_host_map map;
+ struct kvm_steal_time *st;
+
  if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  return;
 
  if (vcpu->arch.st.steal.preempted)
  return;
 
- vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+ &vcpu->arch.st.cache, true))
+ return;
+
+ st = map.hva +
+ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
+ st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
 
- kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal.preempted,
- offsetof(struct kvm_steal_time, preempted),
- sizeof(vcpu->arch.st.steal.preempted));
+ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
--
2.24.0



[Eoan 5/5] UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
From: Boris Ostrovsky <[hidden email]>

CVE-2019-3016
CVE-2020-3016

Now that we are mapping kvm_steal_time from the guest directly, we
don't need to keep a copy of it in kvm_vcpu_arch.st. The same is true
for the stime field.

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <[hidden email]>
Reviewed-by: Joao Martins <[hidden email]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[hidden email]>
---
 arch/x86/include/asm/kvm_host.h |  3 +--
 arch/x86/kvm/x86.c              | 11 +++--------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7c06343614a4..f62f4ff5f4f4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -674,10 +674,9 @@ struct kvm_vcpu_arch {
  bool pvclock_set_guest_stopped_request;
 
  struct {
+ u8 preempted;
  u64 msr_val;
  u64 last_steal;
- struct gfn_to_hva_cache stime;
- struct kvm_steal_time steal;
  struct gfn_to_pfn_cache cache;
  } st;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f63fa5846f08..6ce9ace8a801 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2509,7 +2509,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
  if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
  kvm_vcpu_flush_tlb(vcpu, false);
 
- vcpu->arch.st.steal.preempted = 0;
+ vcpu->arch.st.preempted = 0;
 
  if (st->version & 1)
  st->version += 1;  /* first time write, random junk */
@@ -2682,11 +2682,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  if (data & KVM_STEAL_RESERVED_MASK)
  return 1;
 
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
- data & KVM_STEAL_VALID_BITS,
- sizeof(struct kvm_steal_time)))
- return 1;
-
  vcpu->arch.st.msr_val = data;
 
  if (!(data & KVM_MSR_ENABLED))
@@ -3404,7 +3399,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
  if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  return;
 
- if (vcpu->arch.st.steal.preempted)
+ if (vcpu->arch.st.preempted)
  return;
 
  if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
@@ -3414,7 +3409,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
  st = map.hva +
  offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
 
- st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+ st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
  kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
--
2.24.0



CMNT: [Eoan 0/5] CVE-2019-3016

Thadeu Lima de Souza Cascardo-3
In reply to this post by Thadeu Lima de Souza Cascardo-3
Tyler brought to my attention that CVE-2020-3016 is not a valid ID.

I submitted patches with both CVE-2020-3016 and CVE-2019-3016, assuming both
were assigned.

Please, remove any references to CVE-2020-3016 from the commit messages.

Running the following before applying should work:

sed -i '/CVE-2020-3016/d' *{patch,mbox}

Or after applying, before pushing:

rm -rf .git/refs/original/
git filter-branch --msg-filter 'sed /CVE-2020-3016/d' origin/master-next..HEAD
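
Either way, a quick check over the same range should print nothing once
the references are gone (standard commands, shown here only as a sanity
check):

git log origin/master-next..HEAD | grep CVE-2020-3016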



ACK: [Eoan 0/5] CVE-2019-3016

Sultan Alsawaf
In reply to this post by Thadeu Lima de Souza Cascardo-3
On Fri, Jan 31, 2020 at 08:06:39AM -0300, Thadeu Lima de Souza Cascardo wrote:

> This has been built on all platforms and smoke tested as a host on x86 and
> ppc64el.
>
> Boris Ostrovsky (5):
>   UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
>   UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
>   UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
>   UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
>     missed
>   UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure
>
>  arch/x86/include/asm/kvm_host.h |   4 +-
>  arch/x86/kvm/x86.c              |  67 ++++++++++++-------
>  include/linux/kvm_host.h        |   5 ++
>  include/linux/kvm_types.h       |   9 ++-
>  virt/kvm/kvm_main.c             | 113 +++++++++++++++++++++++++++-----
>  5 files changed, 153 insertions(+), 45 deletions(-)
>
> --
> 2.24.0

Acked-by: Sultan Alsawaf <[hidden email]>


ACK/Cmnt: [Eoan 0/5] CVE-2019-3016

Stefan Bader-2
In reply to this post by Thadeu Lima de Souza Cascardo-3
On 31.01.20 12:06, Thadeu Lima de Souza Cascardo wrote:
> This has been built on all platforms and smoke tested as a host on x86 and
> ppc64el.
>
> Boris Ostrovsky (5):
>   UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
(-> commit 8c6de56a42e0c657955e12b882a81ef07d1d073e)
>   UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
(-> commit 1eff70a9abd46f175defafd29bc17ad456f398a7)
>   UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
(-> commit 917248144db5d7320655dbb41d3af0b8a0f3d589)
>   UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
>     missed
(-> commit b043138246a41064527cf019a3d51d9f015e9796)
>   UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure
(-> commit a6bd811f1209fe1c64c9f6fd578101d6436c6b6e)

All of those patches seem to be upstream now. I would suggest
cherry-picking / backporting them into Eoan instead of picking the
SAUCE patches.
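
One possible workflow, using the mainline SHAs listed above; -x records
the upstream commit id in each changelog, as the stable process expects:

git cherry-pick -x 8c6de56a42e0c657955e12b882a81ef07d1d073e  # 1/5
git cherry-pick -x 1eff70a9abd46f175defafd29bc17ad456f398a7  # 2/5
git cherry-pick -x 917248144db5d7320655dbb41d3af0b8a0f3d589  # 3/5
git cherry-pick -x b043138246a41064527cf019a3d51d9f015e9796  # 4/5
git cherry-pick -x a6bd811f1209fe1c64c9f6fd578101d6436c6b6e  # 5/5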

Acked-by: Stefan Bader <[hidden email]>
>
>  arch/x86/include/asm/kvm_host.h |   4 +-
>  arch/x86/kvm/x86.c              |  67 ++++++++++++-------
>  include/linux/kvm_host.h        |   5 ++
>  include/linux/kvm_types.h       |   9 ++-
>  virt/kvm/kvm_main.c             | 113 +++++++++++++++++++++++++++-----
>  5 files changed, 153 insertions(+), 45 deletions(-)
>




APPLIED/cmnt: [Eoan 0/5] CVE-2019-3016

Kleber Souza
In reply to this post by Thadeu Lima de Souza Cascardo-3
On 31.01.20 12:06, Thadeu Lima de Souza Cascardo wrote:

> This has been built on all platforms and smoke tested as a host on x86 and
> ppc64el.
>
> Boris Ostrovsky (5):
>   UBUNTU: SAUCE: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
>   UBUNTU: SAUCE: x86/kvm: Introduce kvm_(un)map_gfn()
>   UBUNTU: SAUCE: x86/kvm: Cache gfn to pfn translation
>   UBUNTU: SAUCE: x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not
>     missed
>   UBUNTU: SAUCE: x86/KVM: Clean up host's steal time structure
>
>  arch/x86/include/asm/kvm_host.h |   4 +-
>  arch/x86/kvm/x86.c              |  67 ++++++++++++-------
>  include/linux/kvm_host.h        |   5 ++
>  include/linux/kvm_types.h       |   9 ++-
>  virt/kvm/kvm_main.c             | 113 +++++++++++++++++++++++++++-----
>  5 files changed, 153 insertions(+), 45 deletions(-)
>

Applied to eoan/linux, cherry-picking/backporting the patches from
mainline as noted by Stefan.

Thanks,
Kleber
