From: Eric Hankland <ehankland@google.com>
Date: Fri, 19 Apr 2019
Subject: [PATCH RFC] KVM: x86: Add a mechanism for restricting guest PMU access to a set of whitelisted events

Some events can provide a guest with information about other guests or
the host (e.g. L3 cache stats); providing the capability to restrict
access to a "safe" set of events limits the potential for the PMU to be
used in side-channel attacks. This change introduces a new vcpu ioctl
that sets an event whitelist. If the guest attempts to program a
counter for any non-whitelisted event, the kernel counter won't be
created, so any RDPMC/RDMSR will show 0 instances of that event.

Signed-off-by: Eric Hankland <ehankland@google.com>
---
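Note for reviewers: below is a minimal userspace sketch of how the new
ioctl might be exercised. Only KVM_CAP_PMU_WHITELIST,
KVM_SET_PMU_WHITELIST, and struct kvm_pmu_whitelist come from this
patch; the open vm_fd/vcpu_fd descriptors, the event_mask value, and
the two event codes (the usual eventsel | (unit_mask << 8) encodings
for unhalted core cycles and instructions retired) are illustrative
assumptions, not requirements of the interface.

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Sketch only: whitelist two events on one vCPU. */
static int set_pmu_whitelist(int vm_fd, int vcpu_fd)
{
	struct kvm_pmu_whitelist *wl;
	int ret;

	/* Confirm the kernel advertises the capability first. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PMU_WHITELIST) <= 0)
		return -1;

	/* Header plus room for two entries in the flexible array. */
	wl = calloc(1, sizeof(*wl) + 2 * sizeof(wl->events[0]));
	if (!wl)
		return -1;

	wl->event_mask = 0xFFFFull;	/* compare eventsel and unit mask */
	wl->num_events = 2;
	wl->events[0] = 0x003C;		/* ex: unhalted core cycles */
	wl->events[1] = 0x00C0;		/* ex: instructions retired */

	ret = ioctl(vcpu_fd, KVM_SET_PMU_WHITELIST, wl);
	free(wl);
	return ret;
}

As written, a whitelist with num_events == 0 is also accepted and
blocks all events on that vcpu, since the match loop in
pmc_reprogram_counter() then never finds a match.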

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9407aade9416..49c9e5b9d96f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -472,6 +472,7 @@ struct kvm_pmu {
 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
+	struct kvm_pmu_whitelist *whitelist;
 };
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 30955c88a8cf..a9ae5298fe93 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -238,6 +238,13 @@ struct kvm_cpuid2 {
 	struct kvm_cpuid_entry2 entries[0];
 };

+/* for KVM_SET_PMU_WHITELIST */
+struct kvm_pmu_whitelist {
+	__u64 event_mask;
+	__u16 num_events;
+	__u64 events[0];
+};
+
 /* for KVM_GET_PIT and KVM_SET_PIT */
 struct kvm_pit_channel_state {
 	__u32 count; /* can be 65536 */
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 026db42a86c3..7b1da21b219f 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -101,6 +101,9 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 				  bool exclude_kernel, bool intr,
 				  bool in_tx, bool in_tx_cp)
 {
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+	int i;
+	u64 event_config;
 	struct perf_event *event;
 	struct perf_event_attr attr = {
 		.type = type,
@@ -127,6 +130,19 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		attr.config |= HSW_IN_TX_CHECKPOINTED;
 	}

+	if (pmu->whitelist) {
+		event_config = attr.config;
+		if (type == PERF_TYPE_HARDWARE)
+			event_config = kvm_x86_ops->pmu_ops->get_event_code(
+					attr.config);
+		event_config &= pmu->whitelist->event_mask;
+		for (i = 0; i < pmu->whitelist->num_events; i++)
+			if (event_config == pmu->whitelist->events[i])
+				break;
+		if (i == pmu->whitelist->num_events)
+			return;
+	}
+
 	event = perf_event_create_kernel_counter(&attr, -1, current,
 						 intr ? kvm_perf_overflow_intr :
 						 kvm_perf_overflow, pmc);
@@ -244,6 +260,39 @@ int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
 }

+int kvm_vcpu_ioctl_set_pmu_whitelist(struct kvm_vcpu *vcpu,
+				     struct kvm_pmu_whitelist __user *whtlst)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_pmu_whitelist *old = pmu->whitelist;
+	struct kvm_pmu_whitelist *new = NULL;
+	struct kvm_pmu_whitelist tmp;
+	int r;
+	size_t size;
+
+	r = -EFAULT;
+	if (copy_from_user(&tmp, whtlst, sizeof(struct kvm_pmu_whitelist)))
+		goto err;
+
+	size = sizeof(tmp) + sizeof(tmp.events[0]) * tmp.num_events;
+	new = kvzalloc(size, GFP_KERNEL_ACCOUNT);
+	r = -ENOMEM;
+	if (!new)
+		goto err;
+
+	r = -EFAULT;
+	if (copy_from_user(new, whtlst, size))
+		goto err;
+
+	pmu->whitelist = new;
+
+	kvfree(old);
+	return 0;
+err:
+	kvfree(new);
+	return r;
+}
+
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
 	bool fast_mode = idx & (1u << 31);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index f8f5da947afe..067b80f1bc32 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -23,6 +23,7 @@ struct kvm_pmu_ops {
 	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
 				    u8 unit_mask);
 	unsigned (*find_fixed_event)(int idx);
+	u64 (*get_event_code)(unsigned event_type);
 	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
 	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
@@ -116,6 +117,8 @@ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
+int kvm_vcpu_ioctl_set_pmu_whitelist(struct kvm_vcpu *vcpu,
+				     struct kvm_pmu_whitelist __user *whtlst);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index cd944435dfbd..414729c7508c 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -48,6 +48,21 @@ static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
 	return amd_event_mapping[i].event_type;
 }

+static u64 amd_get_event_code(unsigned event_type)
+{
+	int i;
+	u64 event_code = 0;
+
+	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
+		if (amd_event_mapping[i].event_type == event_type) {
+			event_code = amd_event_mapping[i].eventsel |
+				((u64)amd_event_mapping[i].unit_mask << 8);
+			break;
+		}
+
+	return event_code;
+}
+
 /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
 static unsigned amd_find_fixed_event(int idx)
 {
@@ -192,6 +207,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 struct kvm_pmu_ops amd_pmu_ops = {
 	.find_arch_event = amd_find_arch_event,
 	.find_fixed_event = amd_find_fixed_event,
+	.get_event_code = amd_get_event_code,
 	.pmc_is_enabled = amd_pmc_is_enabled,
 	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
 	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index b18c28592986..8d37068a122e 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -67,6 +67,21 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 		reprogram_counter(pmu, bit);
 }

+static u64 intel_get_event_code(unsigned event_type)
+{
+	int i;
+	u64 event_code = 0;
+
+	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+		if (intel_arch_events[i].event_type == event_type) {
+			event_code = intel_arch_events[i].eventsel |
+				((u64)intel_arch_events[i].unit_mask << 8);
+			break;
+		}
+
+	return event_code;
+}
+
 static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
 				      u8 event_select,
 				      u8 unit_mask)
@@ -352,6 +367,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)

 struct kvm_pmu_ops intel_pmu_ops = {
 	.find_arch_event = intel_find_arch_event,
+	.get_event_code = intel_get_event_code,
 	.find_fixed_event = intel_find_fixed_event,
 	.pmc_is_enabled = intel_pmc_is_enabled,
 	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ef809fa9c45..dd6d165ae138 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3077,6 +3077,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	int r = 0;

 	switch (ext) {
+	case KVM_CAP_PMU_WHITELIST:
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_HLT:
 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
@@ -4463,6 +4464,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_x86_ops->set_ple(vcpu, &ple);
 		break;
 	}
+	case KVM_SET_PMU_WHITELIST: {
+		struct kvm_pmu_whitelist __user *whitelist = argp;
+
+		r = kvm_vcpu_ioctl_set_pmu_whitelist(vcpu, whitelist);
+		goto out;
+	}
 	default:
 		r = -EINVAL;
 	}
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 59fee7246f75..d4f00a76a608 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1090,6 +1090,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_GET_CPU_CHAR 151
 #define KVM_CAP_S390_BPB 152
 #define KVM_CAP_GET_MSR_FEATURES 153
+#define KVM_CAP_PMU_WHITELIST 154
 #define KVM_CAP_MSR_PLATFORM_INFO 159
 #define KVM_CAP_EXCEPTION_PAYLOAD 164

@@ -1603,6 +1604,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
 /* Memory Encryption Commands */
 #define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
+/* Available with KVM_CAP_PMU_WHITELIST */
+#define KVM_SET_PMU_WHITELIST _IOW(KVMIO, 0xbb, struct kvm_pmu_whitelist)

 struct kvm_enc_region {
 	__u64 addr;