Subject: Re: [patch v2] x86: kvm: x86: fix information leak to userland

On 30.10.2010, at 20:54, Vasiliy Kulikov wrote:

> Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
> kvm_clock_data are copied to userland with some padding and reserved
> fields uninitialized. This leaks the contents of kernel stack memory.
> We have to initialize these fields to zero.
>
> In patch v1 Jan Kiszka suggested filling the reserved fields with
> zeros instead of memset'ting the whole struct. This makes sense, as
> these fields are explicitly marked as padding. No other fields need
> zeroing.
>
> Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
> ---
> Compile tested only.
>
> arch/x86/kvm/x86.c | 6 ++++++
> 1 files changed, 6 insertions(+), 0 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b0818f6..463c65b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> !kvm_exception_is_soft(vcpu->arch.exception.nr);
> events->exception.nr = vcpu->arch.exception.nr;
> events->exception.has_error_code = vcpu->arch.exception.has_error_code;
> + events->exception.pad = 0;
> events->exception.error_code = vcpu->arch.exception.error_code;
>
> events->interrupt.injected =
> @@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> events->nmi.injected = vcpu->arch.nmi_injected;
> events->nmi.pending = vcpu->arch.nmi_pending;
> events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
> + events->nmi.pad = 0;
>
> events->sipi_vector = vcpu->arch.sipi_vector;
>
> events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
> | KVM_VCPUEVENT_VALID_SIPI_VECTOR
> | KVM_VCPUEVENT_VALID_SHADOW);
> + memset(&events->reserved, 0, sizeof(events->reserved));
> }
>
> static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
> @@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
> dbgregs->dr6 = vcpu->arch.dr6;
> dbgregs->dr7 = vcpu->arch.dr7;
> dbgregs->flags = 0;
> + memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
> }
>
> static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
> @@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
> sizeof(ps->channels));
> ps->flags = kvm->arch.vpit->pit_state.flags;
> mutex_unlock(&kvm->arch.vpit->pit_state.lock);
> + memset(&ps->reserved, 0, sizeof(ps->reserved));

struct kvm_pit_state2 {
	struct kvm_pit_channel_state channels[3];
	__u32 flags;
	__u32 reserved[9];
};

So &ps->reserved in that memset would give you a __u32 **, no? Same goes for all the other array memsets in here. Or am I understanding some C logic wrong? :)
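
A quick userspace sketch of what &ps->reserved actually yields (using a made-up stand-in struct rather than the real kvm_pit_state2, so the surrounding fields are only illustrative):

#include <stdio.h>
#include <string.h>

/* Stand-in with the same kind of tail as kvm_pit_state2:
 * a flags word followed by a reserved array. */
struct pit_state2_like {
	unsigned int flags;
	unsigned int reserved[9];
};

int main(void)
{
	struct pit_state2_like ps;

	/* &ps.reserved has type unsigned int (*)[9] -- a pointer to the
	 * whole array, not unsigned int ** -- and it points at the same
	 * address as ps.reserved itself. */
	printf("ps.reserved  = %p\n", (void *)ps.reserved);
	printf("&ps.reserved = %p\n", (void *)&ps.reserved);

	/* sizeof(ps.reserved) is the size of the whole array, so the
	 * memset zeroes every reserved word. */
	printf("sizeof(ps.reserved) = %zu\n", sizeof(ps.reserved));
	memset(&ps.reserved, 0, sizeof(ps.reserved));

	return 0;
}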


Alex

> return r;
> }
>
> @@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
> user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
> local_irq_enable();
> user_ns.flags = 0;
> + memset(&user_ns.pad, 0, sizeof(user_ns.pad));
>
> r = -EFAULT;
> if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
> --
> Vasiliy


