Subject: Re: [PATCH v4] kvm: make vcpu life cycle separated from kvm instance
    On 12/15/2011 12:28 PM, Liu Ping Fan wrote:


> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1833,11 +1833,12 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
>
>  static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
>  {
> -	int i;
>  	struct kvm_vcpu *vcpu;
>
> -	kvm_for_each_vcpu(i, vcpu, kvm)
> +	rcu_read_lock();
> +	kvm_for_each_vcpu(vcpu, kvm)
>  		vcpu->arch.last_pte_updated = NULL;
> +	rcu_read_unlock();
>  }
>


Please rebase this on the current kvm tree; this code has changed there.

>  static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index c38efd7..acaa154 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1830,11 +1830,13 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>
>  	switch (msr) {
>  	case HV_X64_MSR_VP_INDEX: {
> -		int r;
> +		int r = 0;
>  		struct kvm_vcpu *v;
> -		kvm_for_each_vcpu(r, v, vcpu->kvm)
> +		kvm_for_each_vcpu(v, vcpu->kvm) {
>  			if (v == vcpu)
>  				data = r;
> +			r++;
> +		}


Doesn't this walk need rcu_read_lock() as well?
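Something like this is what I have in mind (just a sketch on top of your hunk,
assuming the list-based kvm_for_each_vcpu introduced below):

	case HV_X64_MSR_VP_INDEX: {
		int r = 0;
		struct kvm_vcpu *v;

		/* The list walk is only safe against concurrent vcpu
		 * removal inside an RCU read-side critical section. */
		rcu_read_lock();
		kvm_for_each_vcpu(v, vcpu->kvm) {
			if (v == vcpu)
				data = r;
			r++;
		}
		rcu_read_unlock();
		break;
	}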

> +struct kvm_vcpu *kvm_vcpu_get(struct kvm_vcpu *vcpu);
> +void kvm_vcpu_put(struct kvm_vcpu *vcpu);
> +void kvm_arch_vcpu_zap(struct work_struct *work);
> +
> +#define kvm_for_each_vcpu(vcpu, kvm) \
> +	list_for_each_entry_rcu(vcpu, &kvm->vcpus, list)
>
> -#define kvm_for_each_vcpu(idx, vcpup, kvm) \
> -	for (idx = 0; \
> -	     idx < atomic_read(&kvm->online_vcpus) && \
> -	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
> -	     idx++)
> +#define kvm_for_each_vcpu_continue(vcpu, kvm) \
> +	list_for_each_entry_continue_rcu(vcpu, &kvm->vcpus, list)
>


    Where is it used?
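If it is meant for resuming a walk from a known vcpu, I would expect a caller
roughly like this (hypothetical, I could not find one in the patch):

	/* Hypothetical example only: continue the RCU walk with the
	 * vcpus after @vcpu; no such caller exists in this patch. */
	struct kvm_vcpu *v = vcpu;

	rcu_read_lock();
	kvm_for_each_vcpu_continue(v, kvm)
		kvm_make_request(KVM_REQ_EVENT, v);
	rcu_read_unlock();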

> +struct kvm_vcpu *kvm_vcpu_get(struct kvm_vcpu *vcpu)
> +{
> +	if (vcpu == NULL)
> +		return NULL;
> +	if (atomic_add_unless(&vcpu->refcount, 1, 0))


Why not use atomic_inc()?
Also, I think a memory barrier is needed after increasing the refcount.
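For example (only a sketch; I am assuming kvm_vcpu_get() returns the vcpu on
success and NULL once the refcount has already dropped to zero):

	struct kvm_vcpu *kvm_vcpu_get(struct kvm_vcpu *vcpu)
	{
		if (vcpu == NULL)
			return NULL;
		if (atomic_add_unless(&vcpu->refcount, 1, 0)) {
			/* Order the refcount increment before any later
			 * access to vcpu state by the caller. */
			smp_mb__after_atomic_inc();
			return vcpu;
		}
		return NULL;
	}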

> -	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
> +	/*Protected by kvm->lock*/
> +	list_add_rcu(&vcpu->list, &kvm->vcpus);
> +
>  	smp_wmb();


This barrier can also be removed: list_add_rcu() already contains the write barrier needed to publish the new entry, and the readers no longer key off online_vcpus.

>  	atomic_inc(&kvm->online_vcpus);
>
>  #ifdef CONFIG_KVM_APIC_ARCHITECTURE
>  	if (kvm->bsp_vcpu_id == id)
> -		kvm->bsp_vcpu = vcpu;
> +		kvm->bsp_vcpu = kvm_vcpu_get(vcpu);
>  #endif
>  	mutex_unlock(&kvm->lock);
>  	return r;
> @@ -2593,13 +2667,15 @@ static int vcpu_stat_get(void *_offset, u64 *val)
>  	unsigned offset = (long)_offset;
>  	struct kvm *kvm;
>  	struct kvm_vcpu *vcpu;
> -	int i;
>
>  	*val = 0;
>  	raw_spin_lock(&kvm_lock);
> -	list_for_each_entry(kvm, &vm_list, vm_list)
> -		kvm_for_each_vcpu(i, vcpu, kvm)
> +	list_for_each_entry(kvm, &vm_list, vm_list) {
> +		rcu_read_lock();
> +		kvm_for_each_vcpu(vcpu, kvm)
>  			*val += *(u32 *)((void *)vcpu + offset);
> +		rcu_read_unlock();
> +	}
>
>  	raw_spin_unlock(&kvm_lock);
>  	return 0;
> @@ -2765,7 +2841,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
>  	kvm_preempt_ops.sched_out = kvm_sched_out;
>
>  	kvm_init_debug();
> -


You do not change anything here, so please do not touch this line.


