From: Robert Richter <robert.richter@amd.com>
Subject: [PATCH 8/9] oprofile/x86: group IBS code
Date: Thu, 4 Mar 2010
    Move code in preparation for the next patch. This groups all IBS code
    together.

    Signed-off-by: Robert Richter <robert.richter@amd.com>
    ---
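
    A note on the pattern repeated throughout the moved block: each active
    counter is programmed by loading the perfctr MSR with the negative
    sample period, so that the counter overflows (and raises an NMI) after
    that many events, while the matching event select MSR is updated
    read-modify-write so its reserved bits are preserved. A minimal sketch
    of that pattern, using illustrative read_msr()/write_msr() stand-ins
    rather than the kernel's rdmsrl()/wrmsrl():

    #include <stdint.h>

    /* Illustrative stand-ins for the kernel's MSR accessors. */
    void read_msr(uint32_t addr, uint64_t *val);
    void write_msr(uint32_t addr, uint64_t val);

    /*
     * Program one perfctr/evntsel pair the way op_amd_setup_ctrs() below
     * does: counter preloaded with the negative period, event select
     * updated read-modify-write.
     */
    void program_counter(uint32_t perfctr, uint32_t evntsel,
    		     uint64_t count, uint64_t reserved_mask,
    		     uint64_t ctrl_bits)
    {
    	uint64_t val;

    	/* counter overflows, raising an NMI, after 'count' events */
    	write_msr(perfctr, -(uint64_t)count);

    	read_msr(evntsel, &val);
    	val &= reserved_mask;	/* keep only the reserved bits */
    	val |= ctrl_bits;	/* event, unit mask, enable, ... */
    	write_msr(evntsel, val);
    }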
    arch/x86/oprofile/op_model_amd.c | 202 +++++++++++++++++++-------------------
    1 files changed, 101 insertions(+), 101 deletions(-)
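
    The second hunk below leaves the 16-bit LFSR helper in place next to
    the rest of the IBS code; IBS uses it to randomize the sampling
    interval. For reference, a self-contained sketch of a Fibonacci LFSR
    for the feedback polynomial x^16 + x^14 + x^13 + x^11 + 1 named in
    that comment (the function name and seed handling are illustrative,
    not the kernel's):

    #include <stdint.h>

    /*
     * One step of a 16-bit Fibonacci LFSR. With the register shifting
     * right and the feedback entering at bit 15, the taps of the
     * polynomial x^16 + x^14 + x^13 + x^11 + 1 read out at bit offsets
     * 0, 2, 3 and 5 of the current value.
     */
    uint16_t lfsr_step(uint16_t lfsr)
    {
    	uint16_t bit = (lfsr ^ (lfsr >> 2) ^ (lfsr >> 3) ^ (lfsr >> 5)) & 1;

    	return (uint16_t)((lfsr >> 1) | (bit << 15));
    }

    Starting from any non-zero seed this cycles through all 2^16 - 1
    non-zero states, which makes it a cheap pseudo-random source for
    spreading out IBS samples.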

    diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
    index 128d84c..9c0d978 100644
    --- a/arch/x86/oprofile/op_model_amd.c
    +++ b/arch/x86/oprofile/op_model_amd.c
    @@ -105,107 +105,6 @@ static u32 get_ibs_caps(void)
     	return ibs_caps;
     }

    -#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
    -
    -static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
    -			       struct op_msrs const * const msrs)
    -{
    -	u64 val;
    -	int i;
    -
    -	/* enable active counters */
    -	for (i = 0; i < NUM_COUNTERS; ++i) {
    -		int virt = op_x86_phys_to_virt(i);
    -		if (!reset_value[virt])
    -			continue;
    -		rdmsrl(msrs->controls[i].addr, val);
    -		val &= model->reserved;
    -		val |= op_x86_get_ctrl(model, &counter_config[virt]);
    -		wrmsrl(msrs->controls[i].addr, val);
    -	}
    -}
    -
    -#endif
    -
    -/* functions for op_amd_spec */
    -
    -static void op_amd_shutdown(struct op_msrs const * const msrs);
    -
    -static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
    -{
    -	int i;
    -
    -	for (i = 0; i < NUM_COUNTERS; i++) {
    -		if (reserve_perfctr(i)) {
    -			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
    -			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
    -		} else if (counter_config[i].enabled) {
    -			op_x86_warn_reserved(i);
    -			op_amd_shutdown(msrs);
    -			return -EBUSY;
    -		}
    -	}
    -
    -	return 0;
    -}
    -
    -static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
    -			      struct op_msrs const * const msrs)
    -{
    -	u64 val;
    -	int i;
    -
    -	/* setup reset_value */
    -	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
    -		if (counter_config[i].enabled
    -		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
    -			reset_value[i] = counter_config[i].count;
    -		else
    -			reset_value[i] = 0;
    -	}
    -
    -	/* clear all counters */
    -	for (i = 0; i < NUM_CONTROLS; ++i) {
    -		if (unlikely(!msrs->controls[i].addr)) {
    -			if (counter_config[i].enabled && !smp_processor_id())
    -				/*
    -				 * counter is reserved, this is on all
    -				 * cpus, so report only for cpu #0
    -				 */
    -				op_x86_warn_reserved(i);
    -			continue;
    -		}
    -		rdmsrl(msrs->controls[i].addr, val);
    -		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
    -			op_x86_warn_in_use(i);
    -		val &= model->reserved;
    -		wrmsrl(msrs->controls[i].addr, val);
    -	}
    -
    -	/* avoid a false detection of ctr overflows in NMI handler */
    -	for (i = 0; i < NUM_COUNTERS; ++i) {
    -		if (unlikely(!msrs->counters[i].addr))
    -			continue;
    -		wrmsrl(msrs->counters[i].addr, -1LL);
    -	}
    -
    -	/* enable active counters */
    -	for (i = 0; i < NUM_COUNTERS; ++i) {
    -		int virt = op_x86_phys_to_virt(i);
    -		if (!reset_value[virt])
    -			continue;
    -
    -		/* setup counter registers */
    -		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
    -
    -		/* setup control registers */
    -		rdmsrl(msrs->controls[i].addr, val);
    -		val &= model->reserved;
    -		val |= op_x86_get_ctrl(model, &counter_config[virt]);
    -		wrmsrl(msrs->controls[i].addr, val);
    -	}
    -}
    -
     /*
      * 16-bit Linear Feedback Shift Register (LFSR)
      *
    @@ -370,6 +269,107 @@ static void op_amd_stop_ibs(void)
     	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
     }

    +#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
    +
    +static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
    +			       struct op_msrs const * const msrs)
    +{
    +	u64 val;
    +	int i;
    +
    +	/* enable active counters */
    +	for (i = 0; i < NUM_COUNTERS; ++i) {
    +		int virt = op_x86_phys_to_virt(i);
    +		if (!reset_value[virt])
    +			continue;
    +		rdmsrl(msrs->controls[i].addr, val);
    +		val &= model->reserved;
    +		val |= op_x86_get_ctrl(model, &counter_config[virt]);
    +		wrmsrl(msrs->controls[i].addr, val);
    +	}
    +}
    +
    +#endif
    +
    +/* functions for op_amd_spec */
    +
    +static void op_amd_shutdown(struct op_msrs const * const msrs);
    +
    +static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
    +{
    +	int i;
    +
    +	for (i = 0; i < NUM_COUNTERS; i++) {
    +		if (reserve_perfctr(i)) {
    +			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
    +			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
    +		} else if (counter_config[i].enabled) {
    +			op_x86_warn_reserved(i);
    +			op_amd_shutdown(msrs);
    +			return -EBUSY;
    +		}
    +	}
    +
    +	return 0;
    +}
    +
    +static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
    +			      struct op_msrs const * const msrs)
    +{
    +	u64 val;
    +	int i;
    +
    +	/* setup reset_value */
    +	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
    +		if (counter_config[i].enabled
    +		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
    +			reset_value[i] = counter_config[i].count;
    +		else
    +			reset_value[i] = 0;
    +	}
    +
    +	/* clear all counters */
    +	for (i = 0; i < NUM_CONTROLS; ++i) {
    +		if (unlikely(!msrs->controls[i].addr)) {
    +			if (counter_config[i].enabled && !smp_processor_id())
    +				/*
    +				 * counter is reserved, this is on all
    +				 * cpus, so report only for cpu #0
    +				 */
    +				op_x86_warn_reserved(i);
    +			continue;
    +		}
    +		rdmsrl(msrs->controls[i].addr, val);
    +		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
    +			op_x86_warn_in_use(i);
    +		val &= model->reserved;
    +		wrmsrl(msrs->controls[i].addr, val);
    +	}
    +
    +	/* avoid a false detection of ctr overflows in NMI handler */
    +	for (i = 0; i < NUM_COUNTERS; ++i) {
    +		if (unlikely(!msrs->counters[i].addr))
    +			continue;
    +		wrmsrl(msrs->counters[i].addr, -1LL);
    +	}
    +
    +	/* enable active counters */
    +	for (i = 0; i < NUM_COUNTERS; ++i) {
    +		int virt = op_x86_phys_to_virt(i);
    +		if (!reset_value[virt])
    +			continue;
    +
    +		/* setup counter registers */
    +		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
    +
    +		/* setup control registers */
    +		rdmsrl(msrs->controls[i].addr, val);
    +		val &= model->reserved;
    +		val |= op_x86_get_ctrl(model, &counter_config[virt]);
    +		wrmsrl(msrs->controls[i].addr, val);
    +	}
    +}
    +
     static int op_amd_check_ctrs(struct pt_regs * const regs,
     			     struct op_msrs const * const msrs)
     {
    --
    1.7.0


