Subject: [PATCH v1 02/47] x86: mtrr: generalize run time disabling of MTRR
From: "Luis R. Rodriguez" <mcgrof@suse.com>

It is possible to enable CONFIG_MTRR and still end up with it
disabled at run time, while CONFIG_X86_PAT remains fully
functional. This can happen for instance on Xen, where MTRR
is not supported but PAT is; it is possible on Linux as of
commit 47591df50 by Juergen, introduced in v3.19.

Technically we should be able to assume the proper CPU
feature bits are cleared when MTRR is disabled, but we
cannot always rely on this. On the Xen hypervisor, for
instance, only X86_FEATURE_MTRR was disabled as of Xen 4.4
through Xen commit 586ab6a [0], but not X86_FEATURE_K6_MTRR,
X86_FEATURE_CENTAUR_MCR, or X86_FEATURE_CYRIX_ARR.

The x86 MTRR code relies on quite a few checks of mtrr_if
being set to see whether MTRR did get set up. Instead of
relying on that, provide a generic mtrr_enabled flag which,
when set, tells us MTRR is enabled, as sketched below. This
also adds a few checks where there were none before, which
can safeguard us against incorrect use of MTRR where that
was not desirable.
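
To make the intent concrete, here is a minimal sketch of the
pattern only (example_bp_init and example_entry_point are
simplified stand-ins for illustration, not the actual kernel
symbols): a single flag set once during bring-up, checked at
every public entry point.

  /* Sketch only; names are simplified stand-ins for illustration. */
  static int mtrr_enabled;

  void __init example_bp_init(void)
  {
  	if (mtrr_if) {
  		/* an MTRR driver was selected during bring-up */
  		mtrr_enabled = true;
  	} else {
  		pr_info("mtrr: system does not support MTRR\n");
  	}
  }

  int example_entry_point(void)
  {
  	if (!mtrr_enabled)
  		return -ENODEV;	/* same error as when MTRR is disabled */
  	/* ... real work ... */
  	return 0;
  }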

Where possible, match the error codes used when MTRR is
disabled in arch/x86/include/asm/mtrr.h.

Lastly, since MTRR can end up disabled at run time while PAT
remains enabled, record in the logs when MTRR is disabled.

[0] ~/devel/xen (git::stable-4.5)$ git describe --contains 586ab6a
4.4.0-rc1~18

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Juergen Gross <jgross@suse.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Antonino Daplas <adaplas@gmail.com>
Cc: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: venkatesh.pallipadi@intel.com
Cc: Stefan Bader <stefan.bader@canonical.com>
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: bhelgaas@google.com
Cc: Roger Pau Monné <roger.pau@citrix.com>
Cc: linux-fbdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: xen-devel@lists.xensource.com
Signed-off-by: Luis R. Rodriguez <mcgrof@suse.com>
---
arch/x86/include/asm/mtrr.h | 2 ++
arch/x86/kernel/cpu/mtrr/cleanup.c | 2 +-
arch/x86/kernel/cpu/mtrr/generic.c | 5 +++--
arch/x86/kernel/cpu/mtrr/if.c | 3 +++
arch/x86/kernel/cpu/mtrr/main.c | 31 ++++++++++++++++++++++---------
5 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index f768f62..cade917 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -31,6 +31,7 @@
* arch_phys_wc_add and arch_phys_wc_del.
*/
# ifdef CONFIG_MTRR
+extern int mtrr_enabled;
extern u8 mtrr_type_lookup(u64 addr, u64 end);
extern void mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
@@ -50,6 +51,7 @@ extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
extern int phys_wc_to_mtrr_index(int handle);
# else
+static const int mtrr_enabled;
static inline u8 mtrr_type_lookup(u64 addr, u64 end)
{
/*
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 5f90b85..784dc55 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -880,7 +880,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
* Make sure we only trim uncachable memory on machines that
* support the Intel MTRR architecture:
*/
- if (!is_cpu(INTEL) || disable_mtrr_trim)
+ if (!is_cpu(INTEL) || disable_mtrr_trim || !mtrr_enabled)
return 0;

rdmsr(MSR_MTRRdefType, def, dummy);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 09c82de..df321b2 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -116,7 +116,8 @@ static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
u8 prev_match, curr_match;

*repeat = 0;
- if (!mtrr_state_set)
+ /* mtrr_state is only set up when using generic_mtrr_ops */
+ if (!mtrr_state_set || !mtrr_enabled)
return 0xFF;

if (!mtrr_state.enabled)
@@ -290,7 +291,7 @@ static void get_fixed_ranges(mtrr_type *frs)

void mtrr_save_fixed_ranges(void *info)
{
- if (cpu_has_mtrr)
+ if (mtrr_enabled && cpu_has_mtrr)
get_fixed_ranges(mtrr_state.fixed_ranges);
}

diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index d76f13d..e9e001a 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -436,6 +436,9 @@ static int __init mtrr_if_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;

+ if (!mtrr_enabled)
+ return 0;
+
if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
(!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
(!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index ea5f363..7db9c47 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -59,6 +59,7 @@
#define MTRR_TO_PHYS_WC_OFFSET 1000

u32 num_var_ranges;
+int mtrr_enabled;

unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);
@@ -84,6 +85,9 @@ static int have_wrcomb(void)
{
struct pci_dev *dev;

+ if (!mtrr_enabled)
+ return 0;
+
dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
if (dev != NULL) {
/*
@@ -286,7 +290,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
int i, replace, error;
mtrr_type ltype;

- if (!mtrr_if)
+ if (!mtrr_enabled)
return -ENXIO;

error = mtrr_if->validate_add_page(base, size, type);
@@ -388,6 +392,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,

static int mtrr_check(unsigned long base, unsigned long size)
{
+ if (!mtrr_enabled)
+ return -ENODEV;
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
@@ -463,8 +469,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
unsigned long lbase, lsize;
int error = -EINVAL;

- if (!mtrr_if)
- return -ENXIO;
+ if (!mtrr_enabled)
+ return -ENODEV;

max = num_var_ranges;
/* No CPU hotplug when we change MTRR entries */
@@ -523,6 +529,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
*/
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
+ if (!mtrr_enabled)
+ return -ENODEV;
if (mtrr_check(base, size))
return -EINVAL;
return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
@@ -545,7 +553,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size)
{
int ret;

- if (pat_enabled)
+ if (pat_enabled || !mtrr_enabled)
return 0; /* Success! (We don't need to do anything.) */

ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
@@ -734,6 +742,7 @@ void __init mtrr_bp_init(void)
}

if (mtrr_if) {
+ mtrr_enabled = true;
set_num_var_ranges();
init_table();
if (use_intel()) {
@@ -744,12 +753,13 @@ void __init mtrr_bp_init(void)
mtrr_if->set_all();
}
}
- }
+ } else
+ pr_info("mtrr: system does not support MTRR\n");
}

void mtrr_ap_init(void)
{
- if (!use_intel() || mtrr_aps_delayed_init)
+ if (!use_intel() || mtrr_aps_delayed_init || !mtrr_enabled)
return;
/*
* Ideally we should hold mtrr_mutex here to avoid mtrr entries
@@ -774,6 +784,9 @@ void mtrr_save_state(void)
{
int first_cpu;

+ if (!mtrr_enabled)
+ return;
+
get_online_cpus();
first_cpu = cpumask_first(cpu_online_mask);
smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
@@ -782,7 +795,7 @@ void mtrr_save_state(void)

void set_mtrr_aps_delayed_init(void)
{
- if (!use_intel())
+ if (!use_intel() || !mtrr_enabled)
return;

mtrr_aps_delayed_init = true;
@@ -810,7 +823,7 @@ void mtrr_aps_init(void)

void mtrr_bp_restore(void)
{
- if (!use_intel())
+ if (!use_intel() || !mtrr_enabled)
return;

mtrr_if->set_all();
@@ -818,7 +831,7 @@ void mtrr_bp_restore(void)

static int __init mtrr_init_finialize(void)
{
- if (!mtrr_if)
+ if (!mtrr_enabled)
return 0;

if (use_intel()) {
--
2.3.2.209.gd67f9d5.dirty

