    Subject: [ 038/122] x86/mce: Fix siginfo_t->si_addr value for non-recoverable memory faults
    From: Greg KH <gregkh@linuxfoundation.org>

    3.5-stable review patch. If anyone has any objections, please let me know.

    ------------------

    From: Tony Luck <tony.luck@intel.com>

    commit 6751ed65dc6642af64f7b8a440a75563c8aab7ae upstream.

    In commit dad1743e5993f1 ("x86/mce: Only restart instruction after machine
    check recovery if it is safe") we fixed mce_notify_process() to force a
    signal to the current process if it was not restartable (RIPV bit not
    set in MCG_STATUS). But forcing the signal from there means that the
    process is not told the virtual address of the fault via
    siginfo_t->si_addr, which would prevent application-level recovery
    from the fault.

    Make a new MF_MUST_KILL flag bit for memory_failure() et al. to use so
    that the signal we force still carries the right information, namely
    the faulting virtual address in siginfo_t->si_addr.
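
    For context, "application-level recovery" here means a userspace
    SIGBUS handler that reads the poisoned virtual address out of
    siginfo_t->si_addr and steers the program around the lost data. The
    sketch below shows one plausible shape for such a handler; the
    siglongjmp-based recovery path and every name in it are illustrative
    assumptions, not taken from any particular application.

        #define _GNU_SOURCE
        #include <setjmp.h>
        #include <signal.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <unistd.h>

        #ifndef BUS_MCEERR_AR          /* older libc headers may not define it */
        #define BUS_MCEERR_AR 4        /* hardware memory error, action required */
        #endif

        static sigjmp_buf recover_point;
        static void *volatile poisoned_addr;

        /* SIGBUS handler: record the address the kernel reports, then jump
         * past the access that consumed the poisoned data. */
        static void sigbus_handler(int sig, siginfo_t *si, void *ucontext)
        {
            (void)sig;
            (void)ucontext;
            poisoned_addr = si->si_addr;   /* only useful if the kernel fills it in */
            if (si->si_code == BUS_MCEERR_AR)
                siglongjmp(recover_point, 1);
            _exit(EXIT_FAILURE);           /* any other SIGBUS: give up */
        }

        int main(void)
        {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = sigbus_handler;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGBUS, &sa, NULL);

            if (sigsetjmp(recover_point, 1)) {
                /* Recovery path: rebuild or discard whatever lived at
                 * poisoned_addr instead of dying. */
                fprintf(stderr, "recovered from poison at %p\n",
                        (void *)poisoned_addr);
                return 0;
            }

            /* ... normal work that may touch a page with an uncorrected error ... */
            return 0;
        }

    Without MF_MUST_KILL, the non-restartable case fell through to a bare
    force_sig(SIGBUS, current), so a handler like the one above received
    no usable si_addr and had nothing to act on.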

    Signed-off-by: Tony Luck <tony.luck@intel.com>
    Acked-by: Borislav Petkov <borislav.petkov@amd.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
     arch/x86/kernel/cpu/mcheck/mce.c |    6 ++++--
     include/linux/mm.h               |    1 +
     mm/memory-failure.c              |   14 ++++++++------
     3 files changed, 13 insertions(+), 8 deletions(-)

    --- a/arch/x86/kernel/cpu/mcheck/mce.c
    +++ b/arch/x86/kernel/cpu/mcheck/mce.c
    @@ -1186,6 +1186,7 @@ void mce_notify_process(void)
     {
             unsigned long pfn;
             struct mce_info *mi = mce_find_info();
    +        int flags = MF_ACTION_REQUIRED;
     
             if (!mi)
                     mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
    @@ -1200,8 +1201,9 @@ void mce_notify_process(void)
              * doomed. We still need to mark the page as poisoned and alert any
              * other users of the page.
              */
    -        if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
    -            mi->restartable == 0) {
    +        if (!mi->restartable)
    +                flags |= MF_MUST_KILL;
    +        if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
                     pr_err("Memory error not recovered");
                     force_sig(SIGBUS, current);
             }
    --- a/include/linux/mm.h
    +++ b/include/linux/mm.h
    @@ -1591,6 +1591,7 @@ void vmemmap_populate_print_last(void);
     enum mf_flags {
             MF_COUNT_INCREASED = 1 << 0,
             MF_ACTION_REQUIRED = 1 << 1,
    +        MF_MUST_KILL = 1 << 2,
     };
     extern int memory_failure(unsigned long pfn, int trapno, int flags);
     extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
    --- a/mm/memory-failure.c
    +++ b/mm/memory-failure.c
    @@ -345,14 +345,14 @@ static void add_to_kill(struct task_stru
      * Also when FAIL is set do a force kill because something went
      * wrong earlier.
      */
    -static void kill_procs(struct list_head *to_kill, int doit, int trapno,
    +static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
                                 int fail, struct page *page, unsigned long pfn,
                                 int flags)
     {
             struct to_kill *tk, *next;
     
             list_for_each_entry_safe (tk, next, to_kill, nd) {
    -                if (doit) {
    +                if (forcekill) {
                             /*
                              * In case something went wrong with munmapping
                              * make sure the process doesn't catch the
    @@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct
             struct address_space *mapping;
             LIST_HEAD(tokill);
             int ret;
    -        int kill = 1;
    +        int kill = 1, forcekill;
             struct page *hpage = compound_head(p);
             struct page *ppage;
     
    @@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct
              * be called inside page lock (it's recommended but not enforced).
              */
             mapping = page_mapping(hpage);
    -        if (!PageDirty(hpage) && mapping &&
    +        if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
                 mapping_cap_writeback_dirty(mapping)) {
                     if (page_mkclean(hpage)) {
                             SetPageDirty(hpage);
    @@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct
              * Now that the dirty bit has been propagated to the
              * struct page and all unmaps done we can decide if
              * killing is needed or not. Only kill when the page
    -         * was dirty, otherwise the tokill list is merely
    +         * was dirty or the process is not restartable,
    +         * otherwise the tokill list is merely
              * freed. When there was a problem unmapping earlier
              * use a more force-full uncatchable kill to prevent
              * any accesses to the poisoned memory.
              */
    -        kill_procs(&tokill, !!PageDirty(ppage), trapno,
    +        forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
    +        kill_procs(&tokill, forcekill, trapno,
                        ret != SWAP_SUCCESS, p, pfn, flags);
     
             return ret;


