Subject: [PATCH AUTOSEL 4.19 060/146] kprobes: Return error if we fail to reuse kprobe instead of BUG_ON()
From: Masami Hiramatsu <mhiramat@kernel.org>

[ Upstream commit 819319fc93461c07b9cdb3064f154bd8cfd48172 ]

Make reuse_unused_kprobe() return an error code if it
fails to reuse an unused kprobe for an optprobe, instead
of calling BUG_ON().
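
For illustration only, here is a minimal, self-contained userspace C sketch
of the pattern this patch applies: a helper that used to BUG_ON() on an
unexpected state now returns an error code, and its caller propagates that
error instead of letting the kernel crash. The names below (fake_kprobe,
reuse_unused_probe, register_probe) are simplified stand-ins invented for
this sketch, not the real kernel/kprobes.c implementation.

/*
 * Simplified stand-in types and helpers; not the real kprobes code.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_kprobe {
	bool unused;    /* stand-in for kprobe_unused(ap) */
	bool optready;  /* stand-in for kprobe_optready(ap) */
};

/* Before this patch: void return, BUG_ON() on failure. After: report it. */
static int reuse_unused_probe(struct fake_kprobe *ap)
{
	if (!ap->unused)
		return -EINVAL;         /* was BUG_ON(!kprobe_unused(ap)) */
	if (!ap->optready)
		return -EINVAL;         /* was BUG_ON(!kprobe_optready(ap)) */
	/* ... re-enable and re-optimize the probe here ... */
	return 0;
}

/* The caller now propagates the error instead of relying on a panic. */
static int register_probe(struct fake_kprobe *ap)
{
	if (ap->unused) {
		int ret = reuse_unused_probe(ap);

		if (ret)
			return ret;
	}
	/* ... rest of registration ... */
	return 0;
}

int main(void)
{
	struct fake_kprobe p = { .unused = true, .optready = false };

	/* Prints -22 (-EINVAL): the failure is reported, not fatal. */
	printf("register_probe() = %d\n", register_probe(&p));
	return 0;
}

In the actual patch, register_aggr_kprobe() does the equivalent propagation
through its existing "out:" error path, as the diff below shows.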

    Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
    Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Link: http://lkml.kernel.org/r/153666124040.21306.14150398706331307654.stgit@devbox
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
 kernel/kprobes.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab257be4d924..4344381664cc 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
 	struct optimized_kprobe *op;
+	int ret;
 
 	BUG_ON(!kprobe_unused(ap));
 	/*
@@ -714,8 +715,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
 	/* Enable the probe again */
 	ap->flags &= ~KPROBE_FLAG_DISABLED;
 	/* Optimize it again (remove from op->list) */
-	BUG_ON(!kprobe_optready(ap));
+	ret = kprobe_optready(ap);
+	if (ret)
+		return ret;
+
 	optimize_kprobe(ap);
+	return 0;
 }
 
 /* Remove optimized instructions */
@@ -940,11 +945,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p) kprobe_disabled(p)
 #define wait_for_kprobe_optimizer() do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+	/*
+	 * If the optimized kprobe is NOT supported, the aggr kprobe is
+	 * released at the same time that the last aggregated kprobe is
+	 * unregistered.
+	 * Thus there should be no chance to reuse unused kprobe.
+	 */
 	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-	BUG_ON(kprobe_unused(ap));
+	return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -1318,9 +1328,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 			goto out;
 		}
 		init_aggr_kprobe(ap, orig_p);
-	} else if (kprobe_unused(ap))
+	} else if (kprobe_unused(ap)) {
 		/* This probe is going to die. Rescue it */
-		reuse_unused_kprobe(ap);
+		ret = reuse_unused_kprobe(ap);
+		if (ret)
+			goto out;
+	}
 
 	if (kprobe_gone(ap)) {
 		/*
    --
    2.17.1