From: Torsten Duwe <duwe@suse.de>
Date: 2016-02-04
Subject: [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions
At least POWER7/8 have MMUs that don't completely autoload; a normal,
recoverable memory fault can pass through these functions. If a dynamic
tracer function triggers such a fault while any of these functions is
being traced with -mprofile-kernel, the result is endless recursion.
Mark them notrace.
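
To illustrate the failure mode (a minimal sketch; the helper name below
is made up for illustration and is not part of this patch): with
-mprofile-kernel the compiler emits a profiling call at the entry of
every function not marked notrace, so a traced fault-path function
re-enters the tracer, which can fault again.

#include <linux/compiler.h>	/* notrace */

/*
 * Hypothetical fault-path helper. Without notrace, -mprofile-kernel
 * would insert a call into the tracer at function entry; if the tracer
 * then touches memory that is not yet hashed in, we fault again from
 * inside fault handling and recurse forever. notrace suppresses the
 * profiling call at entry and breaks the cycle.
 */
static notrace int hypothetical_fault_helper(unsigned long ea)
{
	/* safe: no profiling call is emitted at function entry */
	return 0;
}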

Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/kernel/process.c | 2 +-
arch/powerpc/mm/fault.c | 2 +-
arch/powerpc/mm/hash_utils_64.c | 18 +++++++++---------
arch/powerpc/mm/hugetlbpage-hash64.c | 2 +-
arch/powerpc/mm/hugetlbpage.c | 4 ++--
arch/powerpc/mm/mem.c | 2 +-
arch/powerpc/mm/pgtable_64.c | 2 +-
arch/powerpc/mm/slb.c | 6 +++---
arch/powerpc/mm/slice.c | 8 ++++----
9 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dccc87e..5e72e8b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -822,7 +822,7 @@ static inline void __switch_to_tm(struct task_struct *prev)
* don't know which of the checkpointed state and the transactional
* state to use.
*/
-void restore_tm_state(struct pt_regs *regs)
+notrace void restore_tm_state(struct pt_regs *regs)
{
unsigned long msr_diff;

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a67c6d7..125be37 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -205,7 +205,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+notrace int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
enum ctx_state prev_state = exception_enter();
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ba59d59..01d3dee 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -825,7 +825,7 @@ void early_init_mmu_secondary(void)
/*
* Called by asm hashtable.S for doing lazy icache flush
*/
-unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
+notrace unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
struct page *page;

@@ -846,7 +846,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
}

#ifdef CONFIG_PPC_MM_SLICES
-static unsigned int get_paca_psize(unsigned long addr)
+static notrace unsigned int get_paca_psize(unsigned long addr)
{
u64 lpsizes;
unsigned char *hpsizes;
@@ -875,7 +875,7 @@ unsigned int get_paca_psize(unsigned long addr)
* For now this makes the whole process use 4k pages.
*/
#ifdef CONFIG_PPC_64K_PAGES
-void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+notrace void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
return;
@@ -897,7 +897,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
* Result is 0: full permissions, _PAGE_RW: read-only,
* _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
*/
-static int subpage_protection(struct mm_struct *mm, unsigned long ea)
+static notrace int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
struct subpage_prot_table *spt = &mm->context.spt;
u32 spp = 0;
@@ -945,7 +945,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
trap, vsid, ssize, psize, lpsize, pte);
}

-static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
+static notrace void check_paca_psize(unsigned long ea, struct mm_struct *mm,
int psize, bool user_region)
{
if (user_region) {
@@ -967,7 +967,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
* -1 - critical hash insertion error
* -2 - access not permitted by subpage protection mechanism
*/
-int hash_page_mm(struct mm_struct *mm, unsigned long ea,
+notrace int hash_page_mm(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap,
unsigned long flags)
{
@@ -1165,7 +1165,7 @@ bail:
}
EXPORT_SYMBOL_GPL(hash_page_mm);

-int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+notrace int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
unsigned long dsisr)
{
unsigned long flags = 0;
@@ -1296,7 +1296,7 @@ out_exit:
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
* do not forget to update the assembly call site !
*/
-void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
+notrace void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
unsigned long flags)
{
unsigned long hash, index, shift, hidx, slot;
@@ -1444,7 +1444,7 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
exception_exit(prev_state);
}

-long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
+notrace long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
{
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index e2138c7..17fc139 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,7 +18,7 @@ extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long pa, unsigned long rlags,
unsigned long vflags, int psize, int ssize);

-int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
+notrace int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, unsigned long flags,
int ssize, unsigned int shift, unsigned int mmu_psize)
{
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 744e24b..70dda66 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -870,7 +870,7 @@ static int __init hugetlbpage_init(void)
#endif
arch_initcall(hugetlbpage_init);

-void flush_dcache_icache_hugepage(struct page *page)
+notrace void flush_dcache_icache_hugepage(struct page *page)
{
int i;
void *start;
@@ -903,7 +903,7 @@ void flush_dcache_icache_hugepage(struct page *page)
* when we have MSR[EE] = 0 but the paca->soft_enabled = 1
*/

-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+notrace pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *shift)
{
pgd_t pgd, *pgdp;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 22d94c3..f690e8a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -406,7 +406,7 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);

-void flush_dcache_icache_page(struct page *page)
+notrace void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
if (PageCompound(page)) {
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3124a20..bb9041b 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -442,7 +442,7 @@ static void page_table_free_rcu(void *table)
}
}

-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+notrace void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
unsigned long pgf = (unsigned long)table;

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 825b687..852bd54 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -96,7 +96,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
: "memory" );
}

-static void __slb_flush_and_rebolt(void)
+static notrace void __slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* and PR KVM appropriately too. */
@@ -136,7 +136,7 @@ static void __slb_flush_and_rebolt(void)
: "memory");
}

-void slb_flush_and_rebolt(void)
+notrace void slb_flush_and_rebolt(void)
{

WARN_ON(!irqs_disabled());
@@ -151,7 +151,7 @@ void slb_flush_and_rebolt(void)
get_paca()->slb_cache_ptr = 0;
}

-void slb_vmalloc_update(void)
+notrace void slb_vmalloc_update(void)
{
unsigned long vflags;

diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 42954f0..5fb0e5b 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -76,8 +76,8 @@ static void slice_print_mask(const char *label, struct slice_mask mask) {}

#endif

-static struct slice_mask slice_range_to_mask(unsigned long start,
- unsigned long len)
+static notrace struct slice_mask slice_range_to_mask(unsigned long start,
+ unsigned long len)
{
unsigned long end = start + len - 1;
struct slice_mask ret = { 0, 0 };
@@ -563,7 +563,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
current->mm->context.user_psize, 1);
}

-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+notrace unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
unsigned char *hpsizes;
int index, mask_index;
@@ -644,7 +644,7 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
spin_unlock_irqrestore(&slice_convert_lock, flags);
}

-void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+notrace void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
{
struct slice_mask mask = slice_range_to_mask(start, len);
--
1.8.5.6