Subject: [PATCH 07/12] task_isolation: arch/arm: enable task isolation functionality
Date: Wed, 4 Mar 2020
From: Francis Giraldeau <francis.giraldeau@gmail.com>

This patch is a port of the task isolation functionality to the 32-bit arm
architecture. Task isolation needs an additional thread flag, which requires
changing the entry assembly code to handle flag masks wider than one byte.
The constants _TIF_SYSCALL_WORK and _TIF_WORK_MASK are therefore now placed
in the literal pool and loaded into a register instead of being encoded as
immediates. The rest of the patch is straightforward and reflects what is
done on other architectures.
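
For illustration only (not part of the patch; the register numbers and the
branch target are taken from the entry-common.S hunks below), the change
follows this pattern. An ARM/Thumb-2 tst immediate must be a modified
immediate constant, essentially an 8-bit value rotated into position, so a
flag mask spanning more than one byte is loaded from the literal pool:

    @ before: the mask is encoded directly in the instruction and must
    @ fit the modified-immediate encoding
    @       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
    @ after: the assembler emits the full 32-bit mask into the literal
    @ pool and the code loads it into a scratch register first
            ldr     r2, =_TIF_SYSCALL_WORK | _TIF_WORK_MASK
            tst     r1, r2
            bne     fast_work_pending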

To avoid problems with the tst instruction in the v7m build, we renumber
TIF_SECCOMP to bit 8 and let TIF_TASK_ISOLATION use bit 7.
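
As a back-of-the-envelope check on that choice (a sketch, assuming the usual
arm numbering for the flags this hunk does not touch: TIF_SIGPENDING=0,
TIF_NEED_RESCHED=1, TIF_NOTIFY_RESUME=2, TIF_UPROBE=3):

    @ with TIF_TASK_ISOLATION = 7 and TIF_SECCOMP = 8:
    @   _TIF_WORK_MASK    = 0x8f    @ bits 0-3 plus bit 7: still one byte,
    @                               @ still encodable as a tst immediate
    @   _TIF_SYSCALL_WORK = 0x1f0   @ bits 4-8: needs the literal pool
    @ with the numbering reversed, _TIF_WORK_MASK would also grow past one
    @ byte and (presumably) break the immediate tst left in the v7m entry code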

Signed-off-by: Alex Belits <abelits@marvell.com>
---
arch/arm/Kconfig | 1 +
arch/arm/include/asm/thread_info.h | 10 +++++++---
arch/arm/kernel/entry-common.S | 15 ++++++++++-----
arch/arm/kernel/signal.c | 10 +++++++++-
arch/arm/kernel/smp.c | 4 ++++
arch/arm/mm/fault.c | 8 +++++++-
6 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97864aabc2a6..1a66e6c6807c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -67,6 +67,7 @@ config ARM
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
+ select HAVE_ARCH_TASK_ISOLATION
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARM_SMCCC if CPU_V7
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 0d0d5178e2c3..ec3c2084c391 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -139,7 +139,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+#define TIF_TASK_ISOLATION 7 /* task isolation active */
+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */

#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
@@ -153,18 +154,21 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_TASK_ISOLATION (1 << TIF_TASK_ISOLATION)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)

/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+ _TIF_TASK_ISOLATION)

/*
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_TASK_ISOLATION)

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 271cb8a1eba1..6ceb5cb808a9 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -53,7 +53,8 @@ __ret_fast_syscall:
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ ldr r2, =_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, r2
bne fast_work_pending


@@ -90,7 +91,8 @@ __ret_fast_syscall:
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ ldr r2, =_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, r2
beq no_work_pending
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
@@ -98,7 +100,8 @@ ENDPROC(ret_fast_syscall)
/* Slower path - fall through to work_pending */
#endif

- tst r1, #_TIF_SYSCALL_WORK
+ ldr r2, =_TIF_SYSCALL_WORK
+ tst r1, r2
bne __sys_trace_return_nosave
slow_work_pending:
mov r0, sp @ 'regs'
@@ -131,7 +134,8 @@ ENTRY(ret_to_user_from_irq)
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS]
- tst r1, #_TIF_WORK_MASK
+ ldr r2, =_TIF_WORK_MASK
+ tst r1, r2
bne slow_work_pending
no_work_pending:
asm_trace_hardirqs_on save = 0
@@ -251,7 +255,8 @@ local_restart:
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
stmdb sp!, {r4, r5} @ push fifth and sixth args

- tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ ldr r11, =_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ tst r10, r11
bne __sys_trace

invoke_syscall tbl, scno, r10, __ret_fast_syscall
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ab2568996ddb..f775cbcb7487 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -12,6 +12,7 @@
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>
+#include <linux/isolation.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -639,6 +640,9 @@ static int do_signal(struct pt_regs *regs, int syscall)
return 0;
}

+#define WORK_PENDING_LOOP_FLAGS (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
@@ -676,7 +680,11 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
}
local_irq_disable();
thread_flags = current_thread_info()->flags;
- } while (thread_flags & _TIF_WORK_MASK);
+ } while (thread_flags & WORK_PENDING_LOOP_FLAGS);
+
+ if (thread_flags & _TIF_TASK_ISOLATION)
+ task_isolation_start();
+
return 0;
}

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 46e1be9e57a8..95f19b980776 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
+#include <linux/isolation.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
@@ -560,6 +561,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
+ task_isolation_remote_cpumask(mask, "wakeup IPI");
smp_cross_call(mask, IPI_WAKEUP);
}

@@ -579,6 +581,7 @@ void arch_irq_work_raise(void)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
+ task_isolation_remote_cpumask(mask, "timer IPI");
smp_cross_call(mask, IPI_TIMER);
}
#endif
@@ -702,6 +705,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

void smp_send_reschedule(int cpu)
{
+ task_isolation_remote(cpu, "reschedule IPI");
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bd0f4821f7e1..acd11a69c4e4 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -17,6 +17,7 @@
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <linux/isolation.h>

#include <asm/pgtable.h>
#include <asm/system_misc.h>
@@ -332,8 +333,13 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
*/
- if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+ if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
+ VM_FAULT_BADACCESS)))) {
+ /* No signal was generated, but notify task-isolation tasks. */
+ if (user_mode(regs))
+ task_isolation_interrupt("page fault at %#lx", addr);
return 0;
+ }

/*
* If we are in kernel mode at this point, we
--
2.20.1