Subject: Re: Clang patch stacks for LTS kernels (v4.4 and v4.9) and status update
Just for the record...

[ OBJDUMP irq_work_tick() ]

$ objdump -d -S --start-address=0x$(grep irq_work_tick System.map |
sed -e "s/ \+.*//") vmlinux | less

[ OBJDUMP native_save_fl() ]

$ objdump -d -S --start-address=0x$(grep native_save_fl System.map |
sed -e "s/ \+.*//") vmlinux | less
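
[ Not part of the original commands: a sketch of a bounded variant. If you
also want objdump to stop at the end of the function, the address of the
symbol that follows it in System.map can be passed to --stop-address. This
assumes the usual sorted, three-column "address type name" System.map
layout. ]

$ sym=irq_work_tick
$ start=$(awk -v s="$sym" '$3 == s { print $1 }' System.map)
$ stop=$(awk -v s="$sym" 'hit { print $1; exit } $3 == s { hit = 1 }' System.map)
$ objdump -d -S --start-address=0x$start --stop-address=0x$stop vmlinux | less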

- sedat -
[ OBJDUMP irq_work_tick() ]

$ objdump -d -S --start-address=0x$(grep irq_work_tick System.map | sed -e "s/ \+.*//") vmlinux | less

vmlinux: file format elf64-x86-64


Disassembly of section .text:

ffffffff811982f0 <irq_work_tick>:
irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
ffffffff811982f0: 41 57 push %r15
ffffffff811982f2: 41 56 push %r14
ffffffff811982f4: 41 54 push %r12
ffffffff811982f6: 53 push %rbx
struct llist_head *raised = this_cpu_ptr(&raised_list);
ffffffff811982f7: 48 c7 c1 80 e5 01 00 mov $0x1e580,%rcx
ffffffff811982fe: 65 48 03 0c 25 d8 f1 add %gs:0xf1d8,%rcx
ffffffff81198305: 00 00
* test whether the list is empty without deleting something from the
* list.
*/
static inline bool llist_empty(const struct llist_head *head)
{
return ACCESS_ONCE(head->first) == NULL;
ffffffff81198307: 48 83 39 00 cmpq $0x0,(%rcx)

if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
ffffffff8119830b: 74 6a je ffffffff81198377 <irq_work_tick+0x87>
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
ffffffff8119830d: 48 8b 04 25 44 3c 0f mov 0xffffffff820f3c44,%rax
ffffffff81198314: 82
ffffffff81198315: a9 00 02 00 00 test $0x200,%eax
ffffffff8119831a: 75 5b jne ffffffff81198377 <irq_work_tick+0x87>
#define __PV_IS_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { func })

static inline notrace unsigned long arch_local_save_flags(void)
{
return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
ffffffff8119831c: ff 14 25 d0 28 02 82 callq *0xffffffff820228d0
BUG_ON(!irqs_disabled());
ffffffff81198323: a9 00 02 00 00 test $0x200,%eax
ffffffff81198328: 0f 85 d4 00 00 00 jne ffffffff81198402 <irq_work_tick+0x112>
ffffffff8119832e: 48 83 39 00 cmpq $0x0,(%rcx)
if (llist_empty(list))
ffffffff81198332: 74 43 je ffffffff81198377 <irq_work_tick+0x87>
ffffffff81198334: 31 db xor %ebx,%ebx
* return the pointer to the first entry. The order of entries
* deleted is from the newest to the oldest added one.
*/
static inline struct llist_node *llist_del_all(struct llist_head *head)
{
return xchg(&head->first, NULL);
ffffffff81198336: 48 87 19 xchg %rbx,(%rcx)
while (llnode != NULL) {
ffffffff81198339: 48 85 db test %rbx,%rbx
ffffffff8119833c: 74 39 je ffffffff81198377 <irq_work_tick+0x87>
ffffffff8119833e: 66 90 xchg %ax,%ax
work = llist_entry(llnode, struct irq_work, llnode);
ffffffff81198340: 48 8d 7b f8 lea -0x8(%rbx),%rdi
flags = work->flags & ~IRQ_WORK_PENDING;
ffffffff81198344: 4c 8b 7b f8 mov -0x8(%rbx),%r15
return node->next;
ffffffff81198348: 4c 8b 23 mov (%rbx),%r12
ffffffff8119834b: 4d 89 fe mov %r15,%r14
ffffffff8119834e: 49 83 e6 fe and $0xfffffffffffffffe,%r14
xchg(&work->flags, flags);
ffffffff81198352: 4c 89 f0 mov %r14,%rax
ffffffff81198355: 48 87 43 f8 xchg %rax,-0x8(%rbx)
work->func(work);
ffffffff81198359: 4c 8b 5b 08 mov 0x8(%rbx),%r11
ffffffff8119835d: e8 de ad 86 00 callq ffffffff81a03140 <__x86_indirect_thunk_r11>
(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
ffffffff81198362: 49 83 e7 fc and $0xfffffffffffffffc,%r15
ffffffff81198366: 4c 89 f0 mov %r14,%rax
ffffffff81198369: f0 4c 0f b1 7b f8 lock cmpxchg %r15,-0x8(%rbx)
ffffffff8119836f: 4c 89 e3 mov %r12,%rbx
while (llnode != NULL) {
ffffffff81198372: 4d 85 e4 test %r12,%r12
ffffffff81198375: 75 c9 jne ffffffff81198340 <irq_work_tick+0x50>
irq_work_run_list(raised);
irq_work_run_list(this_cpu_ptr(&lazy_list));
ffffffff81198377: 48 c7 c1 88 e5 01 00 mov $0x1e588,%rcx
ffffffff8119837e: 65 48 03 0c 25 d8 f1 add %gs:0xf1d8,%rcx
ffffffff81198385: 00 00
ffffffff81198387: ff 14 25 d0 28 02 82 callq *0xffffffff820228d0
BUG_ON(!irqs_disabled());
ffffffff8119838e: a9 00 02 00 00 test $0x200,%eax
ffffffff81198393: 75 5a jne ffffffff811983ef <irq_work_tick+0xff>
return ACCESS_ONCE(head->first) == NULL;
ffffffff81198395: 48 83 39 00 cmpq $0x0,(%rcx)
if (llist_empty(list))
ffffffff81198399: 74 4c je ffffffff811983e7 <irq_work_tick+0xf7>
ffffffff8119839b: 31 db xor %ebx,%ebx
return xchg(&head->first, NULL);
ffffffff8119839d: 48 87 19 xchg %rbx,(%rcx)
while (llnode != NULL) {
ffffffff811983a0: 48 85 db test %rbx,%rbx
ffffffff811983a3: 74 42 je ffffffff811983e7 <irq_work_tick+0xf7>
ffffffff811983a5: 90 nop
ffffffff811983a6: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
ffffffff811983ad: 00 00 00
work = llist_entry(llnode, struct irq_work, llnode);
ffffffff811983b0: 48 8d 7b f8 lea -0x8(%rbx),%rdi
flags = work->flags & ~IRQ_WORK_PENDING;
ffffffff811983b4: 4c 8b 7b f8 mov -0x8(%rbx),%r15
return node->next;
ffffffff811983b8: 4c 8b 23 mov (%rbx),%r12
ffffffff811983bb: 4d 89 fe mov %r15,%r14
ffffffff811983be: 49 83 e6 fe and $0xfffffffffffffffe,%r14
xchg(&work->flags, flags);
ffffffff811983c2: 4c 89 f0 mov %r14,%rax
ffffffff811983c5: 48 87 43 f8 xchg %rax,-0x8(%rbx)
work->func(work);
ffffffff811983c9: 4c 8b 5b 08 mov 0x8(%rbx),%r11
ffffffff811983cd: e8 6e ad 86 00 callq ffffffff81a03140 <__x86_indirect_thunk_r11>
(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
ffffffff811983d2: 49 83 e7 fc and $0xfffffffffffffffc,%r15
ffffffff811983d6: 4c 89 f0 mov %r14,%rax
ffffffff811983d9: f0 4c 0f b1 7b f8 lock cmpxchg %r15,-0x8(%rbx)
ffffffff811983df: 4c 89 e3 mov %r12,%rbx
while (llnode != NULL) {
ffffffff811983e2: 4d 85 e4 test %r12,%r12
ffffffff811983e5: 75 c9 jne ffffffff811983b0 <irq_work_tick+0xc0>
}
ffffffff811983e7: 5b pop %rbx
ffffffff811983e8: 41 5c pop %r12
ffffffff811983ea: 41 5e pop %r14
ffffffff811983ec: 41 5f pop %r15
ffffffff811983ee: c3 retq
ffffffff811983ef: 0f 0b ud2
BUG_ON(!irqs_disabled());
ffffffff811983f1: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
ffffffff811983f6: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
ffffffff811983fd: 00 00 00
ffffffff81198400: eb fe jmp ffffffff81198400 <irq_work_tick+0x110>
ffffffff81198402: 0f 0b ud2
ffffffff81198404: 66 90 xchg %ax,%ax
ffffffff81198406: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
ffffffff8119840d: 00 00 00
ffffffff81198410: eb fe jmp ffffffff81198410 <irq_work_tick+0x120>
ffffffff81198412: 0f 1f 40 00 nopl 0x0(%rax)
ffffffff81198416: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
ffffffff8119841d: 00 00 00

ffffffff81198420 <irq_work_sync>:
...
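
[ The two "callq *0xffffffff820228d0" instructions above go through the
paravirt pv_irq_ops.save_fl slot, per the interleaved source. A quick way to
sanity-check which object that slot address falls into is to look it up in
System.map; a rough sketch, assuming the sorted three-column format: ]

$ grep ' pv_irq_ops' System.map
$ # closest symbol at or below the slot address (string comparison is fine
$ # here because all addresses are 16 lower-case hex digits)
$ awk '$1 <= "ffffffff820228d0"' System.map | tail -1
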
[ OBJDUMP native_save_fl() ]

$ objdump -d -S --start-address=0x$(grep native_save_fl System.map | sed -e "s/ \+.*//") vmlinux | less

vmlinux: file format elf64-x86-64


Disassembly of section .text:

ffffffff810633a0 <native_save_fl>:
/*
* Interrupt control:
*/

static inline __nostackprotector unsigned long native_save_fl(void)
{
ffffffff810633a0: 55 push %rbp
ffffffff810633a1: 48 89 e5 mov %rsp,%rbp
ffffffff810633a4: 50 push %rax
/*
* "=rm" is safe here, because "pop" adjusts the stack before
* it evaluates its effective address -- this is part of the
* documented behavior of the "pop" instruction.
*/
asm volatile("# __raw_save_flags\n\t"
ffffffff810633a5: 9c pushfq
ffffffff810633a6: 8f 45 f8 popq -0x8(%rbp)
"pushf ; pop %0"
: "=rm" (flags)
: /* no input */
: "memory");

return flags;
ffffffff810633a9: 48 8b 45 f8 mov -0x8(%rbp),%rax
ffffffff810633ad: 48 83 c4 08 add $0x8,%rsp
ffffffff810633b1: 5d pop %rbp
ffffffff810633b2: c3 retq
ffffffff810633b3: 0f 1f 00 nopl (%rax)
ffffffff810633b6: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
ffffffff810633bd: 00 00 00

ffffffff810633c0 <native_restore_fl>:
}

static inline void native_restore_fl(unsigned long flags)
{
...
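
[ For reference, the same dump bounded to native_save_fl alone, reusing the
two addresses visible above (start of native_save_fl, start of
native_restore_fl). Just a convenience variant of the command already shown: ]

$ objdump -d -S --start-address=0xffffffff810633a0 \
      --stop-address=0xffffffff810633c0 vmlinux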
