Subject: [lkp] [hperf_hmp] 9b7aaf11b8: -55.6% hackbench.throughput
Date: 9 Nov 2015
FYI, we noticed the following changes on

https://github.com/0day-ci/linux Arseniy-Krasnov/High-performance-balancing-logic-for-big-LITTLE/20151106-200901
commit 9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27 ("hperf_hmp: task CPU selection logic.")
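
For reference, the tested commit can be checked out from the 0day tree with standard git commands (a minimal sketch; the remote name "0day" is arbitrary):

    # add the 0day mirror and fetch the submitted branch
    git remote add 0day https://github.com/0day-ci/linux
    git fetch 0day Arseniy-Krasnov/High-performance-balancing-logic-for-big-LITTLE/20151106-200901
    # check out the exact commit this report compares against its parent
    git checkout 9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27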


=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/testtime/test:
ivb43/aim9/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/300s/fork_test

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
value ± %stddev      %change      value ± %stddev    metric
3629 ± 0% +79.8% 6527 ± 0% aim9.fork_test.ops_per_sec
2397 ± 0% +3.8% 2488 ± 2% aim9.time.maximum_resident_set_size
36751387 ± 0% +79.8% 66094007 ± 0% aim9.time.minor_page_faults
106.00 ± 0% -1.9% 104.00 ± 0% aim9.time.percent_of_cpu_this_job_got
2177097 ± 0% +79.7% 3912847 ± 0% aim9.time.voluntary_context_switches
384576 ± 2% -19.8% 308345 ± 1% softirqs.TIMER
18717 ± 0% +55.2% 29047 ± 0% vmstat.system.cs
1992 ± 0% +23.0% 2450 ± 1% vmstat.system.in
0.00 ± -1% +Inf% 4667955 ±159% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 12289867 ±167% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 13417616 ±168% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
36751387 ± 0% +79.8% 66094007 ± 0% time.minor_page_faults
7.79 ± 1% -29.6% 5.49 ± 1% time.user_time
2177097 ± 0% +79.7% 3912847 ± 0% time.voluntary_context_switches
18182069 ± 2% +105.0% 37282110 ± 20% numa-numastat.node0.local_node
18185182 ± 2% +105.0% 37283692 ± 20% numa-numastat.node0.numa_hit
17845226 ± 1% +47.1% 26257212 ± 29% numa-numastat.node1.local_node
17848334 ± 1% +47.1% 26261870 ± 29% numa-numastat.node1.numa_hit
9231389 ± 2% +103.2% 18760144 ± 20% numa-vmstat.node0.numa_hit
9194586 ± 2% +104.0% 18756465 ± 20% numa-vmstat.node0.numa_local
9020640 ± 1% +46.1% 13177749 ± 29% numa-vmstat.node1.numa_hit
8970377 ± 1% +46.0% 13094337 ± 29% numa-vmstat.node1.numa_local
50263 ± 57% +65.9% 83410 ± 2% numa-vmstat.node1.numa_other
187073 ± 0% +27.5% 238459 ± 0% meminfo.Active
37344 ± 0% +29.7% 48445 ± 0% meminfo.Active(anon)
149728 ± 0% +26.9% 190013 ± 0% meminfo.Active(file)
26246 ± 1% +19.4% 31326 ± 1% meminfo.AnonPages
124178 ± 1% +42.7% 177141 ± 2% meminfo.Committed_AS
16449 ± 3% -7.1% 15283 ± 4% meminfo.KernelStack
23552 ± 0% +19.4% 28124 ± 0% meminfo.Shmem
1.94 ± 0% +25.8% 2.44 ± 1% turbostat.%Busy
62.00 ± 0% +19.4% 74.00 ± 1% turbostat.Avg_MHz
19.19 ± 1% +237.2% 64.70 ± 1% turbostat.CPU%c1
2.52 ± 14% -96.4% 0.09 ± 20% turbostat.CPU%c3
76.35 ± 0% -57.1% 32.77 ± 2% turbostat.CPU%c6
71.02 ± 0% +7.0% 76.01 ± 0% turbostat.CorWatt
1.02 ± 6% -92.9% 0.07 ± 5% turbostat.Pkg%pc2
101.32 ± 0% +5.2% 106.59 ± 0% turbostat.PkgWatt
5.393e+08 ± 4% +1110.3% 6.527e+09 ± 1% cpuidle.C1-IVT.time
169428 ± 5% +533.0% 1072450 ± 2% cpuidle.C1-IVT.usage
61798058 ± 8% +390.4% 3.03e+08 ± 2% cpuidle.C1E-IVT.time
21844 ± 7% +5834.4% 1296314 ± 9% cpuidle.C1E-IVT.usage
2.426e+08 ± 13% -85.1% 36261657 ± 8% cpuidle.C3-IVT.time
54288 ± 6% +103.7% 110586 ± 25% cpuidle.C3-IVT.usage
1.332e+10 ± 0% -45.7% 7.23e+09 ± 1% cpuidle.C6-IVT.time
2561092 ± 0% -25.8% 1899960 ± 3% cpuidle.C6-IVT.usage
3703941 ± 16% +163.2% 9749344 ± 49% cpuidle.POLL.time
322.50 ± 9% +264.6% 1175 ± 20% cpuidle.POLL.usage
9333 ± 0% +29.9% 12124 ± 0% proc-vmstat.nr_active_anon
37430 ± 0% +26.9% 47500 ± 0% proc-vmstat.nr_active_file
6561 ± 1% +19.5% 7843 ± 1% proc-vmstat.nr_anon_pages
1028 ± 3% -8.0% 945.75 ± 4% proc-vmstat.nr_kernel_stack
5887 ± 0% +19.4% 7030 ± 0% proc-vmstat.nr_shmem
36012779 ± 1% +76.4% 63517057 ± 2% proc-vmstat.numa_hit
36006558 ± 1% +76.4% 63510817 ± 2% proc-vmstat.numa_local
29705 ± 0% +71.7% 51008 ± 0% proc-vmstat.pgactivate
2594260 ± 2% +112.4% 5508951 ± 25% proc-vmstat.pgalloc_dma32
37064067 ± 1% +76.3% 65328590 ± 2% proc-vmstat.pgalloc_normal
37365693 ± 0% +78.4% 66653865 ± 0% proc-vmstat.pgfault
39631131 ± 1% +78.6% 70787743 ± 1% proc-vmstat.pgfree
24355 ± 2% -21.4% 19143 ± 13% slabinfo.kmalloc-192.active_objs
24503 ± 2% -21.0% 19368 ± 13% slabinfo.kmalloc-192.num_objs
46778 ± 1% +50.5% 70406 ± 13% slabinfo.kmalloc-32.active_objs
364.75 ± 1% +51.6% 553.00 ± 13% slabinfo.kmalloc-32.active_slabs
46778 ± 1% +51.5% 70858 ± 13% slabinfo.kmalloc-32.num_objs
364.75 ± 1% +51.6% 553.00 ± 13% slabinfo.kmalloc-32.num_slabs
65810 ± 0% +18.6% 78036 ± 0% slabinfo.kmalloc-64.active_objs
1031 ± 0% +18.6% 1223 ± 0% slabinfo.kmalloc-64.active_slabs
66055 ± 0% +18.6% 78325 ± 0% slabinfo.kmalloc-64.num_objs
1031 ± 0% +18.6% 1223 ± 0% slabinfo.kmalloc-64.num_slabs
471.75 ± 4% -27.0% 344.25 ± 12% slabinfo.kmem_cache.active_objs
471.75 ± 4% -27.0% 344.25 ± 12% slabinfo.kmem_cache.num_objs
2628 ± 0% +32.3% 3479 ± 1% slabinfo.mm_struct.active_objs
2709 ± 0% +31.0% 3549 ± 2% slabinfo.mm_struct.num_objs
5500 ± 4% -28.6% 3929 ± 11% slabinfo.signal_cache.active_objs
5656 ± 4% -28.3% 4055 ± 12% slabinfo.signal_cache.num_objs
21111 ± 3% -20.9% 16701 ± 3% slabinfo.vm_area_struct.active_objs
21189 ± 3% -21.2% 16701 ± 3% slabinfo.vm_area_struct.num_objs
1.30 ± 1% +60.3% 2.08 ± 6% perf-profile.cycles-pp.__alloc_pages_nodemask.alloc_pages_current.pte_alloc_one.__pte_alloc.copy_page_range
3.54 ± 5% -15.4% 3.00 ± 0% perf-profile.cycles-pp.__do_page_fault.do_page_fault.page_fault
0.72 ± 4% +55.2% 1.12 ± 9% perf-profile.cycles-pp.__pmd_alloc.copy_page_range.copy_process._do_fork.sys_clone
1.74 ± 1% +52.0% 2.64 ± 6% perf-profile.cycles-pp.__pte_alloc.copy_page_range.copy_process._do_fork.sys_clone
0.58 ± 18% +69.7% 0.98 ± 25% perf-profile.cycles-pp.__pud_alloc.copy_page_range.copy_process._do_fork.sys_clone
15.18 ± 1% +41.2% 21.42 ± 3% perf-profile.cycles-pp._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.01 ± 34% +5720.0% 0.73 ± 47% perf-profile.cycles-pp.activate_task.ttwu_do_activate.sched_ttwu_pending.cpu_startup_entry.start_secondary
0.58 ± 4% +56.7% 0.91 ± 9% perf-profile.cycles-pp.alloc_pages_current.__pmd_alloc.copy_page_range.copy_process._do_fork
1.31 ± 1% +61.0% 2.11 ± 6% perf-profile.cycles-pp.alloc_pages_current.pte_alloc_one.__pte_alloc.copy_page_range.copy_process
1.20 ± 4% +81.3% 2.18 ± 3% perf-profile.cycles-pp.anon_vma_clone.anon_vma_fork.copy_process._do_fork.sys_clone
2.36 ± 1% +60.7% 3.80 ± 3% perf-profile.cycles-pp.anon_vma_fork.copy_process._do_fork.sys_clone.entry_SYSCALL_64_fastpath
48.22 ± 2% -42.2% 27.88 ± 8% perf-profile.cycles-pp.call_cpuidle.cpu_startup_entry.start_secondary
0.91 ± 2% +52.8% 1.38 ± 9% perf-profile.cycles-pp.clear_page_c_e.__alloc_pages_nodemask.alloc_pages_current.pte_alloc_one.__pte_alloc
1.40 ± 2% -45.5% 0.76 ± 2% perf-profile.cycles-pp.copy_page.do_wp_page.handle_mm_fault.__do_page_fault.do_page_fault
4.96 ± 3% +55.1% 7.69 ± 7% perf-profile.cycles-pp.copy_page_range.copy_process._do_fork.sys_clone.entry_SYSCALL_64_fastpath
13.48 ± 1% +45.0% 19.55 ± 3% perf-profile.cycles-pp.copy_process._do_fork.sys_clone.entry_SYSCALL_64_fastpath
52.02 ± 2% -38.7% 31.89 ± 8% perf-profile.cycles-pp.cpu_startup_entry.start_secondary
48.19 ± 2% -42.2% 27.86 ± 8% perf-profile.cycles-pp.cpuidle_enter.call_cpuidle.cpu_startup_entry.start_secondary
47.69 ± 2% -42.4% 27.48 ± 7% perf-profile.cycles-pp.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry.start_secondary
3.61 ± 5% -15.9% 3.03 ± 1% perf-profile.cycles-pp.do_page_fault.page_fault
2.12 ± 5% +27.1% 2.69 ± 1% perf-profile.cycles-pp.do_wait.sys_wait4.entry_SYSCALL_64_fastpath
18.04 ± 1% +35.3% 24.40 ± 3% perf-profile.cycles-pp.entry_SYSCALL_64_fastpath
3.16 ± 5% -13.9% 2.72 ± 2% perf-profile.cycles-pp.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
48.47 ± 2% -42.3% 27.98 ± 7% perf-profile.cycles-pp.intel_idle.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry
1.49 ± 8% -16.6% 1.24 ± 6% perf-profile.cycles-pp.kthread.ret_from_fork
3.63 ± 5% -15.7% 3.06 ± 0% perf-profile.cycles-pp.page_fault
1.35 ± 1% +59.6% 2.16 ± 6% perf-profile.cycles-pp.pte_alloc_one.__pte_alloc.copy_page_range.copy_process._do_fork
1.51 ± 8% -17.7% 1.24 ± 6% perf-profile.cycles-pp.ret_from_fork
0.02 ± 19% +3611.1% 0.83 ± 47% perf-profile.cycles-pp.sched_ttwu_pending.cpu_startup_entry.start_secondary
1.00 ± 3% -18.9% 0.81 ± 13% perf-profile.cycles-pp.schedule.schedule_preempt_disabled.cpu_startup_entry.start_secondary
1.02 ± 3% -17.4% 0.84 ± 14% perf-profile.cycles-pp.schedule_preempt_disabled.cpu_startup_entry.start_secondary
0.80 ± 1% +25.7% 1.00 ± 4% perf-profile.cycles-pp.select_task_rq_fair.wake_up_new_task._do_fork.sys_clone.entry_SYSCALL_64_fastpath
52.15 ± 2% -38.7% 31.98 ± 8% perf-profile.cycles-pp.start_secondary
15.20 ± 1% +41.0% 21.42 ± 3% perf-profile.cycles-pp.sys_clone.entry_SYSCALL_64_fastpath
2.19 ± 5% +25.0% 2.74 ± 1% perf-profile.cycles-pp.sys_wait4.entry_SYSCALL_64_fastpath
0.96 ± 5% -28.7% 0.68 ± 24% perf-profile.cycles-pp.tick_nohz_idle_exit.cpu_startup_entry.start_secondary
0.02 ± 24% +4328.6% 0.78 ± 47% perf-profile.cycles-pp.ttwu_do_activate.constprop.85.sched_ttwu_pending.cpu_startup_entry.start_secondary
1.47 ± 4% +32.3% 1.94 ± 2% perf-profile.cycles-pp.wait_consider_task.do_wait.sys_wait4.entry_SYSCALL_64_fastpath
1.43 ± 1% +16.5% 1.66 ± 4% perf-profile.cycles-pp.wake_up_new_task._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.90 ± 2% +54.2% 1.39 ± 2% perf-profile.cycles-pp.wp_page_copy.isra.58.do_wp_page.handle_mm_fault.__do_page_fault.do_page_fault
7438 ± 4% +111.4% 15724 ± 23% sched_debug.cfs_rq[0]:/.exec_clock
52188 ± 3% -52.1% 25013 ± 24% sched_debug.cfs_rq[0]:/.min_vruntime
5.50 ± 20% -45.5% 3.00 ± 0% sched_debug.cfs_rq[0]:/.nr_spread_over
43475 ± 4% -78.7% 9245 ± 9% sched_debug.cfs_rq[10]:/.min_vruntime
1004 ± 5% +15.5% 1159 ± 4% sched_debug.cfs_rq[10]:/.tg_load_avg
58.75 ± 68% -67.7% 19.00 ± 77% sched_debug.cfs_rq[10]:/.util_avg
43461 ± 3% -77.3% 9856 ± 18% sched_debug.cfs_rq[11]:/.min_vruntime
2.75 ± 90% -100.0% 0.00 ± -1% sched_debug.cfs_rq[11]:/.nr_spread_over
1004 ± 5% +15.4% 1158 ± 4% sched_debug.cfs_rq[11]:/.tg_load_avg
47911 ± 3% -69.7% 14497 ± 32% sched_debug.cfs_rq[12]:/.min_vruntime
1005 ± 5% +15.6% 1163 ± 4% sched_debug.cfs_rq[12]:/.tg_load_avg
5613 ± 3% -21.3% 4417 ± 8% sched_debug.cfs_rq[13]:/.exec_clock
47734 ± 6% -78.6% 10195 ± 20% sched_debug.cfs_rq[13]:/.min_vruntime
-4454 ±-27% +232.7% -14818 ±-33% sched_debug.cfs_rq[13]:/.spread0
1005 ± 5% +16.8% 1174 ± 4% sched_debug.cfs_rq[13]:/.tg_load_avg
48698 ± 6% -74.2% 12581 ± 37% sched_debug.cfs_rq[14]:/.min_vruntime
-3490 ±-70% +256.2% -12432 ±-54% sched_debug.cfs_rq[14]:/.spread0
1004 ± 4% +16.9% 1175 ± 4% sched_debug.cfs_rq[14]:/.tg_load_avg
48999 ± 4% -72.3% 13582 ± 26% sched_debug.cfs_rq[15]:/.min_vruntime
-3190 ±-104% +258.4% -11431 ±-34% sched_debug.cfs_rq[15]:/.spread0
1005 ± 4% +17.1% 1177 ± 4% sched_debug.cfs_rq[15]:/.tg_load_avg
5221 ± 5% -19.5% 4203 ± 14% sched_debug.cfs_rq[16]:/.exec_clock
47805 ± 3% -77.2% 10903 ± 29% sched_debug.cfs_rq[16]:/.min_vruntime
-4384 ±-34% +221.8% -14110 ±-45% sched_debug.cfs_rq[16]:/.spread0
1004 ± 4% +17.2% 1178 ± 4% sched_debug.cfs_rq[16]:/.tg_load_avg
5574 ± 10% -30.2% 3891 ± 19% sched_debug.cfs_rq[17]:/.exec_clock
48817 ± 2% -74.5% 12470 ± 20% sched_debug.cfs_rq[17]:/.min_vruntime
-3371 ±-35% +272.0% -12543 ±-29% sched_debug.cfs_rq[17]:/.spread0
1006 ± 5% +17.1% 1179 ± 4% sched_debug.cfs_rq[17]:/.tg_load_avg
5402 ± 4% -28.4% 3867 ± 18% sched_debug.cfs_rq[18]:/.exec_clock
13.75 ± 56% +236.4% 46.25 ± 40% sched_debug.cfs_rq[18]:/.load_avg
49829 ± 4% -78.3% 10804 ± 33% sched_debug.cfs_rq[18]:/.min_vruntime
-2359 ±-65% +502.2% -14209 ±-22% sched_debug.cfs_rq[18]:/.spread0
1007 ± 5% +17.3% 1181 ± 4% sched_debug.cfs_rq[18]:/.tg_load_avg
13.75 ± 56% +236.4% 46.25 ± 40% sched_debug.cfs_rq[18]:/.tg_load_avg_contrib
5368 ± 1% -16.5% 4483 ± 9% sched_debug.cfs_rq[19]:/.exec_clock
48575 ± 3% -77.8% 10764 ± 20% sched_debug.cfs_rq[19]:/.min_vruntime
-3614 ±-17% +294.2% -14249 ±-31% sched_debug.cfs_rq[19]:/.spread0
1005 ± 5% +22.0% 1226 ± 5% sched_debug.cfs_rq[19]:/.tg_load_avg
47115 ± 4% -68.4% 14873 ± 19% sched_debug.cfs_rq[1]:/.min_vruntime
6026 ± 18% -36.2% 3843 ± 14% sched_debug.cfs_rq[20]:/.exec_clock
48599 ± 5% -80.1% 9666 ± 24% sched_debug.cfs_rq[20]:/.min_vruntime
-3589 ±-65% +327.5% -15347 ±-44% sched_debug.cfs_rq[20]:/.spread0
1000 ± 4% +22.9% 1229 ± 5% sched_debug.cfs_rq[20]:/.tg_load_avg
5429 ± 13% -28.4% 3888 ± 14% sched_debug.cfs_rq[21]:/.exec_clock
45705 ± 4% -78.1% 10014 ± 29% sched_debug.cfs_rq[21]:/.min_vruntime
-6484 ±-49% +131.3% -15000 ±-23% sched_debug.cfs_rq[21]:/.spread0
1001 ± 5% +23.2% 1233 ± 5% sched_debug.cfs_rq[21]:/.tg_load_avg
46085 ± 7% -71.1% 13325 ± 24% sched_debug.cfs_rq[22]:/.min_vruntime
1000 ± 5% +23.4% 1235 ± 5% sched_debug.cfs_rq[22]:/.tg_load_avg
49339 ± 7% -72.1% 13752 ± 31% sched_debug.cfs_rq[23]:/.min_vruntime
1001 ± 5% +23.4% 1235 ± 5% sched_debug.cfs_rq[23]:/.tg_load_avg
40.25 ± 42% +129.8% 92.50 ± 59% sched_debug.cfs_rq[23]:/.util_avg
26391 ± 4% -76.4% 6227 ± 89% sched_debug.cfs_rq[24]:/.min_vruntime
1001 ± 5% +22.6% 1228 ± 5% sched_debug.cfs_rq[24]:/.tg_load_avg
26332 ± 6% -71.4% 7521 ± 46% sched_debug.cfs_rq[25]:/.min_vruntime
-25857 ±-11% -32.3% -17493 ±-17% sched_debug.cfs_rq[25]:/.spread0
996.50 ± 4% +23.4% 1229 ± 5% sched_debug.cfs_rq[25]:/.tg_load_avg
31236 ± 5% -70.9% 9100 ± 60% sched_debug.cfs_rq[26]:/.min_vruntime
995.25 ± 4% +23.6% 1230 ± 5% sched_debug.cfs_rq[26]:/.tg_load_avg
29.50 ± 25% -63.6% 10.75 ± 90% sched_debug.cfs_rq[27]:/.load_avg
27824 ± 9% -76.8% 6448 ± 91% sched_debug.cfs_rq[27]:/.min_vruntime
995.75 ± 4% +23.5% 1230 ± 5% sched_debug.cfs_rq[27]:/.tg_load_avg
29.50 ± 25% -63.6% 10.75 ± 90% sched_debug.cfs_rq[27]:/.tg_load_avg_contrib
69.50 ± 21% -56.5% 30.25 ± 73% sched_debug.cfs_rq[27]:/.util_avg
27839 ± 14% -74.8% 7018 ± 37% sched_debug.cfs_rq[28]:/.min_vruntime
996.75 ± 4% +23.9% 1234 ± 5% sched_debug.cfs_rq[28]:/.tg_load_avg
27250 ± 11% -70.1% 8150 ± 61% sched_debug.cfs_rq[29]:/.min_vruntime
996.00 ± 4% +24.0% 1235 ± 5% sched_debug.cfs_rq[29]:/.tg_load_avg
49854 ± 5% -75.6% 12158 ± 19% sched_debug.cfs_rq[2]:/.min_vruntime
8.50 ± 62% -79.4% 1.75 ±173% sched_debug.cfs_rq[2]:/.nr_spread_over
-2334 ±-103% +450.7% -12855 ±-48% sched_debug.cfs_rq[2]:/.spread0
29309 ± 9% -67.5% 9514 ± 40% sched_debug.cfs_rq[30]:/.min_vruntime
995.75 ± 4% +24.3% 1237 ± 5% sched_debug.cfs_rq[30]:/.tg_load_avg
25892 ± 5% -62.4% 9737 ± 88% sched_debug.cfs_rq[31]:/.min_vruntime
996.00 ± 4% +24.4% 1239 ± 5% sched_debug.cfs_rq[31]:/.tg_load_avg
25217 ± 8% -63.9% 9106 ± 50% sched_debug.cfs_rq[32]:/.min_vruntime
-26972 ± -6% -41.0% -15908 ±-43% sched_debug.cfs_rq[32]:/.spread0
992.75 ± 4% +25.0% 1241 ± 5% sched_debug.cfs_rq[32]:/.tg_load_avg
24793 ± 9% -78.1% 5431 ± 59% sched_debug.cfs_rq[33]:/.min_vruntime
993.00 ± 4% +25.3% 1244 ± 5% sched_debug.cfs_rq[33]:/.tg_load_avg
24893 ± 2% -67.9% 7992 ± 69% sched_debug.cfs_rq[34]:/.min_vruntime
993.00 ± 4% +25.7% 1248 ± 5% sched_debug.cfs_rq[34]:/.tg_load_avg
26215 ± 10% -63.6% 9531 ± 49% sched_debug.cfs_rq[35]:/.min_vruntime
-25974 ± -6% -40.4% -15483 ±-40% sched_debug.cfs_rq[35]:/.spread0
992.75 ± 4% +25.8% 1248 ± 5% sched_debug.cfs_rq[35]:/.tg_load_avg
25672 ± 10% -58.6% 10616 ± 31% sched_debug.cfs_rq[36]:/.min_vruntime
-26517 ±-14% -45.7% -14398 ±-43% sched_debug.cfs_rq[36]:/.spread0
994.50 ± 4% +25.7% 1250 ± 5% sched_debug.cfs_rq[36]:/.tg_load_avg
27177 ± 7% -67.4% 8849 ± 33% sched_debug.cfs_rq[37]:/.min_vruntime
-25012 ±-14% -35.4% -16165 ±-22% sched_debug.cfs_rq[37]:/.spread0
993.75 ± 4% +25.8% 1250 ± 5% sched_debug.cfs_rq[37]:/.tg_load_avg
29350 ± 6% -69.9% 8846 ± 32% sched_debug.cfs_rq[38]:/.min_vruntime
-22839 ±-10% -29.2% -16168 ±-30% sched_debug.cfs_rq[38]:/.spread0
992.50 ± 4% +25.7% 1247 ± 6% sched_debug.cfs_rq[38]:/.tg_load_avg
25898 ± 8% -71.1% 7494 ± 34% sched_debug.cfs_rq[39]:/.min_vruntime
-26291 ±-12% -33.4% -17521 ±-30% sched_debug.cfs_rq[39]:/.spread0
992.00 ± 4% +25.6% 1246 ± 5% sched_debug.cfs_rq[39]:/.tg_load_avg
53201 ± 6% -75.2% 13202 ± 22% sched_debug.cfs_rq[3]:/.min_vruntime
1012 ±376% -1266.2% -11811 ±-29% sched_debug.cfs_rq[3]:/.spread0
28250 ± 9% -75.3% 6976 ± 47% sched_debug.cfs_rq[40]:/.min_vruntime
993.25 ± 4% +25.6% 1248 ± 5% sched_debug.cfs_rq[40]:/.tg_load_avg
27875 ± 7% -65.8% 9530 ± 35% sched_debug.cfs_rq[41]:/.min_vruntime
991.25 ± 4% +25.9% 1247 ± 6% sched_debug.cfs_rq[41]:/.tg_load_avg
26677 ± 3% -62.4% 10043 ± 21% sched_debug.cfs_rq[42]:/.min_vruntime
-25512 ±-10% -41.3% -14972 ±-36% sched_debug.cfs_rq[42]:/.spread0
991.75 ± 4% +25.9% 1248 ± 6% sched_debug.cfs_rq[42]:/.tg_load_avg
26871 ± 10% -60.2% 10691 ± 18% sched_debug.cfs_rq[43]:/.min_vruntime
-25319 ± -6% -43.4% -14323 ±-33% sched_debug.cfs_rq[43]:/.spread0
992.50 ± 4% +25.8% 1249 ± 6% sched_debug.cfs_rq[43]:/.tg_load_avg
25783 ± 8% -62.2% 9740 ± 34% sched_debug.cfs_rq[44]:/.min_vruntime
-26407 ± -8% -42.2% -15275 ±-33% sched_debug.cfs_rq[44]:/.spread0
992.25 ± 4% +25.9% 1248 ± 6% sched_debug.cfs_rq[44]:/.tg_load_avg
26020 ± 5% -60.5% 10286 ± 33% sched_debug.cfs_rq[45]:/.min_vruntime
-26170 ± -7% -43.7% -14728 ±-42% sched_debug.cfs_rq[45]:/.spread0
993.00 ± 4% +25.4% 1245 ± 6% sched_debug.cfs_rq[45]:/.tg_load_avg
25924 ± 7% -60.0% 10374 ± 20% sched_debug.cfs_rq[46]:/.min_vruntime
-26266 ± -2% -44.3% -14641 ±-36% sched_debug.cfs_rq[46]:/.spread0
993.50 ± 4% +25.7% 1248 ± 6% sched_debug.cfs_rq[46]:/.tg_load_avg
2534 ± 4% +82.3% 4619 ± 25% sched_debug.cfs_rq[47]:/.exec_clock
25776 ± 6% -61.3% 9962 ± 15% sched_debug.cfs_rq[47]:/.min_vruntime
1.00 ±-100% +250.0% 3.50 ± 31% sched_debug.cfs_rq[47]:/.nr_spread_over
-26414 ± -4% -43.0% -15052 ±-34% sched_debug.cfs_rq[47]:/.spread0
991.50 ± 4% +25.2% 1241 ± 5% sched_debug.cfs_rq[47]:/.tg_load_avg
51959 ± 10% -76.7% 12098 ± 48% sched_debug.cfs_rq[4]:/.min_vruntime
-229.37 ±-2692% +5530.7% -12915 ±-43% sched_debug.cfs_rq[4]:/.spread0
1004 ± 5% +14.2% 1146 ± 3% sched_debug.cfs_rq[4]:/.tg_load_avg
52996 ± 8% -78.9% 11183 ± 21% sched_debug.cfs_rq[5]:/.min_vruntime
807.43 ±586% -1812.9% -13830 ±-34% sched_debug.cfs_rq[5]:/.spread0
1004 ± 5% +14.5% 1150 ± 3% sched_debug.cfs_rq[5]:/.tg_load_avg
52809 ± 7% -77.8% 11721 ± 21% sched_debug.cfs_rq[6]:/.min_vruntime
620.80 ±727% -2241.2% -13292 ±-36% sched_debug.cfs_rq[6]:/.spread0
1004 ± 5% +14.8% 1152 ± 3% sched_debug.cfs_rq[6]:/.tg_load_avg
35.25 ± 29% -48.9% 18.00 ± 73% sched_debug.cfs_rq[7]:/.load_avg
48888 ± 5% -79.5% 10025 ± 8% sched_debug.cfs_rq[7]:/.min_vruntime
-3300 ±-60% +354.1% -14988 ±-42% sched_debug.cfs_rq[7]:/.spread0
1001 ± 5% +15.3% 1154 ± 3% sched_debug.cfs_rq[7]:/.tg_load_avg
35.25 ± 29% -48.9% 18.00 ± 73% sched_debug.cfs_rq[7]:/.tg_load_avg_contrib
48245 ± 0% -77.2% 11013 ± 13% sched_debug.cfs_rq[8]:/.min_vruntime
-3943 ±-40% +255.0% -14000 ±-47% sched_debug.cfs_rq[8]:/.spread0
1003 ± 5% +15.1% 1154 ± 3% sched_debug.cfs_rq[8]:/.tg_load_avg
48413 ± 6% -74.6% 12301 ± 35% sched_debug.cfs_rq[9]:/.min_vruntime
-3775 ±-68% +236.7% -12712 ±-50% sched_debug.cfs_rq[9]:/.spread0
1004 ± 5% +15.1% 1156 ± 3% sched_debug.cfs_rq[9]:/.tg_load_avg
97438 ± 6% +168.2% 261320 ± 53% sched_debug.cpu#0.nr_switches
-15.00 ±-14% -60.0% -6.00 ±-60% sched_debug.cpu#0.nr_uninterruptible
47608 ± 6% +170.1% 128600 ± 53% sched_debug.cpu#0.sched_goidle
41431 ± 4% -24.3% 31365 ± 4% sched_debug.cpu#1.nr_load_updates
99622 ± 8% +100.3% 199515 ± 27% sched_debug.cpu#1.nr_switches
100109 ± 8% +99.4% 199662 ± 27% sched_debug.cpu#1.sched_count
49558 ± 9% +101.0% 99591 ± 27% sched_debug.cpu#1.sched_goidle
39651 ± 2% -34.7% 25892 ± 14% sched_debug.cpu#10.nr_load_updates
90178 ± 5% +61.2% 145375 ± 18% sched_debug.cpu#10.nr_switches
90476 ± 5% +61.0% 145655 ± 19% sched_debug.cpu#10.sched_count
44962 ± 5% +61.5% 72595 ± 18% sched_debug.cpu#10.sched_goidle
39461 ± 1% -29.7% 27758 ± 7% sched_debug.cpu#11.nr_load_updates
90773 ± 4% +89.5% 172026 ± 24% sched_debug.cpu#11.nr_switches
91699 ± 3% +87.8% 172192 ± 24% sched_debug.cpu#11.sched_count
45261 ± 4% +89.8% 85900 ± 24% sched_debug.cpu#11.sched_goidle
40345 ± 3% -35.9% 25863 ± 22% sched_debug.cpu#12.nr_load_updates
1.00 ±234% -675.0% -5.75 ±-28% sched_debug.cpu#12.nr_uninterruptible
40501 ± 1% -47.2% 21369 ± 7% sched_debug.cpu#13.nr_load_updates
39578 ± 2% -40.4% 23597 ± 12% sched_debug.cpu#14.nr_load_updates
40732 ± 1% -49.0% 20766 ± 10% sched_debug.cpu#15.nr_load_updates
-5.75 ±-56% -82.6% -1.00 ±-122% sched_debug.cpu#15.nr_uninterruptible
39801 ± 2% -51.0% 19493 ± 21% sched_debug.cpu#16.nr_load_updates
39958 ± 2% -52.5% 18993 ± 25% sched_debug.cpu#17.nr_load_updates
39706 ± 2% -51.5% 19245 ± 25% sched_debug.cpu#18.nr_load_updates
39978 ± 2% -47.0% 21200 ± 9% sched_debug.cpu#19.nr_load_updates
42402 ± 3% -28.1% 30472 ± 9% sched_debug.cpu#2.nr_load_updates
99545 ± 7% +105.1% 204210 ± 30% sched_debug.cpu#2.nr_switches
100055 ± 6% +104.3% 204381 ± 30% sched_debug.cpu#2.sched_count
49394 ± 7% +106.5% 101994 ± 30% sched_debug.cpu#2.sched_goidle
39520 ± 2% -50.2% 19676 ± 23% sched_debug.cpu#20.nr_load_updates
40112 ± 2% -51.5% 19471 ± 22% sched_debug.cpu#21.nr_load_updates
39597 ± 2% -40.9% 23410 ± 11% sched_debug.cpu#22.nr_load_updates
39949 ± 1% -41.9% 23197 ± 8% sched_debug.cpu#23.nr_load_updates
0.75 ±404% -200.0% -0.75 ±-110% sched_debug.cpu#23.nr_uninterruptible
18834 ± 3% -51.9% 9068 ± 50% sched_debug.cpu#24.nr_load_updates
18926 ± 2% -50.7% 9325 ± 44% sched_debug.cpu#25.nr_load_updates
19984 ± 4% -53.1% 9374 ± 43% sched_debug.cpu#26.nr_load_updates
19759 ± 7% -54.4% 9017 ± 50% sched_debug.cpu#27.nr_load_updates
2.50 ± 87% -140.0% -1.00 ±-70% sched_debug.cpu#27.nr_uninterruptible
18776 ± 5% -51.7% 9061 ± 47% sched_debug.cpu#28.nr_load_updates
19183 ± 5% -50.1% 9573 ± 44% sched_debug.cpu#29.nr_load_updates
43139 ± 5% -30.8% 29861 ± 7% sched_debug.cpu#3.nr_load_updates
100982 ± 9% +90.7% 192544 ± 29% sched_debug.cpu#3.nr_switches
102078 ± 9% +88.8% 192718 ± 29% sched_debug.cpu#3.sched_count
50347 ± 9% +90.8% 96052 ± 29% sched_debug.cpu#3.sched_goidle
19680 ± 3% -55.3% 8793 ± 50% sched_debug.cpu#30.nr_load_updates
0.00 ± 0% +Inf% 2.00 ±111% sched_debug.cpu#30.nr_uninterruptible
372.25 ± 8% -25.7% 276.50 ± 18% sched_debug.cpu#30.ttwu_local
18524 ± 2% -50.1% 9247 ± 47% sched_debug.cpu#32.nr_load_updates
17977 ± 1% -50.9% 8818 ± 48% sched_debug.cpu#33.nr_load_updates
372.00 ± 14% -22.6% 287.75 ± 14% sched_debug.cpu#33.ttwu_local
17624 ± 1% -50.7% 8691 ± 52% sched_debug.cpu#35.nr_load_updates
4.50 ± 57% -83.3% 0.75 ±110% sched_debug.cpu#36.nr_uninterruptible
856925 ± 5% +14.2% 978679 ± 2% sched_debug.cpu#4.avg_idle
43374 ± 4% -41.4% 25428 ± 13% sched_debug.cpu#4.nr_load_updates
4.50 ± 59% -116.7% -0.75 ±-288% sched_debug.cpu#45.nr_uninterruptible
321.00 ± 3% +26.6% 406.50 ± 9% sched_debug.cpu#47.ttwu_local
42701 ± 4% -34.8% 27834 ± 23% sched_debug.cpu#5.nr_load_updates
948327 ± 7% -17.1% 786484 ± 6% sched_debug.cpu#6.avg_idle
43586 ± 2% -37.0% 27453 ± 11% sched_debug.cpu#6.nr_load_updates
99119 ± 6% +73.3% 171800 ± 26% sched_debug.cpu#6.nr_switches
99623 ± 6% +73.0% 172384 ± 26% sched_debug.cpu#6.sched_count
49415 ± 6% +73.6% 85805 ± 26% sched_debug.cpu#6.sched_goidle
43054 ± 2% -30.1% 30103 ± 12% sched_debug.cpu#7.nr_load_updates
101164 ± 8% +103.9% 206226 ± 40% sched_debug.cpu#7.nr_switches
101515 ± 8% +103.4% 206512 ± 40% sched_debug.cpu#7.sched_count
50453 ± 8% +104.2% 103019 ± 40% sched_debug.cpu#7.sched_goidle
41855 ± 2% -32.7% 28163 ± 15% sched_debug.cpu#8.nr_load_updates
94128 ± 6% +87.6% 176601 ± 34% sched_debug.cpu#8.nr_switches
94622 ± 6% +87.3% 177198 ± 34% sched_debug.cpu#8.sched_count
46932 ± 6% +87.9% 88204 ± 35% sched_debug.cpu#8.sched_goidle
41325 ± 2% -41.1% 24359 ± 14% sched_debug.cpu#9.nr_load_updates
93199 ± 5% +38.3% 128860 ± 21% sched_debug.cpu#9.nr_switches
93570 ± 5% +37.8% 128966 ± 21% sched_debug.cpu#9.sched_count
46460 ± 5% +38.5% 64331 ± 21% sched_debug.cpu#9.sched_goidle
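
A note on reading these tables: each row shows the value for the first (parent) commit, the relative change, and the value for the second (patched) commit, each with its standard deviation across runs. As a cross-check on the first row above, (6527 - 3629) / 3629 ≈ +79.8%, matching the reported %change for aim9.fork_test.ops_per_sec (the displayed values are rounded, so a recomputation may differ in the last digit).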

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
lkp-ne04/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/btrfs/5K/400M/fsyncBeforeClose/16d/256fpd

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
value ± %stddev      %change      value ± %stddev    metric
1502962 ± 3% +67.8% 2522676 ± 2% fsmark.app_overhead
47459 ± 5% +153.7% 120415 ± 1% fsmark.time.involuntary_context_switches
3043 ± 0% -2.4% 2969 ± 1% fsmark.time.maximum_resident_set_size
26057 ± 9% -33.6% 17301 ± 2% fsmark.time.minor_page_faults
23.50 ± 4% +105.3% 48.25 ± 2% fsmark.time.percent_of_cpu_this_job_got
55.78 ± 4% +105.6% 114.69 ± 2% fsmark.time.system_time
1122907 ± 3% +44.6% 1623729 ± 1% fsmark.time.voluntary_context_switches
650.75 ±173% +793.2% 5812 ±101% latency_stats.sum.btrfs_commit_transaction.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
6.50 ± 45% +207.7% 20.00 ± 27% numa-numastat.node1.other_node
12659 ± 2% +29.3% 16370 ± 1% vmstat.system.cs
1494 ± 1% +111.1% 3154 ± 1% vmstat.system.in
16356 ± 15% -54.9% 7379 ± 6% proc-vmstat.numa_hint_faults
14104 ± 14% -61.8% 5391 ± 10% proc-vmstat.numa_hint_faults_local
20407 ± 12% -41.6% 11928 ± 4% proc-vmstat.numa_pte_updates
3033 ± 5% +12.6% 3416 ± 8% slabinfo.kmalloc-256.active_objs
7671 ± 6% +13.0% 8671 ± 6% slabinfo.vm_area_struct.active_objs
7707 ± 6% +13.0% 8707 ± 6% slabinfo.vm_area_struct.num_objs
48125 ± 1% +45.1% 69852 ± 1% softirqs.RCU
53676 ± 5% +46.4% 78604 ± 1% softirqs.SCHED
108653 ± 6% +29.8% 140978 ± 4% softirqs.TIMER
2.01 ± 4% +84.7% 3.71 ± 2% turbostat.%Busy
61.75 ± 4% +86.6% 115.25 ± 2% turbostat.Avg_MHz
3.21 ± 19% +59.8% 5.13 ± 9% turbostat.Pkg%pc6
670926 ± 1% +25.5% 842267 ± 3% cpuidle.C1-NHM.usage
61834777 ± 4% -22.8% 47757482 ± 2% cpuidle.C1E-NHM.time
64454 ± 2% +62.9% 105004 ± 4% cpuidle.C1E-NHM.usage
223888 ± 2% +11.5% 249724 ± 1% cpuidle.C3-NHM.usage
438281 ± 3% +28.0% 561031 ± 1% cpuidle.C6-NHM.usage
47459 ± 5% +153.7% 120415 ± 1% time.involuntary_context_switches
26057 ± 9% -33.6% 17301 ± 2% time.minor_page_faults
23.50 ± 4% +105.3% 48.25 ± 2% time.percent_of_cpu_this_job_got
55.78 ± 4% +105.6% 114.69 ± 2% time.system_time
0.72 ± 3% +21.7% 0.87 ± 3% time.user_time
1122907 ± 3% +44.6% 1623729 ± 1% time.voluntary_context_switches
78288 ± 3% -29.1% 55539 ± 3% numa-vmstat.node0.nr_active_file
145643 ± 2% -33.9% 96319 ± 4% numa-vmstat.node0.nr_dirtied
850.50 ± 1% -29.3% 601.00 ± 2% numa-vmstat.node0.nr_dirty
203686 ± 2% -24.1% 154626 ± 2% numa-vmstat.node0.nr_file_pages
124060 ± 1% -21.1% 97842 ± 2% numa-vmstat.node0.nr_inactive_file
213.00 ± 12% +27.0% 270.50 ± 6% numa-vmstat.node0.nr_kernel_stack
24657 ± 1% -29.6% 17369 ± 4% numa-vmstat.node0.nr_slab_reclaimable
144956 ± 2% -33.9% 95800 ± 4% numa-vmstat.node0.nr_written
1404 ± 11% -75.5% 343.75 ± 6% numa-vmstat.node0.numa_other
32986 ± 1% +67.0% 55076 ± 6% numa-vmstat.node1.nr_active_file
49187 ± 3% +96.1% 96433 ± 6% numa-vmstat.node1.nr_dirtied
287.50 ± 7% +92.0% 552.00 ± 2% numa-vmstat.node1.nr_dirty
107899 ± 1% +43.5% 154829 ± 4% numa-vmstat.node1.nr_file_pages
73708 ± 2% +33.7% 98510 ± 2% numa-vmstat.node1.nr_inactive_file
188.00 ± 14% -32.2% 127.50 ± 14% numa-vmstat.node1.nr_kernel_stack
10239 ± 3% +69.3% 17337 ± 4% numa-vmstat.node1.nr_slab_reclaimable
48857 ± 3% +96.2% 95862 ± 6% numa-vmstat.node1.nr_written
330088 ± 3% -27.7% 238545 ± 2% numa-meminfo.node0.Active
313158 ± 3% -29.1% 222167 ± 3% numa-meminfo.node0.Active(file)
3404 ± 1% -29.4% 2401 ± 2% numa-meminfo.node0.Dirty
814757 ± 2% -24.1% 618527 ± 2% numa-meminfo.node0.FilePages
500690 ± 1% -21.0% 395786 ± 2% numa-meminfo.node0.Inactive
496249 ± 1% -21.1% 391380 ± 2% numa-meminfo.node0.Inactive(file)
3416 ± 12% +26.9% 4334 ± 6% numa-meminfo.node0.KernelStack
996304 ± 1% -22.4% 773020 ± 1% numa-meminfo.node0.MemUsed
98632 ± 1% -29.6% 69481 ± 4% numa-meminfo.node0.SReclaimable
120967 ± 1% -24.6% 91223 ± 2% numa-meminfo.node0.Slab
147024 ± 2% +60.7% 236270 ± 6% numa-meminfo.node1.Active
131949 ± 1% +67.0% 220312 ± 6% numa-meminfo.node1.Active(file)
1152 ± 7% +91.5% 2207 ± 2% numa-meminfo.node1.Dirty
431603 ± 1% +43.5% 619342 ± 4% numa-meminfo.node1.FilePages
299227 ± 2% +33.2% 398472 ± 2% numa-meminfo.node1.Inactive
294836 ± 2% +33.7% 394051 ± 2% numa-meminfo.node1.Inactive(file)
3015 ± 14% -32.1% 2047 ± 14% numa-meminfo.node1.KernelStack
545740 ± 1% +39.4% 760561 ± 3% numa-meminfo.node1.MemUsed
40960 ± 3% +69.3% 69354 ± 4% numa-meminfo.node1.SReclaimable
59410 ± 2% +48.8% 88424 ± 3% numa-meminfo.node1.Slab
10.50 ± 15% -47.6% 5.50 ± 20% sched_debug.cfs_rq[0]:/.nr_spread_over
553.00 ± 5% +23.1% 680.75 ± 8% sched_debug.cfs_rq[0]:/.tg_load_avg
2296 ± 4% +45.7% 3346 ± 4% sched_debug.cfs_rq[10]:/.exec_clock
-7449 ±-18% -37.5% -4655 ± -5% sched_debug.cfs_rq[10]:/.spread0
540.50 ± 3% +26.7% 685.00 ± 8% sched_debug.cfs_rq[10]:/.tg_load_avg
1300 ± 7% +182.2% 3671 ± 12% sched_debug.cfs_rq[11]:/.exec_clock
4146 ± 16% +76.0% 7295 ± 11% sched_debug.cfs_rq[11]:/.min_vruntime
-8740 ±-16% -57.1% -3746 ±-18% sched_debug.cfs_rq[11]:/.spread0
544.75 ± 3% +25.7% 685.00 ± 9% sched_debug.cfs_rq[11]:/.tg_load_avg
2481 ± 19% +111.8% 5257 ± 25% sched_debug.cfs_rq[12]:/.exec_clock
5363 ± 8% +48.4% 7961 ± 14% sched_debug.cfs_rq[12]:/.min_vruntime
-7523 ±-16% -59.1% -3080 ±-41% sched_debug.cfs_rq[12]:/.spread0
546.00 ± 3% +25.9% 687.50 ± 8% sched_debug.cfs_rq[12]:/.tg_load_avg
47.75 ± 46% +77.5% 84.75 ± 30% sched_debug.cfs_rq[12]:/.util_avg
1326 ± 9% +168.9% 3567 ± 16% sched_debug.cfs_rq[13]:/.exec_clock
4316 ± 7% +67.3% 7220 ± 11% sched_debug.cfs_rq[13]:/.min_vruntime
-8571 ±-22% -55.4% -3821 ±-23% sched_debug.cfs_rq[13]:/.spread0
548.00 ± 3% +26.0% 690.25 ± 8% sched_debug.cfs_rq[13]:/.tg_load_avg
5.75 ± 52% -65.2% 2.00 ± 86% sched_debug.cfs_rq[14]:/.nr_spread_over
552.00 ± 3% +25.1% 690.75 ± 8% sched_debug.cfs_rq[14]:/.tg_load_avg
1622 ± 7% +136.8% 3842 ± 10% sched_debug.cfs_rq[15]:/.exec_clock
4452 ± 10% +54.4% 6875 ± 7% sched_debug.cfs_rq[15]:/.min_vruntime
-8435 ±-19% -50.6% -4166 ± -5% sched_debug.cfs_rq[15]:/.spread0
552.00 ± 3% +25.8% 694.50 ± 8% sched_debug.cfs_rq[15]:/.tg_load_avg
40.00 ± 55% +143.8% 97.50 ± 11% sched_debug.cfs_rq[15]:/.util_avg
555.50 ± 5% +22.4% 680.00 ± 8% sched_debug.cfs_rq[1]:/.tg_load_avg
555.50 ± 6% +21.8% 676.75 ± 8% sched_debug.cfs_rq[2]:/.tg_load_avg
36.00 ± 14% +29.2% 46.50 ± 20% sched_debug.cfs_rq[3]:/.load_avg
551.50 ± 6% +22.5% 675.50 ± 8% sched_debug.cfs_rq[3]:/.tg_load_avg
35.75 ± 15% +30.8% 46.75 ± 20% sched_debug.cfs_rq[3]:/.tg_load_avg_contrib
3063 ± 9% +49.3% 4574 ± 26% sched_debug.cfs_rq[4]:/.exec_clock
-5335 ±-15% -36.0% -3417 ±-20% sched_debug.cfs_rq[4]:/.spread0
552.25 ± 6% +22.7% 677.50 ± 9% sched_debug.cfs_rq[4]:/.tg_load_avg
3056 ± 13% +31.9% 4029 ± 7% sched_debug.cfs_rq[5]:/.exec_clock
-6246 ±-27% -46.0% -3374 ±-26% sched_debug.cfs_rq[5]:/.spread0
552.25 ± 6% +21.8% 672.50 ± 8% sched_debug.cfs_rq[5]:/.tg_load_avg
2929 ± 4% +37.9% 4041 ± 13% sched_debug.cfs_rq[6]:/.exec_clock
-5929 ±-26% -35.3% -3833 ±-14% sched_debug.cfs_rq[6]:/.spread0
537.75 ± 4% +25.1% 672.75 ± 8% sched_debug.cfs_rq[6]:/.tg_load_avg
3132 ± 26% +36.1% 4262 ± 11% sched_debug.cfs_rq[7]:/.exec_clock
-6184 ±-30% -49.0% -3151 ±-30% sched_debug.cfs_rq[7]:/.spread0
541.75 ± 5% +24.6% 675.25 ± 8% sched_debug.cfs_rq[7]:/.tg_load_avg
2411 ± 16% +35.0% 3256 ± 2% sched_debug.cfs_rq[8]:/.exec_clock
536.25 ± 3% +26.3% 677.50 ± 8% sched_debug.cfs_rq[8]:/.tg_load_avg
1367 ± 11% +209.6% 4232 ± 21% sched_debug.cfs_rq[9]:/.exec_clock
4109 ± 11% +74.6% 7177 ± 13% sched_debug.cfs_rq[9]:/.min_vruntime
-8777 ±-17% -56.0% -3864 ±-33% sched_debug.cfs_rq[9]:/.spread0
539.50 ± 2% +26.2% 681.00 ± 8% sched_debug.cfs_rq[9]:/.tg_load_avg
797029 ± 6% +19.7% 954108 ± 2% sched_debug.cpu#0.avg_idle
23324 ± 5% -13.6% 20164 ± 2% sched_debug.cpu#0.nr_load_updates
182645 ± 5% -34.7% 119281 ± 2% sched_debug.cpu#0.nr_switches
-14068 ± -2% -98.0% -282.00 ±-15% sched_debug.cpu#0.nr_uninterruptible
78351 ± 6% -37.8% 48707 ± 2% sched_debug.cpu#0.sched_goidle
143227 ± 3% -26.9% 104768 ± 2% sched_debug.cpu#0.ttwu_count
61522 ± 0% -72.4% 16979 ± 4% sched_debug.cpu#0.ttwu_local
11817 ± 11% +51.3% 17882 ± 4% sched_debug.cpu#1.nr_load_updates
71706 ± 19% +48.4% 106392 ± 4% sched_debug.cpu#1.nr_switches
428.50 ± 32% -137.9% -162.25 ±-25% sched_debug.cpu#1.nr_uninterruptible
74951 ± 18% +44.7% 108445 ± 5% sched_debug.cpu#1.sched_count
31348 ± 21% +38.0% 43275 ± 4% sched_debug.cpu#1.sched_goidle
32223 ± 10% +82.5% 58816 ± 3% sched_debug.cpu#1.ttwu_count
7130 ± 7% -28.8% 5074 ± 10% sched_debug.cpu#1.ttwu_local
1697 ± 20% -92.2% 132.00 ± 26% sched_debug.cpu#10.nr_uninterruptible
9465 ± 3% -83.6% 1549 ± 4% sched_debug.cpu#10.ttwu_local
923804 ± 4% -13.2% 801932 ± 11% sched_debug.cpu#11.avg_idle
8913 ± 8% +53.3% 13668 ± 4% sched_debug.cpu#11.nr_load_updates
41637 ± 11% +108.4% 86754 ± 8% sched_debug.cpu#11.nr_switches
488.50 ± 21% -67.9% 156.75 ± 19% sched_debug.cpu#11.nr_uninterruptible
41683 ± 11% +109.8% 87457 ± 8% sched_debug.cpu#11.sched_count
16228 ± 12% +110.7% 34196 ± 9% sched_debug.cpu#11.sched_goidle
30437 ± 25% +70.8% 51994 ± 9% sched_debug.cpu#11.ttwu_count
12868 ± 2% +20.7% 15528 ± 8% sched_debug.cpu#12.nr_load_updates
1746 ± 6% -91.7% 144.50 ± 13% sched_debug.cpu#12.nr_uninterruptible
10221 ± 2% -76.1% 2440 ± 25% sched_debug.cpu#12.ttwu_local
951235 ± 5% -15.3% 805287 ± 4% sched_debug.cpu#13.avg_idle
8965 ± 8% +50.4% 13480 ± 4% sched_debug.cpu#13.nr_load_updates
39834 ± 14% +126.0% 90034 ± 6% sched_debug.cpu#13.nr_switches
452.75 ± 22% -68.2% 144.00 ± 19% sched_debug.cpu#13.nr_uninterruptible
40599 ± 13% +130.4% 93534 ± 8% sched_debug.cpu#13.sched_count
15351 ± 16% +133.4% 35834 ± 7% sched_debug.cpu#13.sched_goidle
24825 ± 5% +103.7% 50576 ± 6% sched_debug.cpu#13.ttwu_count
6324 ± 7% -36.3% 4030 ± 4% sched_debug.cpu#13.ttwu_local
72443 ± 4% +17.9% 85429 ± 3% sched_debug.cpu#14.nr_switches
1972 ± 12% -93.8% 121.50 ± 19% sched_debug.cpu#14.nr_uninterruptible
72520 ± 4% +17.9% 85502 ± 3% sched_debug.cpu#14.sched_count
26452 ± 4% +27.1% 33620 ± 5% sched_debug.cpu#14.sched_goidle
10265 ± 4% -84.3% 1609 ± 17% sched_debug.cpu#14.ttwu_local
0.00 ± 0% +Inf% 3.25 ± 70% sched_debug.cpu#15.cpu_load[3]
0.00 ± 0% +Inf% 3.50 ± 82% sched_debug.cpu#15.cpu_load[4]
8890 ± 5% +55.5% 13824 ± 3% sched_debug.cpu#15.nr_load_updates
38824 ± 6% +112.9% 82640 ± 3% sched_debug.cpu#15.nr_switches
601.00 ± 19% -70.8% 175.25 ± 22% sched_debug.cpu#15.nr_uninterruptible
38868 ± 6% +112.8% 82715 ± 3% sched_debug.cpu#15.sched_count
14797 ± 6% +116.0% 31969 ± 4% sched_debug.cpu#15.sched_goidle
27494 ± 22% +86.8% 51373 ± 2% sched_debug.cpu#15.ttwu_count
6153 ± 4% -31.9% 4191 ± 3% sched_debug.cpu#15.ttwu_local
1078 ± 22% -110.4% -111.75 ±-23% sched_debug.cpu#2.nr_uninterruptible
12223 ± 5% -77.1% 2797 ± 5% sched_debug.cpu#2.ttwu_local
12798 ± 7% +37.1% 17543 ± 3% sched_debug.cpu#3.nr_load_updates
141.50 ± 65% -202.8% -145.50 ±-21% sched_debug.cpu#3.nr_uninterruptible
32902 ± 12% +87.2% 61593 ± 6% sched_debug.cpu#3.ttwu_count
8936 ± 25% -42.2% 5162 ± 7% sched_debug.cpu#3.ttwu_local
15863 ± 3% +17.3% 18602 ± 4% sched_debug.cpu#4.nr_load_updates
1631 ± 3% -108.5% -139.00 ±-31% sched_debug.cpu#4.nr_uninterruptible
9656 ± 6% -74.7% 2442 ± 7% sched_debug.cpu#4.ttwu_local
12597 ± 7% +40.9% 17748 ± 4% sched_debug.cpu#5.nr_load_updates
78454 ± 20% +33.0% 104376 ± 2% sched_debug.cpu#5.nr_switches
240.75 ± 34% -155.1% -132.75 ±-15% sched_debug.cpu#5.nr_uninterruptible
83088 ± 18% +29.5% 107594 ± 4% sched_debug.cpu#5.sched_count
32838 ± 10% +80.5% 59287 ± 2% sched_debug.cpu#5.ttwu_count
7323 ± 9% -33.0% 4907 ± 3% sched_debug.cpu#5.ttwu_local
15172 ± 1% +17.8% 17871 ± 3% sched_debug.cpu#6.nr_load_updates
1670 ± 15% -105.7% -96.00 ±-30% sched_debug.cpu#6.nr_uninterruptible
9007 ± 4% -77.3% 2043 ± 6% sched_debug.cpu#6.ttwu_local
12231 ± 8% +44.7% 17698 ± 3% sched_debug.cpu#7.nr_load_updates
66987 ± 12% +52.8% 102366 ± 2% sched_debug.cpu#7.nr_switches
211.25 ± 32% -182.1% -173.50 ±-16% sched_debug.cpu#7.nr_uninterruptible
68571 ± 12% +54.0% 105617 ± 2% sched_debug.cpu#7.sched_count
28991 ± 15% +42.4% 41283 ± 2% sched_debug.cpu#7.sched_goidle
32836 ± 12% +85.9% 61048 ± 7% sched_debug.cpu#7.ttwu_count
7504 ± 9% -33.0% 5031 ± 5% sched_debug.cpu#7.ttwu_local
12373 ± 3% +14.7% 14189 ± 4% sched_debug.cpu#8.nr_load_updates
72843 ± 3% +18.6% 86425 ± 3% sched_debug.cpu#8.nr_switches
1186 ± 8% -79.5% 243.50 ± 12% sched_debug.cpu#8.nr_uninterruptible
72933 ± 3% +18.6% 86507 ± 3% sched_debug.cpu#8.sched_count
27594 ± 3% +23.6% 34110 ± 4% sched_debug.cpu#8.sched_goidle
9400 ± 3% -76.5% 2205 ± 64% sched_debug.cpu#8.ttwu_local
9067 ± 7% +48.5% 13465 ± 3% sched_debug.cpu#9.nr_load_updates
39627 ± 7% +112.4% 84169 ± 2% sched_debug.cpu#9.nr_switches
540.25 ± 18% -73.4% 143.50 ± 22% sched_debug.cpu#9.nr_uninterruptible
39670 ± 7% +112.7% 84392 ± 2% sched_debug.cpu#9.sched_count
15298 ± 8% +114.4% 32795 ± 3% sched_debug.cpu#9.sched_goidle
32634 ± 29% +55.6% 50773 ± 0% sched_debug.cpu#9.ttwu_count
0.88 ±140% +88.4% 1.66 ± 65% sched_debug.rt_rq[1]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
lkp-ne04/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/8K/400M/fsyncBeforeClose/16d/256fpd

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
value ± %stddev      %change      value ± %stddev    metric
4492452 ± 2% -6.4% 4206443 ± 3% fsmark.app_overhead
15381 ± 0% -33.1% 10293 ± 1% fsmark.time.involuntary_context_switches
21.25 ± 2% +20.0% 25.50 ± 1% fsmark.time.percent_of_cpu_this_job_got
546988 ± 0% +5.7% 577983 ± 0% fsmark.time.voluntary_context_switches
2.00 ± 35% -100.0% 0.00 ± 0% numa-numastat.node0.other_node
1446 ± 5% -37.5% 904.52 ± 7% uptime.idle
508.75 ± 4% -12.7% 444.25 ± 4% proc-vmstat.nr_alloc_batch
35281 ± 0% -42.0% 20449 ± 1% proc-vmstat.pgactivate
19547 ± 0% -10.9% 17420 ± 1% softirqs.BLOCK
21636 ± 0% +13.1% 24472 ± 0% softirqs.RCU
12276 ± 1% +4.1% 12774 ± 1% vmstat.system.cs
1126 ± 0% +23.0% 1385 ± 0% vmstat.system.in
15381 ± 0% -33.1% 10293 ± 1% time.involuntary_context_switches
21.25 ± 2% +20.0% 25.50 ± 1% time.percent_of_cpu_this_job_got
23.20 ± 0% +19.7% 27.78 ± 0% time.system_time
0.81 ± 6% +33.8% 1.09 ± 5% time.user_time
42276 ± 1% -10.8% 37701 ± 2% numa-vmstat.node0.nr_active_file
11195 ± 4% -12.5% 9798 ± 1% numa-vmstat.node0.nr_slab_reclaimable
1240 ± 9% -76.2% 295.75 ± 12% numa-vmstat.node0.numa_other
28794 ± 3% +26.1% 36296 ± 2% numa-vmstat.node1.nr_active_file
8316 ± 6% +16.3% 9669 ± 2% numa-vmstat.node1.nr_slab_reclaimable
216.25 ± 12% +3751.7% 8329 ± 97% latency_stats.avg.wait_on_page_bit.find_data_page.[f2fs].f2fs_find_entry.[f2fs].f2fs_lookup.[f2fs].lookup_real.__lookup_hash.filename_create.SyS_mkdir.entry_SYSCALL_64_fastpath
2832 ± 8% +219.4% 9045 ± 31% latency_stats.max.call_rwsem_down_read_failed.f2fs_wait_on_page_writeback.[f2fs].f2fs_wait_on_page_writeback.[f2fs].wait_on_node_pages_writeback.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
2460 ± 14% +305.5% 9977 ± 31% latency_stats.max.call_rwsem_down_write_failed.f2fs_submit_merged_bio.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
2851 ± 9% +264.0% 10379 ± 29% latency_stats.max.call_rwsem_down_write_failed.f2fs_submit_page_mbio.[f2fs].do_write_page.[f2fs].write_node_page.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
216.25 ± 12% +3751.7% 8329 ± 97% latency_stats.max.wait_on_page_bit.find_data_page.[f2fs].f2fs_find_entry.[f2fs].f2fs_lookup.[f2fs].lookup_real.__lookup_hash.filename_create.SyS_mkdir.entry_SYSCALL_64_fastpath
216.25 ± 12% +3751.7% 8329 ± 97% latency_stats.sum.wait_on_page_bit.find_data_page.[f2fs].f2fs_find_entry.[f2fs].f2fs_lookup.[f2fs].lookup_real.__lookup_hash.filename_create.SyS_mkdir.entry_SYSCALL_64_fastpath
1.432e+08 ± 1% -84.3% 22481638 ± 1% cpuidle.C1-NHM.time
141844 ± 8% -63.1% 52346 ± 20% cpuidle.C1-NHM.usage
32847234 ± 4% -71.2% 9464251 ± 2% cpuidle.C1E-NHM.time
26193 ± 3% -65.0% 9156 ± 1% cpuidle.C1E-NHM.usage
5.112e+08 ± 0% +43.9% 7.354e+08 ± 0% cpuidle.C3-NHM.time
199235 ± 1% +47.9% 294717 ± 0% cpuidle.C3-NHM.usage
320518 ± 2% +25.4% 401876 ± 0% cpuidle.C6-NHM.usage
1.60 ± 1% +34.1% 2.14 ± 0% turbostat.%Busy
38.50 ± 1% +7.1% 41.25 ± 1% turbostat.Avg_MHz
2398 ± 0% -19.8% 1924 ± 0% turbostat.Bzy_MHz
18.74 ± 1% -65.2% 6.53 ± 0% turbostat.CPU%c1
35.40 ± 1% +63.4% 57.86 ± 0% turbostat.CPU%c3
44.26 ± 0% -24.4% 33.47 ± 1% turbostat.CPU%c6
20.05 ± 0% +140.1% 48.14 ± 3% turbostat.Pkg%pc3
1.49 ± 11% +39.5% 2.08 ± 11% turbostat.Pkg%pc6
181673 ± 2% -9.2% 164982 ± 2% numa-meminfo.node0.Active
169108 ± 1% -10.8% 150805 ± 2% numa-meminfo.node0.Active(file)
3080 ± 4% +23.1% 3791 ± 8% numa-meminfo.node0.KernelStack
44782 ± 4% -12.5% 39195 ± 1% numa-meminfo.node0.SReclaimable
131310 ± 3% +22.0% 160211 ± 3% numa-meminfo.node1.Active
115180 ± 3% +26.1% 145187 ± 2% numa-meminfo.node1.Active(file)
2702 ± 5% -25.4% 2016 ± 15% numa-meminfo.node1.KernelStack
474615 ± 2% +10.0% 521942 ± 2% numa-meminfo.node1.MemUsed
33264 ± 6% +16.3% 38676 ± 2% numa-meminfo.node1.SReclaimable
2845 ± 22% +62.7% 4630 ± 4% sched_debug.cfs_rq[0]:/.exec_clock
4147 ± 12% +32.1% 5476 ± 3% sched_debug.cfs_rq[0]:/.min_vruntime
37.50 ± 28% +83.3% 68.75 ± 39% sched_debug.cfs_rq[10]:/.load_avg
1.00 ±141% +1450.0% 15.50 ± 47% sched_debug.cfs_rq[10]:/.runnable_load_avg
-2773 ±-25% +52.5% -4231 ± -6% sched_debug.cfs_rq[10]:/.spread0
37.50 ± 28% +83.3% 68.75 ± 39% sched_debug.cfs_rq[10]:/.tg_load_avg_contrib
-2292 ±-30% +57.2% -3603 ±-16% sched_debug.cfs_rq[11]:/.spread0
832.60 ± 53% +304.8% 3370 ± 1% sched_debug.cfs_rq[12]:/.exec_clock
1458 ± 34% +152.8% 3686 ± 3% sched_debug.cfs_rq[12]:/.min_vruntime
-2793 ±-22% +48.2% -4139 ± -6% sched_debug.cfs_rq[13]:/.spread0
2009 ± 37% -61.2% 779.06 ± 15% sched_debug.cfs_rq[14]:/.exec_clock
2494 ± 27% -53.3% 1164 ± 11% sched_debug.cfs_rq[14]:/.min_vruntime
-1654 ±-54% +160.8% -4314 ± -3% sched_debug.cfs_rq[14]:/.spread0
741.45 ± 25% +169.0% 1994 ± 20% sched_debug.cfs_rq[15]:/.exec_clock
170.50 ±103% -72.7% 46.50 ± 35% sched_debug.cfs_rq[15]:/.load_avg
1359 ± 20% +114.3% 2912 ± 9% sched_debug.cfs_rq[15]:/.min_vruntime
170.50 ±103% -73.2% 45.75 ± 37% sched_debug.cfs_rq[15]:/.tg_load_avg_contrib
2566 ± 28% -45.0% 1412 ± 41% sched_debug.cfs_rq[1]:/.exec_clock
8.00 ± 23% +40.6% 11.25 ± 7% sched_debug.cfs_rq[1]:/.nr_spread_over
-771.37 ±-105% +306.4% -3135 ±-13% sched_debug.cfs_rq[1]:/.spread0
2342 ± 32% -55.7% 1038 ± 17% sched_debug.cfs_rq[2]:/.exec_clock
-827.09 ±-85% +255.9% -2943 ±-16% sched_debug.cfs_rq[2]:/.spread0
-2129 ±-11% +32.8% -2827 ±-14% sched_debug.cfs_rq[3]:/.spread0
-1589 ±-85% +149.2% -3960 ± -8% sched_debug.cfs_rq[4]:/.spread0
-2098 ±-32% +86.8% -3921 ±-17% sched_debug.cfs_rq[6]:/.spread0
-1676 ±-32% +77.2% -2971 ±-15% sched_debug.cfs_rq[7]:/.spread0
661.93 ± 20% +28.3% 849.17 ± 9% sched_debug.cfs_rq[8]:/.exec_clock
-2541 ±-37% +67.0% -4243 ± -6% sched_debug.cfs_rq[8]:/.spread0
559.96 ± 14% +26.0% 705.81 ± 10% sched_debug.cfs_rq[9]:/.exec_clock
-3118 ±-16% +31.7% -4107 ± -9% sched_debug.cfs_rq[9]:/.spread0
11.25 ± 92% -97.0% 0.33 ±141% sched_debug.cpu#0.cpu_load[0]
-2938 ± -3% -80.6% -569.75 ± -2% sched_debug.cpu#0.nr_uninterruptible
53209 ± 3% -12.6% 46509 ± 2% sched_debug.cpu#0.ttwu_count
14722 ± 2% -45.9% 7965 ± 8% sched_debug.cpu#0.ttwu_local
7849 ± 9% +29.9% 10195 ± 2% sched_debug.cpu#1.nr_load_updates
22636 ± 8% +19.4% 27022 ± 0% sched_debug.cpu#1.nr_switches
-595.50 ±-15% -77.0% -137.25 ± -7% sched_debug.cpu#1.nr_uninterruptible
23602 ± 11% +19.8% 28263 ± 5% sched_debug.cpu#1.sched_count
9753 ± 8% +25.6% 12251 ± 0% sched_debug.cpu#1.sched_goidle
3805 ± 11% -58.1% 1596 ± 13% sched_debug.cpu#1.ttwu_local
7063 ± 5% +21.6% 8589 ± 0% sched_debug.cpu#10.nr_load_updates
19925 ± 6% +19.4% 23800 ± 13% sched_debug.cpu#10.nr_switches
1036 ± 8% -85.1% 154.50 ± 14% sched_debug.cpu#10.nr_uninterruptible
20232 ± 8% +17.7% 23815 ± 13% sched_debug.cpu#10.sched_count
7808 ± 6% +39.1% 10862 ± 15% sched_debug.cpu#10.sched_goidle
2038 ± 3% -77.4% 460.50 ± 9% sched_debug.cpu#10.ttwu_local
6102 ± 8% +37.1% 8365 ± 0% sched_debug.cpu#11.nr_load_updates
617.25 ± 16% -75.6% 150.50 ± 9% sched_debug.cpu#11.nr_uninterruptible
2006 ± 6% -56.6% 870.25 ± 7% sched_debug.cpu#11.ttwu_local
7497 ± 7% +47.5% 11060 ± 2% sched_debug.cpu#12.nr_load_updates
21078 ± 9% +32.4% 27905 ± 10% sched_debug.cpu#12.nr_switches
994.00 ± 10% -82.8% 170.50 ± 2% sched_debug.cpu#12.nr_uninterruptible
21580 ± 12% +29.4% 27928 ± 10% sched_debug.cpu#12.sched_count
8237 ± 9% +42.6% 11747 ± 12% sched_debug.cpu#12.sched_goidle
9114 ± 7% +16.6% 10629 ± 10% sched_debug.cpu#12.ttwu_count
6373 ± 8% +29.6% 8259 ± 1% sched_debug.cpu#13.nr_load_updates
14413 ± 11% +45.4% 20958 ± 1% sched_debug.cpu#13.nr_switches
538.75 ± 10% -72.1% 150.50 ± 9% sched_debug.cpu#13.nr_uninterruptible
15126 ± 14% +38.7% 20972 ± 1% sched_debug.cpu#13.sched_count
5666 ± 13% +65.6% 9381 ± 1% sched_debug.cpu#13.sched_goidle
6399 ± 4% +53.5% 9825 ± 2% sched_debug.cpu#13.ttwu_count
2164 ± 3% -58.9% 890.00 ± 6% sched_debug.cpu#13.ttwu_local
1027 ± 5% -85.0% 154.00 ± 3% sched_debug.cpu#14.nr_uninterruptible
7727 ± 6% +24.0% 9585 ± 2% sched_debug.cpu#14.sched_goidle
2719 ± 20% -83.8% 441.50 ± 10% sched_debug.cpu#14.ttwu_local
6289 ± 6% +33.9% 8418 ± 5% sched_debug.cpu#15.nr_load_updates
13578 ± 7% +66.0% 22544 ± 6% sched_debug.cpu#15.nr_switches
626.25 ± 13% -79.7% 127.00 ± 12% sched_debug.cpu#15.nr_uninterruptible
13594 ± 7% +76.8% 24039 ± 10% sched_debug.cpu#15.sched_count
5280 ± 7% +90.4% 10052 ± 7% sched_debug.cpu#15.sched_goidle
6278 ± 7% +66.7% 10465 ± 8% sched_debug.cpu#15.ttwu_count
2049 ± 7% -45.0% 1126 ± 3% sched_debug.cpu#15.ttwu_local
-285.00 ±-32% -67.5% -92.50 ±-10% sched_debug.cpu#2.nr_uninterruptible
4026 ± 9% -49.2% 2047 ± 18% sched_debug.cpu#2.ttwu_local
8442 ± 6% +12.4% 9490 ± 6% sched_debug.cpu#3.nr_load_updates
-546.75 ±-13% -73.6% -144.25 ±-20% sched_debug.cpu#3.nr_uninterruptible
3751 ± 10% -57.4% 1599 ± 10% sched_debug.cpu#3.ttwu_local
38269 ± 20% -24.3% 28967 ± 8% sched_debug.cpu#4.nr_switches
-483.00 ± -4% -78.4% -104.25 ±-20% sched_debug.cpu#4.nr_uninterruptible
40064 ± 23% -27.6% 28987 ± 8% sched_debug.cpu#4.sched_count
16991 ± 22% -21.8% 13285 ± 9% sched_debug.cpu#4.sched_goidle
17083 ± 23% -31.6% 11685 ± 2% sched_debug.cpu#4.ttwu_count
6315 ± 52% -79.5% 1293 ± 14% sched_debug.cpu#4.ttwu_local
7934 ± 18% +28.5% 10194 ± 2% sched_debug.cpu#5.nr_load_updates
-548.00 ± -8% -71.9% -154.25 ± -3% sched_debug.cpu#5.nr_uninterruptible
4016 ± 10% -45.4% 2193 ± 37% sched_debug.cpu#5.ttwu_local
33750 ± 15% -18.8% 27416 ± 1% sched_debug.cpu#6.nr_switches
-496.25 ±-17% -79.8% -100.25 ±-16% sched_debug.cpu#6.nr_uninterruptible
34183 ± 15% -19.8% 27431 ± 1% sched_debug.cpu#6.sched_count
14905 ± 5% -24.2% 11303 ± 2% sched_debug.cpu#6.ttwu_count
3939 ± 10% -68.2% 1252 ± 18% sched_debug.cpu#6.ttwu_local
8448 ± 7% +18.4% 10006 ± 4% sched_debug.cpu#7.nr_load_updates
-561.25 ± -3% -75.4% -138.25 ± -9% sched_debug.cpu#7.nr_uninterruptible
12500 ± 6% +17.5% 14692 ± 11% sched_debug.cpu#7.ttwu_count
4091 ± 2% -47.6% 2144 ± 47% sched_debug.cpu#7.ttwu_local
7272 ± 6% +22.5% 8905 ± 0% sched_debug.cpu#8.nr_load_updates
1054 ± 8% -63.1% 389.25 ± 4% sched_debug.cpu#8.nr_uninterruptible
3888 ± 22% -66.0% 1322 ± 63% sched_debug.cpu#8.ttwu_local
6501 ± 8% +27.5% 8286 ± 1% sched_debug.cpu#9.nr_load_updates
13794 ± 4% +49.6% 20635 ± 4% sched_debug.cpu#9.nr_switches
572.00 ± 4% -72.6% 156.50 ± 14% sched_debug.cpu#9.nr_uninterruptible
13806 ± 4% +56.1% 21548 ± 10% sched_debug.cpu#9.sched_count
5400 ± 4% +71.8% 9276 ± 5% sched_debug.cpu#9.sched_goidle
2186 ± 2% -63.7% 794.25 ± 8% sched_debug.cpu#9.ttwu_local
57156 ± 7% -11.2% 50743 ± 8% sched_debug.ktime

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/mode/ipc:
wsm/hackbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1600%/process/pipe

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
value ± %stddev      %change      value ± %stddev    metric
120476 ± 0% -55.6% 53476 ± 2% hackbench.throughput
59658336 ± 19% +128.7% 1.364e+08 ± 11% hackbench.time.involuntary_context_switches
13406227 ± 11% -53.6% 6218943 ± 10% hackbench.time.minor_page_faults
1.89e+08 ± 14% +192.5% 5.529e+08 ± 12% hackbench.time.voluntary_context_switches
6473 ± 5% +19.8% 7754 ± 6% slabinfo.files_cache.active_objs
495518 ± 13% +52.9% 757653 ± 10% softirqs.RCU
2.69 ± 6% -54.9% 1.21 ± 3% turbostat.CPU%c1
1195988 ± 30% +70.3% 2036807 ± 17% meminfo.Committed_AS
97638 ± 30% +70.5% 166453 ± 17% meminfo.PageTables
422212 ± 15% +166.1% 1123695 ± 10% vmstat.system.cs
37971 ± 11% +307.4% 154684 ± 10% vmstat.system.in
380.00 ± 12% -54.2% 174.00 ± 10% time.file_system_outputs
59658336 ± 19% +128.7% 1.364e+08 ± 11% time.involuntary_context_switches
13406227 ± 11% -53.6% 6218943 ± 10% time.minor_page_faults
1.89e+08 ± 14% +192.5% 5.529e+08 ± 12% time.voluntary_context_switches
1.635e+08 ± 11% -62.9% 60612118 ± 9% proc-vmstat.numa_hit
1.635e+08 ± 11% -62.9% 60612118 ± 9% proc-vmstat.numa_local
88835997 ± 11% -62.8% 33080695 ± 9% proc-vmstat.pgalloc_dma32
77342277 ± 11% -62.8% 28767192 ± 9% proc-vmstat.pgalloc_normal
13627929 ± 11% -52.0% 6540912 ± 9% proc-vmstat.pgfault
1.661e+08 ± 11% -62.8% 61748704 ± 9% proc-vmstat.pgfree
1.151e+08 ± 8% -56.5% 50071736 ± 6% cpuidle.C1-NHM.time
1762819 ± 14% -59.1% 721352 ± 8% cpuidle.C1-NHM.usage
21760416 ± 7% -53.8% 10049639 ± 8% cpuidle.C1E-NHM.time
73018 ± 9% -58.3% 30479 ± 11% cpuidle.C1E-NHM.usage
20273243 ± 6% -57.3% 8653974 ± 4% cpuidle.C3-NHM.time
33620 ± 12% -51.9% 16179 ± 16% cpuidle.C3-NHM.usage
5676 ± 43% -74.5% 1445 ± 62% cpuidle.POLL.usage
33045 ±173% +876.6% 322737 ±171% latency_stats.avg.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
23160 ±173% -79.4% 4779 ±101% latency_stats.avg.call_rwsem_down_write_failed.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.entry_SYSCALL_64_fastpath
1039123 ± 22% -82.0% 187519 ± 5% latency_stats.avg.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
2189 ± 0% +782.0% 19308 ± 80% latency_stats.avg.walk_component.path_lookupat.filename_lookup.user_path_at_empty.vfs_fstatat.SYSC_newstat.SyS_newstat.entry_SYSCALL_64_fastpath
1.61e+08 ± 15% +220.0% 5.15e+08 ± 12% latency_stats.hits.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
8087524 ± 14% -68.8% 2525419 ± 7% latency_stats.hits.pipe_wait.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
66090 ±173% +440.6% 357299 ±169% latency_stats.max.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
208312 ±173% -86.7% 27797 ± 99% latency_stats.max.call_rwsem_down_write_failed.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.entry_SYSCALL_64_fastpath
2617 ± 0% +637.8% 19308 ± 80% latency_stats.max.walk_component.path_lookupat.filename_lookup.user_path_at_empty.vfs_fstatat.SYSC_newstat.SyS_newstat.entry_SYSCALL_64_fastpath
66091 ±173% +1857.8% 1293921 ±170% latency_stats.sum.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
208361 ±173% -86.6% 27983 ± 98% latency_stats.sum.call_rwsem_down_write_failed.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.entry_SYSCALL_64_fastpath
4377941 ± 10% -75.8% 1059007 ± 9% latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
3.039e+10 ± 14% +209.0% 9.388e+10 ± 9% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
5.46e+09 ± 13% -70.2% 1.628e+09 ± 5% latency_stats.sum.pipe_wait.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
4379 ± 0% +340.9% 19308 ± 80% latency_stats.sum.walk_component.path_lookupat.filename_lookup.user_path_at_empty.vfs_fstatat.SYSC_newstat.SyS_newstat.entry_SYSCALL_64_fastpath
4.76 ± 20% +150.9% 11.95 ± 25% perf-profile.cycles-pp.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate
19.93 ± 26% +3.7% 20.66 ± -4% perf-profile.cycles-pp.__read_nocancel
2.69 ± 17% +338.6% 11.79 ± 22% perf-profile.cycles-pp.__schedule.schedule.pipe_wait.pipe_read.__vfs_read
8.59 ± 35% +180.4% 24.07 ± 23% perf-profile.cycles-pp.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
9.67 ± 26% +16.0% 11.21 ± -8% perf-profile.cycles-pp.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath.__read_nocancel
16.29 ± 33% +148.3% 40.44 ± 17% perf-profile.cycles-pp.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
8.40 ± 17% +215.1% 26.48 ± 21% perf-profile.cycles-pp.__wake_up_common.__wake_up_sync_key.pipe_write.__vfs_write.vfs_write
10.76 ± 15% +170.4% 29.09 ± 17% perf-profile.cycles-pp.__wake_up_sync_key.pipe_write.__vfs_write.vfs_write.sys_write
22.84 ± 26% +14.9% 26.24 ± -3% perf-profile.cycles-pp.__write_nocancel
1.05 ± 30% +416.9% 5.44 ± 29% perf-profile.cycles-pp._raw_spin_lock_irq.__schedule.schedule.pipe_wait.pipe_read
6.03 ± 18% +172.1% 16.39 ± 21% perf-profile.cycles-pp.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function.autoremove_wake_function
0.98 ± 24% -18.6% 0.80 ±-125% perf-profile.cycles-pp.atime_needs_update.touch_atime.pipe_read.__vfs_read.vfs_read
8.10 ± 18% +221.3% 26.02 ± 21% perf-profile.cycles-pp.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.pipe_write.__vfs_write
2.08 ± 27% -23.4% 1.59 ±-62% perf-profile.cycles-pp.avc_has_perm.inode_has_perm.file_has_perm.selinux_file_permission.security_file_permission
3.16 ± 23% -72.5% 0.87 ±134% perf-profile.cycles-pp.copy_page_from_iter.pipe_write.__vfs_write.vfs_write.sys_write
1.18 ± 25% -54.2% 0.54 ± 98% perf-profile.cycles-pp.copy_page_from_iter_iovec.copy_page_from_iter.pipe_write.__vfs_write.vfs_write
3.99 ± 21% -66.9% 1.32 ± 95% perf-profile.cycles-pp.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.sys_read
1.39 ± 24% -76.0% 0.33 ±168% perf-profile.cycles-pp.copy_page_to_iter_iovec.copy_page_to_iter.pipe_read.__vfs_read.vfs_read
1.80 ± 25% -75.3% 0.45 ±165% perf-profile.cycles-pp.copy_user_generic_string.copy_page_from_iter.pipe_write.__vfs_write.vfs_write
2.29 ± 25% -75.6% 0.56 ±154% perf-profile.cycles-pp.copy_user_generic_string.copy_page_to_iter.pipe_read.__vfs_read.vfs_read
0.79 ± 19% -74.9% 0.20 ±138% perf-profile.cycles-pp.cpu_startup_entry.start_secondary
8.08 ± 18% +221.0% 25.95 ± 21% perf-profile.cycles-pp.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.pipe_write
4.43 ± 20% +151.1% 11.12 ± 26% perf-profile.cycles-pp.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair
5.54 ± 18% +165.8% 14.74 ± 22% perf-profile.cycles-pp.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up
5.85 ± 18% +172.9% 15.97 ± 21% perf-profile.cycles-pp.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function
18.31 ± 26% +3.7% 18.99 ± -5% perf-profile.cycles-pp.entry_SYSCALL_64_fastpath.__read_nocancel
21.32 ± 26% +14.7% 24.45 ± -4% perf-profile.cycles-pp.entry_SYSCALL_64_fastpath.__write_nocancel
1.49 ± 27% -78.7% 0.32 ±164% perf-profile.cycles-pp.file_has_perm.selinux_file_permission.security_file_permission.rw_verify_area.vfs_read
1.50 ± 27% -57.1% 0.65 ± 93% perf-profile.cycles-pp.file_has_perm.selinux_file_permission.security_file_permission.rw_verify_area.vfs_write
0.98 ± 26% -5.6% 0.93 ±-107% perf-profile.cycles-pp.file_update_time.pipe_write.__vfs_write.vfs_write.sys_write
2.32 ± 27% -60.4% 0.92 ± 98% perf-profile.cycles-pp.inode_has_perm.isra.28.file_has_perm.selinux_file_permission.security_file_permission.rw_verify_area
0.88 ± 28% -7.4% 0.81 ±-123% perf-profile.cycles-pp.mutex_lock.pipe_read.__vfs_read.vfs_read.sys_read
1.06 ± 26% -70.7% 0.31 ±160% perf-profile.cycles-pp.mutex_lock.pipe_write.__vfs_write.vfs_write.sys_write
0.65 ± 39% +467.0% 3.70 ± 61% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock.try_to_wake_up.default_wake_function.autoremove_wake_function
1.02 ± 28% +417.1% 5.30 ± 29% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irq.__schedule.schedule.pipe_wait
0.14 ± 37% +285.7% 0.54 ± 55% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irq.__schedule.schedule.prepare_exit_to_usermode
15.49 ± 2% +59.2% 24.67 ± 5% perf-profile.cycles-pp.pipe_read.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
3.00 ± 18% +332.1% 12.95 ± 23% perf-profile.cycles-pp.pipe_wait.pipe_read.__vfs_read.vfs_read.sys_read
25.53 ± 6% +64.1% 41.90 ± 3% perf-profile.cycles-pp.pipe_write.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
4.00 ± 20% +142.4% 9.70 ± 28% perf-profile.cycles-pp.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
4.40 ± 27% -12.4% 3.85 ±-25% perf-profile.cycles-pp.rw_verify_area.vfs_read.sys_read.entry_SYSCALL_64_fastpath.__read_nocancel
3.68 ± 27% -10.2% 3.31 ±-30% perf-profile.cycles-pp.rw_verify_area.vfs_write.sys_write.entry_SYSCALL_64_fastpath.__write_nocancel
4.45 ± 20% +151.2% 11.18 ± 26% perf-profile.cycles-pp.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task
2.69 ± 18% +345.9% 11.99 ± 23% perf-profile.cycles-pp.schedule.pipe_wait.pipe_read.__vfs_read.vfs_read
4.14 ± 23% -69.5% 1.26 ± 99% perf-profile.cycles-pp.security_file_permission.rw_verify_area.vfs_read.sys_read.entry_SYSCALL_64_fastpath
3.39 ± 24% -73.3% 0.91 ±131% perf-profile.cycles-pp.security_file_permission.rw_verify_area.vfs_write.sys_write.entry_SYSCALL_64_fastpath
2.52 ± 27% -74.8% 0.64 ±137% perf-profile.cycles-pp.selinux_file_permission.security_file_permission.rw_verify_area.vfs_read.sys_read
2.67 ± 26% -75.7% 0.65 ±150% perf-profile.cycles-pp.selinux_file_permission.security_file_permission.rw_verify_area.vfs_write.sys_write
0.79 ± 19% -75.0% 0.20 ±138% perf-profile.cycles-pp.start_secondary
17.13 ± 26% +4.6% 17.91 ± -5% perf-profile.cycles-pp.sys_read.entry_SYSCALL_64_fastpath.__read_nocancel
27.27 ± 33% +76.9% 48.25 ± 22% perf-profile.cycles-pp.sys_write.entry_SYSCALL_64_fastpath
20.17 ± 26% +16.1% 23.43 ± -4% perf-profile.cycles-pp.sys_write.entry_SYSCALL_64_fastpath.__write_nocancel
1.27 ± 24% -77.0% 0.29 ±163% perf-profile.cycles-pp.touch_atime.pipe_read.__vfs_read.vfs_read.sys_read
8.42 ± 18% +207.7% 25.91 ± 21% perf-profile.cycles-pp.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key
6.32 ± 18% +171.5% 17.14 ± 21% perf-profile.cycles-pp.ttwu_do_activate.constprop.85.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common
16.65 ± 36% +88.6% 31.40 ± 29% perf-profile.cycles-pp.vfs_read.sys_read.entry_SYSCALL_64_fastpath
15.66 ± 26% +5.7% 16.55 ± -6% perf-profile.cycles-pp.vfs_read.sys_read.entry_SYSCALL_64_fastpath.__read_nocancel
24.92 ± 35% +87.1% 46.62 ± 21% perf-profile.cycles-pp.vfs_write.sys_write.entry_SYSCALL_64_fastpath
18.65 ± 26% +17.5% 21.91 ± -4% perf-profile.cycles-pp.vfs_write.sys_write.entry_SYSCALL_64_fastpath.__write_nocancel
4675065 ± 10% +67.6% 7833279 ± 14% sched_debug.cfs_rq[0]:/.min_vruntime
1619 ± 11% -22.2% 1260 ± 11% sched_debug.cfs_rq[0]:/.tg_load_avg
59.25 ± 26% +88.2% 111.50 ± 34% sched_debug.cfs_rq[10]:/.load
5497564 ± 15% +53.5% 8436116 ± 21% sched_debug.cfs_rq[10]:/.min_vruntime
4291983 ± 10% +59.8% 6856517 ± 16% sched_debug.cfs_rq[11]:/.min_vruntime
-392469 ±-20% +167.7% -1050587 ±-53% sched_debug.cfs_rq[11]:/.spread0
5057030 ± 16% +40.6% 7109313 ± 15% sched_debug.cfs_rq[1]:/.min_vruntime
1542 ± 8% -17.5% 1272 ± 10% sched_debug.cfs_rq[1]:/.tg_load_avg
5441336 ± 11% +42.3% 7741417 ± 26% sched_debug.cfs_rq[2]:/.min_vruntime
1518 ± 7% -17.2% 1257 ± 9% sched_debug.cfs_rq[2]:/.tg_load_avg
4740752 ± 6% +76.2% 8355438 ± 22% sched_debug.cfs_rq[3]:/.min_vruntime
58.00 ± 13% +21.1% 70.25 ± 6% sched_debug.cfs_rq[3]:/.runnable_load_avg
1494 ± 9% -16.3% 1250 ± 10% sched_debug.cfs_rq[3]:/.tg_load_avg
5082181 ± 13% +56.0% 7929921 ± 22% sched_debug.cfs_rq[4]:/.min_vruntime
1474 ± 8% -14.8% 1256 ± 9% sched_debug.cfs_rq[4]:/.tg_load_avg
4100903 ± 11% +57.9% 6473827 ± 16% sched_debug.cfs_rq[5]:/.min_vruntime
-578478 ± -8% +142.6% -1403287 ±-28% sched_debug.cfs_rq[5]:/.spread0
1422 ± 8% -12.1% 1250 ± 9% sched_debug.cfs_rq[5]:/.tg_load_avg
5017377 ± 10% +67.6% 8409972 ± 14% sched_debug.cfs_rq[6]:/.min_vruntime
335870 ± 19% +56.5% 525673 ± 17% sched_debug.cfs_rq[6]:/.spread0
5485486 ± 15% +35.8% 7448926 ± 12% sched_debug.cfs_rq[7]:/.min_vruntime
803062 ± 46% -154.8% -440070 ±-157% sched_debug.cfs_rq[7]:/.spread0
5081728 ± 6% +75.7% 8928487 ± 22% sched_debug.cfs_rq[9]:/.min_vruntime
864030 ± 10% -34.7% 564006 ± 15% sched_debug.cpu#0.avg_idle
77.75 ± 16% +43.7% 111.75 ± 16% sched_debug.cpu#0.load
31.00 ± 47% +357.3% 141.75 ± 23% sched_debug.cpu#0.nr_running
11836165 ± 28% +124.7% 26592746 ± 13% sched_debug.cpu#0.nr_switches
11932896 ± 28% +123.7% 26689581 ± 13% sched_debug.cpu#0.sched_count
100724 ± 52% -70.6% 29596 ± 5% sched_debug.cpu#0.sched_goidle
8618675 ± 21% +134.6% 20218676 ± 16% sched_debug.cpu#0.ttwu_count
849313 ± 7% -27.3% 617355 ± 19% sched_debug.cpu#1.avg_idle
16.25 ± 57% +660.0% 123.50 ± 27% sched_debug.cpu#1.nr_running
10473305 ± 16% +162.6% 27502509 ± 12% sched_debug.cpu#1.nr_switches
42.75 ± 62% -112.3% -5.25 ±-107% sched_debug.cpu#1.nr_uninterruptible
10473343 ± 16% +162.6% 27502537 ± 12% sched_debug.cpu#1.sched_count
74792 ± 23% -50.6% 36968 ± 46% sched_debug.cpu#1.sched_goidle
7675323 ± 10% +188.8% 22163081 ± 16% sched_debug.cpu#1.ttwu_count
63.25 ± 34% +77.9% 112.50 ± 34% sched_debug.cpu#10.load
8.00 ± 39% +703.1% 64.25 ± 22% sched_debug.cpu#10.nr_running
10891329 ± 17% +162.7% 28610593 ± 17% sched_debug.cpu#10.nr_switches
10891376 ± 17% +162.7% 28610623 ± 17% sched_debug.cpu#10.sched_count
81573 ± 47% -70.7% 23924 ± 21% sched_debug.cpu#10.sched_goidle
8105340 ± 18% +158.9% 20983711 ± 15% sched_debug.cpu#10.ttwu_count
9.50 ± 15% +421.1% 49.50 ± 19% sched_debug.cpu#11.nr_running
10068993 ± 17% +142.2% 24385810 ± 21% sched_debug.cpu#11.nr_switches
10069032 ± 17% +142.2% 24385840 ± 21% sched_debug.cpu#11.sched_count
8730036 ± 16% +172.1% 23753845 ± 15% sched_debug.cpu#11.ttwu_count
20.25 ± 50% +414.8% 104.25 ± 26% sched_debug.cpu#2.nr_running
12541337 ± 18% +108.2% 26114953 ± 20% sched_debug.cpu#2.nr_switches
12541390 ± 18% +108.2% 26114983 ± 20% sched_debug.cpu#2.sched_count
112741 ± 59% -73.0% 30451 ± 6% sched_debug.cpu#2.sched_goidle
8644640 ± 15% +144.2% 21113672 ± 13% sched_debug.cpu#2.ttwu_count
4439780 ± 39% -46.3% 2384742 ± 17% sched_debug.cpu#2.ttwu_local
851543 ± 7% -34.4% 558588 ± 21% sched_debug.cpu#3.avg_idle
19.00 ±100% +367.1% 88.75 ± 21% sched_debug.cpu#3.nr_running
10126960 ± 11% +187.2% 29088085 ± 24% sched_debug.cpu#3.nr_switches
63.50 ± 57% -103.9% -2.50 ±-44% sched_debug.cpu#3.nr_uninterruptible
10126996 ± 11% +187.2% 29088117 ± 24% sched_debug.cpu#3.sched_count
83456 ± 15% -59.6% 33744 ± 33% sched_debug.cpu#3.sched_goidle
7998666 ± 13% +156.9% 20547508 ± 12% sched_debug.cpu#3.ttwu_count
17.00 ± 68% +514.7% 104.50 ± 20% sched_debug.cpu#4.nr_running
12100537 ± 31% +137.1% 28686880 ± 15% sched_debug.cpu#4.nr_switches
12100580 ± 31% +137.1% 28686907 ± 15% sched_debug.cpu#4.sched_count
93180 ± 38% -72.1% 25989 ± 14% sched_debug.cpu#4.sched_goidle
8789152 ± 25% +124.5% 19729121 ± 15% sched_debug.cpu#4.ttwu_count
11.00 ± 29% +663.6% 84.00 ± 21% sched_debug.cpu#5.nr_running
11572613 ± 32% +117.7% 25196368 ± 22% sched_debug.cpu#5.nr_switches
102.25 ± 28% -100.2% -0.25 ±-2304% sched_debug.cpu#5.nr_uninterruptible
11572652 ± 32% +117.7% 25196395 ± 22% sched_debug.cpu#5.sched_count
103273 ± 53% -73.0% 27895 ± 6% sched_debug.cpu#5.sched_goidle
9031893 ± 26% +152.0% 22760966 ± 13% sched_debug.cpu#5.ttwu_count
888372 ± 6% -27.7% 642576 ± 9% sched_debug.cpu#6.avg_idle
15.75 ± 63% +520.6% 97.75 ± 21% sched_debug.cpu#6.nr_running
10818260 ± 19% +142.0% 26184340 ± 11% sched_debug.cpu#6.nr_switches
-44.75 ±-25% -110.6% 4.75 ± 47% sched_debug.cpu#6.nr_uninterruptible
10818299 ± 19% +142.0% 26184367 ± 11% sched_debug.cpu#6.sched_count
80304 ± 57% -66.0% 27307 ± 25% sched_debug.cpu#6.sched_goidle
8181776 ± 15% +163.7% 21575648 ± 18% sched_debug.cpu#6.ttwu_count
848213 ± 9% -18.6% 690536 ± 17% sched_debug.cpu#7.avg_idle
13.75 ± 68% +556.4% 90.25 ± 30% sched_debug.cpu#7.nr_running
10935164 ± 16% +142.9% 26556814 ± 14% sched_debug.cpu#7.nr_switches
-117.50 ±-36% -105.5% 6.50 ± 83% sched_debug.cpu#7.nr_uninterruptible
10935198 ± 16% +142.9% 26556848 ± 14% sched_debug.cpu#7.sched_count
7660938 ± 13% +194.0% 22523956 ± 16% sched_debug.cpu#7.ttwu_count
840855 ± 9% -30.7% 582591 ± 28% sched_debug.cpu#8.avg_idle
12.25 ± 22% +575.5% 82.75 ± 29% sched_debug.cpu#8.nr_running
11334986 ± 8% +132.8% 26385100 ± 19% sched_debug.cpu#8.nr_switches
-126.00 ±-23% -101.2% 1.50 ±137% sched_debug.cpu#8.nr_uninterruptible
11335031 ± 8% +132.8% 26385132 ± 19% sched_debug.cpu#8.sched_count
67547 ± 7% -61.8% 25796 ± 6% sched_debug.cpu#8.sched_goidle
7950407 ± 7% +175.9% 21932052 ± 13% sched_debug.cpu#8.ttwu_count
3867102 ± 8% -24.1% 2934707 ± 16% sched_debug.cpu#8.ttwu_local
813902 ± 8% -27.2% 592609 ± 20% sched_debug.cpu#9.avg_idle
6.50 ± 35% +876.9% 63.50 ± 20% sched_debug.cpu#9.nr_running
10297288 ± 10% +177.4% 28563855 ± 20% sched_debug.cpu#9.nr_switches
40.50 ±126% -90.7% 3.75 ± 76% sched_debug.cpu#9.nr_uninterruptible
10297326 ± 10% +177.4% 28563889 ± 20% sched_debug.cpu#9.sched_count
8214771 ± 14% +161.3% 21461217 ± 13% sched_debug.cpu#9.ttwu_count
0.00 ± 65% +1.9e+05% 0.94 ±172% sched_debug.rt_rq[9]:/.rt_time
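
(Aside on the profile above: the dominant frames -- pipe_write() feeding
__wake_up_sync_key()/try_to_wake_up() on the writer side, pipe_read()
blocking in pipe_wait()/schedule() on the reader side -- are the classic
blocking-pipe ping-pong. A minimal userspace sketch of that pattern, for
poking at the wakeup path by hand; message size and loop count are
arbitrary choices and this is not the lkp harness:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>

#define MSG_SIZE 100		/* small, hackbench-style message */
#define ITERS    100000

int main(void)
{
	int to_child[2], to_parent[2];
	char buf[MSG_SIZE];

	if (pipe(to_child) || pipe(to_parent)) {
		perror("pipe");
		return 1;
	}

	if (fork() == 0) {
		/* child: echo every message straight back */
		for (int i = 0; i < ITERS; i++) {
			if (read(to_child[0], buf, MSG_SIZE) != MSG_SIZE ||
			    write(to_parent[1], buf, MSG_SIZE) != MSG_SIZE)
				_exit(1);
		}
		_exit(0);
	}

	memset(buf, 0, MSG_SIZE);
	for (int i = 0; i < ITERS; i++) {
		/* each write wakes the reader sleeping in pipe_wait() */
		if (write(to_child[1], buf, MSG_SIZE) != MSG_SIZE ||
		    read(to_parent[0], buf, MSG_SIZE) != MSG_SIZE) {
			perror("ping-pong");
			return 1;
		}
	}
	wait(NULL);
	return 0;
}

Every round trip costs two wakeups and two context switches, which is
consistent with the context-switch counts in this report moving together
with the schedule()/try_to_wake_up() cycles above.)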

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/mode/ipc:
wsm/hackbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1600%/process/socket

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
%stddev %change %stddev
\ | \
61865 ± 0% -27.2% 45045 ± 0% hackbench.throughput
13303372 ± 10% +308.5% 54345911 ± 7% hackbench.time.involuntary_context_switches
7645296 ± 1% -30.3% 5328849 ± 10% hackbench.time.minor_page_faults
295.11 ± 1% -10.3% 264.80 ± 10% hackbench.time.user_time
74147910 ± 1% +208.4% 2.287e+08 ± 8% hackbench.time.voluntary_context_switches
1.47 ± 2% -28.4% 1.05 ± 2% turbostat.CPU%c1
771.00 ± 4% +49.1% 1149 ± 11% vmstat.procs.r
152893 ± 2% +207.3% 469896 ± 8% vmstat.system.cs
20470 ± 0% +226.0% 66726 ± 7% vmstat.system.in
12228 ± 2% -14.3% 10476 ± 11% slabinfo.kmalloc-128.active_objs
12332 ± 2% -12.9% 10740 ± 10% slabinfo.kmalloc-128.num_objs
208.00 ± 13% +61.5% 336.00 ± 15% slabinfo.kmem_cache_node.active_objs
208.00 ± 13% +61.5% 336.00 ± 15% slabinfo.kmem_cache_node.num_objs
210.00 ± 1% -30.5% 146.00 ± 10% time.file_system_outputs
13303372 ± 10% +308.5% 54345911 ± 7% time.involuntary_context_switches
7645296 ± 1% -30.3% 5328849 ± 10% time.minor_page_faults
74147910 ± 1% +208.4% 2.287e+08 ± 8% time.voluntary_context_switches
47866591 ± 2% -32.9% 32104345 ± 5% cpuidle.C1-NHM.time
881342 ± 13% -53.0% 413811 ± 4% cpuidle.C1-NHM.usage
25892984 ± 13% -30.9% 17890476 ± 16% cpuidle.C1E-NHM.time
113902 ± 5% -40.9% 67319 ± 9% cpuidle.C1E-NHM.usage
11513219 ± 14% -27.3% 8364874 ± 7% cpuidle.C3-NHM.time
27055 ± 12% -40.4% 16112 ± 18% cpuidle.C3-NHM.usage
2660 ± 23% -69.0% 825.00 ± 42% cpuidle.POLL.usage
8725926 ± 1% -28.0% 6284093 ± 9% proc-vmstat.numa_hit
8725926 ± 1% -28.0% 6284093 ± 9% proc-vmstat.numa_local
4760 ± 0% +7.0% 5092 ± 4% proc-vmstat.pgactivate
6785126 ± 1% -26.6% 4980509 ± 9% proc-vmstat.pgalloc_dma32
5833593 ± 1% -26.2% 4303142 ± 9% proc-vmstat.pgalloc_normal
7798582 ± 1% -29.0% 5537265 ± 9% proc-vmstat.pgfault
12467424 ± 2% -26.3% 9188443 ± 9% proc-vmstat.pgfree
7557548 ±141% +447.5% 41374481 ±148% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
20617833 ± 2% -86.6% 2761701 ± 11% latency_stats.hits.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
30957927 ± 4% +537.4% 1.973e+08 ± 8% latency_stats.hits.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
2.50 ±173% +1.9e+06% 48667 ±163% latency_stats.max.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
8488903 ±141% +400.3% 42473837 ±143% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
3.25 ±173% +2.5e+06% 81849 ±167% latency_stats.sum.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
15102599 ±141% +249.6% 52805181 ±113% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
1911465 ± 11% +241.9% 6534721 ± 15% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
3.897e+10 ± 1% -88.5% 4.487e+09 ± 11% latency_stats.sum.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
7698 ± 8% +4.3e+05% 32871122 ±173% latency_stats.sum.wait_on_page_bit.filemap_fdatawait_range.filemap_write_and_wait_range.nfs4_file_fsync.[nfsv4].vfs_fsync_range.vfs_fsync.nfs4_file_flush.[nfsv4].filp_close.do_dup2.SyS_dup2.entry_SYSCALL_64_fastpath
0.12 ± 10% +8010.4% 9.73 ± 61% perf-profile.cycles-pp.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate
0.01 ±103% +38140.0% 4.78 ± 66% perf-profile.cycles-pp.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency
0.01 ± 57% +11900.0% 0.90 ± 81% perf-profile.cycles-pp.__kmalloc_node_track_caller.__kmalloc_reserve.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb
0.03 ± 64% +3872.7% 1.09 ± 81% perf-profile.cycles-pp.__kmalloc_reserve.isra.31.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg
0.00 ± -1% +Inf% 0.62 ± 89% perf-profile.cycles-pp.__schedule.schedule.prepare_exit_to_usermode.syscall_return_slowpath.int_ret_from_sys_call
0.00 ± -1% +Inf% 6.31 ± 63% perf-profile.cycles-pp.__schedule.schedule.schedule_timeout.unix_stream_read_generic.unix_stream_recvmsg
46.68 ± 0% -54.0% 21.49 ± 65% perf-profile.cycles-pp.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
0.01 ± 0% +1.9e+05% 18.93 ± 56% perf-profile.cycles-pp.__wake_up_common.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg.sock_sendmsg
0.01 ± 0% +2.1e+05% 20.80 ± 48% perf-profile.cycles-pp.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg.sock_sendmsg.sock_write_iter
0.00 ± -1% +Inf% 2.85 ± 71% perf-profile.cycles-pp._raw_spin_lock.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common
0.00 ± -1% +Inf% 2.94 ± 68% perf-profile.cycles-pp._raw_spin_lock_irq.__schedule.schedule.schedule_timeout.unix_stream_read_generic
0.00 ± -1% +Inf% 0.63 ± 78% perf-profile.cycles-pp._raw_spin_lock_irqsave.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg.sock_sendmsg
0.28 ± 12% +4379.3% 12.43 ± 58% perf-profile.cycles-pp.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function.autoremove_wake_function
0.01 ± 0% +1.9e+05% 18.78 ± 56% perf-profile.cycles-pp.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg
14.30 ± 2% -47.3% 7.53 ± 39% perf-profile.cycles-pp.consume_skb.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter
0.01 ± 0% +1.9e+05% 18.68 ± 56% perf-profile.cycles-pp.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sock_def_readable
0.08 ± 23% +11309.4% 9.13 ± 61% perf-profile.cycles-pp.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair
0.21 ± 10% +5245.9% 11.36 ± 59% perf-profile.cycles-pp.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up
0.24 ± 10% +5022.1% 12.17 ± 58% perf-profile.cycles-pp.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function
0.00 ± -1% +Inf% 0.66 ± 89% perf-profile.cycles-pp.int_ret_from_sys_call
0.00 ± -1% +Inf% 1.49 ± 66% perf-profile.cycles-pp.is_module_text_address.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk
0.00 ± -1% +Inf% 3.03 ± 71% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock.try_to_wake_up.default_wake_function.autoremove_wake_function
0.00 ± -1% +Inf% 0.57 ± 78% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irq.__schedule.schedule.prepare_exit_to_usermode
0.00 ± -1% +Inf% 2.91 ± 68% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irq.__schedule.schedule.schedule_timeout
0.00 ± -1% +Inf% 0.80 ± 53% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg
0.00 ± -1% +Inf% 0.61 ± 89% perf-profile.cycles-pp.prepare_exit_to_usermode.syscall_return_slowpath.int_ret_from_sys_call
0.06 ± 31% +14226.1% 8.24 ± 62% perf-profile.cycles-pp.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
0.08 ± 23% +11350.0% 9.16 ± 61% perf-profile.cycles-pp.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task
0.00 ± -1% +Inf% 0.58 ± 90% perf-profile.cycles-pp.schedule.prepare_exit_to_usermode.syscall_return_slowpath.int_ret_from_sys_call
0.00 ± -1% +Inf% 6.34 ± 63% perf-profile.cycles-pp.schedule.schedule_timeout.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg
0.00 ± -1% +Inf% 6.48 ± 63% perf-profile.cycles-pp.schedule_timeout.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter
11.76 ± 3% -37.8% 7.32 ± 37% perf-profile.cycles-pp.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write
0.03 ± 44% +86980.0% 21.77 ± 45% perf-profile.cycles-pp.sock_def_readable.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write
45.88 ± 0% -22.1% 35.72 ± 10% perf-profile.cycles-pp.sock_read_iter.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
44.91 ± 0% -21.9% 35.06 ± 10% perf-profile.cycles-pp.sock_recvmsg.sock_read_iter.__vfs_read.vfs_read.sys_read
35.31 ± 0% +20.9% 42.71 ± 8% perf-profile.cycles-pp.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write.sys_write
35.96 ± 0% +20.7% 43.40 ± 7% perf-profile.cycles-pp.sock_write_iter.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
55.69 ± 0% -55.7% 24.66 ± 66% perf-profile.cycles-pp.sys_read.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 0.63 ± 90% perf-profile.cycles-pp.syscall_return_slowpath.int_ret_from_sys_call
0.55 ± 17% +3347.5% 18.88 ± 55% perf-profile.cycles-pp.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key
0.31 ± 15% +4141.8% 12.94 ± 58% perf-profile.cycles-pp.ttwu_do_activate.constprop.85.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common
41.78 ± 0% -21.5% 32.80 ± 10% perf-profile.cycles-pp.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.__vfs_read
43.30 ± 0% -21.8% 33.88 ± 10% perf-profile.cycles-pp.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.__vfs_read.vfs_read
33.20 ± 0% +23.4% 40.96 ± 9% perf-profile.cycles-pp.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write
54.04 ± 0% -55.6% 23.97 ± 66% perf-profile.cycles-pp.vfs_read.sys_read.entry_SYSCALL_64_fastpath
3851227 ± 4% +15.4% 4443109 ± 6% sched_debug.cfs_rq[0]:/.min_vruntime
3941688 ± 5% +15.7% 4562136 ± 3% sched_debug.cfs_rq[10]:/.min_vruntime
3969451 ± 5% +11.4% 4421502 ± 4% sched_debug.cfs_rq[11]:/.min_vruntime
3926203 ± 3% +14.4% 4491088 ± 8% sched_debug.cfs_rq[1]:/.min_vruntime
1863 ± 7% -21.6% 1460 ± 10% sched_debug.cfs_rq[1]:/.tg_load_avg
189.00 ± 7% -43.4% 107.00 ± 12% sched_debug.cfs_rq[2]:/.load_avg
3953398 ± 5% +15.2% 4552813 ± 10% sched_debug.cfs_rq[2]:/.min_vruntime
97.25 ± 12% -27.8% 70.25 ± 9% sched_debug.cfs_rq[2]:/.runnable_load_avg
1981 ± 8% -24.1% 1503 ± 10% sched_debug.cfs_rq[2]:/.tg_load_avg
180.50 ± 12% -40.3% 107.75 ± 11% sched_debug.cfs_rq[2]:/.tg_load_avg_contrib
1015 ± 3% -14.8% 865.25 ± 9% sched_debug.cfs_rq[2]:/.util_avg
2001 ± 6% -23.7% 1527 ± 13% sched_debug.cfs_rq[3]:/.tg_load_avg
184.75 ± 4% -35.2% 119.75 ± 18% sched_debug.cfs_rq[4]:/.load_avg
3929671 ± 6% +11.7% 4388297 ± 4% sched_debug.cfs_rq[4]:/.min_vruntime
2034 ± 7% -24.5% 1536 ± 11% sched_debug.cfs_rq[4]:/.tg_load_avg
174.25 ± 9% -31.3% 119.75 ± 18% sched_debug.cfs_rq[4]:/.tg_load_avg_contrib
195.00 ± 25% -36.5% 123.75 ± 11% sched_debug.cfs_rq[5]:/.load_avg
2027 ± 6% -23.5% 1550 ± 10% sched_debug.cfs_rq[5]:/.tg_load_avg
188.75 ± 25% -34.4% 123.75 ± 11% sched_debug.cfs_rq[5]:/.tg_load_avg_contrib
167.25 ± 1% -25.6% 124.50 ± 17% sched_debug.cfs_rq[6]:/.load_avg
3925342 ± 4% +17.3% 4603806 ± 6% sched_debug.cfs_rq[6]:/.min_vruntime
95.00 ± 6% -25.8% 70.50 ± 18% sched_debug.cfs_rq[6]:/.runnable_load_avg
2002 ± 6% -20.2% 1597 ± 10% sched_debug.cfs_rq[6]:/.tg_load_avg
161.50 ± 2% -22.9% 124.50 ± 17% sched_debug.cfs_rq[6]:/.tg_load_avg_contrib
146.50 ± 10% -20.8% 116.00 ± 10% sched_debug.cfs_rq[7]:/.load_avg
4006338 ± 4% +18.1% 4732367 ± 9% sched_debug.cfs_rq[7]:/.min_vruntime
79.00 ± 8% -16.8% 65.75 ± 5% sched_debug.cfs_rq[7]:/.runnable_load_avg
1980 ± 5% -18.4% 1616 ± 11% sched_debug.cfs_rq[7]:/.tg_load_avg
143.75 ± 9% -18.8% 116.75 ± 10% sched_debug.cfs_rq[7]:/.tg_load_avg_contrib
0.00 ± 0% +2e+13% 200550 ±101% sched_debug.cfs_rq[8]:/.MIN_vruntime
0.00 ± 0% +2e+13% 200550 ±101% sched_debug.cfs_rq[8]:/.max_vruntime
3977925 ± 5% +17.4% 4670793 ± 10% sched_debug.cfs_rq[8]:/.min_vruntime
1981 ± 5% -16.1% 1663 ± 10% sched_debug.cfs_rq[8]:/.tg_load_avg
1971 ± 4% -14.6% 1682 ± 9% sched_debug.cfs_rq[9]:/.tg_load_avg
30.00 ± 7% +55.0% 46.50 ± 6% sched_debug.cpu#0.nr_running
3254240 ± 5% +240.2% 11071073 ± 7% sched_debug.cpu#0.nr_switches
3351054 ± 5% +233.3% 11167494 ± 7% sched_debug.cpu#0.sched_count
34300 ± 21% -39.6% 20708 ± 20% sched_debug.cpu#0.sched_goidle
2866474 ± 4% +240.3% 9754459 ± 2% sched_debug.cpu#0.ttwu_count
2546182 ± 4% -41.6% 1486788 ± 5% sched_debug.cpu#0.ttwu_local
697842 ± 5% -18.6% 568211 ± 15% sched_debug.cpu#1.avg_idle
24.50 ± 16% +44.9% 35.50 ± 11% sched_debug.cpu#1.nr_running
4994889 ± 59% +188.3% 14400709 ± 18% sched_debug.cpu#1.nr_switches
4994904 ± 59% +188.3% 14400741 ± 18% sched_debug.cpu#1.sched_count
3811656 ± 40% +179.5% 10654366 ± 10% sched_debug.cpu#1.ttwu_count
82.50 ± 8% -11.8% 72.75 ± 8% sched_debug.cpu#10.cpu_load[2]
82.50 ± 8% -12.1% 72.50 ± 9% sched_debug.cpu#10.cpu_load[3]
3389364 ± 4% +260.7% 12226143 ± 11% sched_debug.cpu#10.nr_switches
3389386 ± 4% +260.7% 12226174 ± 11% sched_debug.cpu#10.sched_count
2932787 ± 3% +238.6% 9929944 ± 4% sched_debug.cpu#10.ttwu_count
100.25 ± 29% -35.2% 65.00 ± 7% sched_debug.cpu#11.load
3763818 ± 20% +212.3% 11753872 ± 3% sched_debug.cpu#11.nr_switches
3763834 ± 20% +212.3% 11753899 ± 3% sched_debug.cpu#11.sched_count
32782 ± 17% -48.2% 16996 ± 5% sched_debug.cpu#11.sched_goidle
3141135 ± 12% +223.8% 10171787 ± 6% sched_debug.cpu#11.ttwu_count
2802165 ± 11% -34.5% 1835136 ± 14% sched_debug.cpu#11.ttwu_local
94.50 ± 12% -27.0% 69.00 ± 6% sched_debug.cpu#2.cpu_load[0]
95.75 ± 12% -28.2% 68.75 ± 7% sched_debug.cpu#2.cpu_load[1]
96.25 ± 12% -28.8% 68.50 ± 7% sched_debug.cpu#2.cpu_load[2]
95.75 ± 11% -28.7% 68.25 ± 8% sched_debug.cpu#2.cpu_load[3]
93.50 ± 10% -27.0% 68.25 ± 8% sched_debug.cpu#2.cpu_load[4]
17.00 ± 29% +101.5% 34.25 ± 13% sched_debug.cpu#2.nr_running
6789528 ± 54% +82.2% 12370279 ± 8% sched_debug.cpu#2.nr_switches
6789547 ± 54% +82.2% 12370304 ± 8% sched_debug.cpu#2.sched_count
67044 ± 50% -73.9% 17526 ± 18% sched_debug.cpu#2.sched_goidle
4696974 ± 42% +98.6% 9327473 ± 1% sched_debug.cpu#2.ttwu_count
4325255 ± 44% -66.5% 1449741 ± 3% sched_debug.cpu#2.ttwu_local
716240 ± 14% -25.5% 533320 ± 26% sched_debug.cpu#3.avg_idle
3313498 ± 3% +277.7% 12515262 ± 2% sched_debug.cpu#3.nr_switches
3313518 ± 3% +277.7% 12515289 ± 2% sched_debug.cpu#3.sched_count
37743 ± 12% -57.5% 16029 ± 13% sched_debug.cpu#3.sched_goidle
2870327 ± 4% +231.2% 9507577 ± 5% sched_debug.cpu#3.ttwu_count
2550329 ± 4% -39.7% 1537542 ± 2% sched_debug.cpu#3.ttwu_local
17.00 ± 26% +57.4% 26.75 ± 23% sched_debug.cpu#4.nr_running
3253231 ± 4% +298.0% 12948659 ± 20% sched_debug.cpu#4.nr_switches
3253252 ± 4% +298.0% 12948687 ± 20% sched_debug.cpu#4.sched_count
2935924 ± 4% +246.0% 10157457 ± 6% sched_debug.cpu#4.ttwu_count
4840472 ± 57% +161.6% 12660891 ± 16% sched_debug.cpu#5.nr_switches
4840492 ± 57% +161.6% 12660919 ± 16% sched_debug.cpu#5.sched_count
3697043 ± 37% +188.3% 10657760 ± 8% sched_debug.cpu#5.ttwu_count
93.00 ± 9% -23.7% 71.00 ± 16% sched_debug.cpu#6.cpu_load[2]
93.75 ± 9% -24.5% 70.75 ± 16% sched_debug.cpu#6.cpu_load[3]
93.75 ± 8% -24.5% 70.75 ± 16% sched_debug.cpu#6.cpu_load[4]
3206562 ± 5% +246.8% 11120071 ± 9% sched_debug.cpu#6.nr_switches
3206583 ± 5% +246.8% 11120099 ± 9% sched_debug.cpu#6.sched_count
28907 ± 19% -49.1% 14708 ± 29% sched_debug.cpu#6.sched_goidle
2874452 ± 3% +241.7% 9820603 ± 2% sched_debug.cpu#6.ttwu_count
2556360 ± 4% -39.6% 1544429 ± 1% sched_debug.cpu#6.ttwu_local
79.25 ± 8% -17.4% 65.50 ± 5% sched_debug.cpu#7.cpu_load[0]
79.00 ± 8% -16.8% 65.75 ± 5% sched_debug.cpu#7.cpu_load[1]
79.00 ± 9% -16.5% 66.00 ± 4% sched_debug.cpu#7.cpu_load[2]
79.00 ± 8% -16.1% 66.25 ± 4% sched_debug.cpu#7.cpu_load[3]
78.75 ± 7% -15.6% 66.50 ± 4% sched_debug.cpu#7.cpu_load[4]
3665057 ± 16% +252.6% 12923128 ± 15% sched_debug.cpu#7.nr_switches
3665077 ± 16% +252.6% 12923157 ± 15% sched_debug.cpu#7.sched_count
3051381 ± 9% +230.3% 10079599 ± 6% sched_debug.cpu#7.ttwu_count
3852209 ± 15% +220.3% 12339721 ± 5% sched_debug.cpu#8.nr_switches
-6.25 ±-44% -124.0% 1.50 ±152% sched_debug.cpu#8.nr_uninterruptible
3852229 ± 15% +220.3% 12339747 ± 5% sched_debug.cpu#8.sched_count
69242 ± 63% -82.2% 12336 ± 16% sched_debug.cpu#8.sched_goidle
3226535 ± 10% +198.3% 9623833 ± 1% sched_debug.cpu#8.ttwu_count
2836002 ± 10% -42.6% 1627278 ± 5% sched_debug.cpu#8.ttwu_local
3409492 ± 5% +269.5% 12599488 ± 2% sched_debug.cpu#9.nr_switches
-3.75 ±-189% -253.3% 5.75 ± 18% sched_debug.cpu#9.nr_uninterruptible
3409515 ± 5% +269.5% 12599511 ± 2% sched_debug.cpu#9.sched_count
27958 ± 16% -52.1% 13392 ± 14% sched_debug.cpu#9.sched_goidle
2904962 ± 4% +229.1% 9559191 ± 5% sched_debug.cpu#9.ttwu_count
2600209 ± 4% -38.9% 1589299 ± 1% sched_debug.cpu#9.ttwu_local
2.02 ±101% -100.0% 0.00 ± 1% sched_debug.rt_rq[8]:/.rt_time
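
(Aside: the read/write split above -- unix_stream_sendmsg()/
sock_def_readable() on the send side, unix_stream_recvmsg() on the
receive side -- is AF_UNIX stream traffic; hackbench's process/socket
mode drives many sender/receiver groups over socketpair(). A single-pair
sketch of that traffic, assuming socketpair(AF_UNIX, SOCK_STREAM) and a
100-byte message -- illustrative only, not the benchmark itself:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/wait.h>

#define DATASIZE 100		/* hackbench's default message size */
#define LOOPS    100000

int main(void)
{
	int sv[2];
	char buf[DATASIZE];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv)) {
		perror("socketpair");
		return 1;
	}

	if (fork() == 0) {
		/* receiver: drain DATASIZE * LOOPS bytes, then exit */
		for (long done = 0; done < (long)DATASIZE * LOOPS; ) {
			ssize_t n = read(sv[1], buf, sizeof(buf));
			if (n <= 0)
				_exit(1);
			done += n;
		}
		_exit(0);
	}

	memset(buf, 'x', sizeof(buf));
	for (int i = 0; i < LOOPS; i++) {
		/* each send may wake the receiver: sock_def_readable() */
		if (write(sv[0], buf, sizeof(buf)) != DATASIZE) {
			perror("write");
			return 1;
		}
	}
	close(sv[0]);
	wait(NULL);
	return 0;
}

With the patched kernel the sender-side wakeup path (sock_def_readable
-> __wake_up_common -> try_to_wake_up) grows from ~0.03% to ~22% of
cycles in the profile above, the biggest single mover in this table.)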

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/mode/ipc:
xps2/hackbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1600%/threads/socket

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
%stddev %change %stddev
\ | \
36995 ± 0% -34.0% 24408 ± 3% hackbench.throughput
608.69 ± 0% +2.9% 626.30 ± 0% hackbench.time.elapsed_time
608.69 ± 0% +2.9% 626.30 ± 0% hackbench.time.elapsed_time.max
9328243 ± 3% +505.8% 56514733 ± 5% hackbench.time.involuntary_context_switches
246449 ± 11% -27.8% 178028 ± 2% hackbench.time.minor_page_faults
4329 ± 11% +11.0% 4804 ± 0% hackbench.time.system_time
205.68 ± 11% -20.9% 162.64 ± 1% hackbench.time.user_time
45062029 ± 10% +344.3% 2.002e+08 ± 4% hackbench.time.voluntary_context_switches
351511 ± 9% +26.6% 444910 ± 1% softirqs.RCU
40853 ± 30% +50.5% 61488 ± 3% slabinfo.kmalloc-256.active_objs
36884 ± 30% +53.3% 56555 ± 3% slabinfo.kmalloc-512.active_objs
1247088 ± 7% -16.1% 1046673 ± 2% proc-vmstat.numa_hit
1247088 ± 7% -16.1% 1046673 ± 2% proc-vmstat.numa_local
426221 ± 5% -15.4% 360479 ± 1% proc-vmstat.pgfault
0.41 ± 26% -45.1% 0.22 ± 3% turbostat.CPU%c1
0.12 ± 94% -72.3% 0.03 ± 13% turbostat.CPU%c3
5.93 ±166% -96.8% 0.19 ± 2% turbostat.CPU%c6
539.75 ± 14% +85.8% 1003 ± 4% vmstat.procs.r
96329 ± 8% +330.7% 414895 ± 4% vmstat.system.cs
13860 ± 6% +334.3% 60194 ± 5% vmstat.system.in
180.00 ± 11% -27.8% 130.00 ± 2% time.file_system_outputs
9328243 ± 3% +505.8% 56514733 ± 5% time.involuntary_context_switches
246449 ± 11% -27.8% 178028 ± 2% time.minor_page_faults
205.68 ± 11% -20.9% 162.64 ± 1% time.user_time
45062029 ± 10% +344.3% 2.002e+08 ± 4% time.voluntary_context_switches
9143136 ± 5% -36.8% 5777705 ± 3% cpuidle.C1-NHM.time
488261 ± 5% -40.5% 290378 ± 5% cpuidle.C1-NHM.usage
1089103 ± 9% -55.2% 488110 ± 11% cpuidle.C1E-NHM.time
17092 ± 2% -60.6% 6727 ± 16% cpuidle.C1E-NHM.usage
4120777 ± 69% -59.8% 1658190 ± 8% cpuidle.C3-NHM.time
4586 ± 35% -40.5% 2727 ± 7% cpuidle.C3-NHM.usage
3e+08 ±162% -95.0% 14960207 ± 1% cpuidle.C6-NHM.time
37433 ±139% -85.9% 5273 ± 4% cpuidle.C6-NHM.usage
1643 ± 26% -56.1% 722.00 ± 40% cpuidle.POLL.usage
11367875 ± 12% -88.8% 1273092 ± 7% latency_stats.hits.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
20123002 ± 8% +784.6% 1.78e+08 ± 4% latency_stats.hits.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
10424 ± 85% -79.5% 2135 ±162% latency_stats.max.call_rwsem_down_read_failed.do_exit.SyS_exit.entry_SYSCALL_64_fastpath
65139742 ±103% -77.8% 14446152 ±141% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
191484 ± 86% -87.3% 24322 ±154% latency_stats.sum.call_rwsem_down_read_failed.do_exit.SyS_exit.entry_SYSCALL_64_fastpath
35201 ± 61% -88.3% 4134 ± 92% latency_stats.sum.call_rwsem_down_write_failed.vm_munmap.SyS_munmap.entry_SYSCALL_64_fastpath
88590234 ± 77% -81.9% 16038634 ±141% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
1388432 ± 14% +373.5% 6574540 ± 3% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
3555 ± 61% +411.7% 18195 ± 52% latency_stats.sum.pipe_wait.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
24976 ± 81% -52.4% 11890 ± 24% latency_stats.sum.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.link_path_walk
2.198e+10 ± 11% -90.3% 2.122e+09 ± 7% latency_stats.sum.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
34821344 ±173% -100.0% 5144 ± 4% latency_stats.sum.wait_on_page_bit.filemap_fdatawait_range.filemap_write_and_wait_range.nfs4_file_fsync.[nfsv4].vfs_fsync_range.vfs_fsync.nfs4_file_flush.[nfsv4].filp_close.do_dup2.SyS_dup2.entry_SYSCALL_64_fastpath
5.50 ± 5% -41.2% 3.23 ± 15% perf-profile.cycles-pp.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg
1.19 ± 14% -95.0% 0.06 ± 45% perf-profile.cycles-pp.__kmalloc_node_track_caller.__kmalloc_reserve.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb
1.44 ± 13% -92.0% 0.11 ± 39% perf-profile.cycles-pp.__kmalloc_reserve.isra.31.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg
0.68 ± 32% +442.6% 3.66 ± 37% perf-profile.cycles-pp.__schedule.schedule.schedule_timeout.unix_stream_read_generic.unix_stream_recvmsg
41.20 ± 0% -12.1% 36.21 ± 4% perf-profile.cycles-pp.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
32.61 ± 0% +30.6% 42.61 ± 6% perf-profile.cycles-pp.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
2.69 ± 13% +323.5% 11.39 ± 36% perf-profile.cycles-pp.__wake_up_common.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg.sock_sendmsg
2.94 ± 14% +410.9% 15.01 ± 32% perf-profile.cycles-pp.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg.sock_sendmsg.sock_write_iter
0.29 ± 50% +469.8% 1.65 ± 40% perf-profile.cycles-pp._raw_spin_lock_irq.__schedule.schedule.schedule_timeout.unix_stream_read_generic
0.15 ± 39% +677.4% 1.20 ± 31% perf-profile.cycles-pp._raw_spin_lock_irqsave.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg.sock_sendmsg
2.26 ± 13% +230.6% 7.47 ± 38% perf-profile.cycles-pp.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function.autoremove_wake_function
6.58 ± 4% -31.7% 4.50 ± 13% perf-profile.cycles-pp.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg.sock_write_iter
2.66 ± 13% +323.3% 11.25 ± 36% perf-profile.cycles-pp.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg
10.73 ± 1% -31.8% 7.32 ± 25% perf-profile.cycles-pp.consume_skb.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter
3.23 ± 6% -80.3% 0.64 ± 45% perf-profile.cycles-pp.copy_user_generic_string.skb_copy_datagram_iter.unix_stream_read_actor.unix_stream_read_generic.unix_stream_recvmsg
2.65 ± 13% +322.9% 11.20 ± 36% perf-profile.cycles-pp.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sock_def_readable
2.06 ± 13% +228.2% 6.75 ± 40% perf-profile.cycles-pp.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up
2.20 ± 13% +230.1% 7.25 ± 39% perf-profile.cycles-pp.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function
1.54 ± 6% -93.8% 0.10 ± 50% perf-profile.cycles-pp.kfree.skb_free_head.skb_release_data.skb_release_all.consume_skb
2.00 ± 8% -89.5% 0.21 ± 21% perf-profile.cycles-pp.kfree_skbmem.consume_skb.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg
1.18 ± 13% -94.3% 0.07 ± 80% perf-profile.cycles-pp.kmem_cache_alloc_node.__alloc_skb.alloc_skb_with_frags.sock_alloc_send_pskb.unix_stream_sendmsg
1.92 ± 7% -90.5% 0.18 ± 15% perf-profile.cycles-pp.kmem_cache_free.kfree_skbmem.consume_skb.unix_stream_read_generic.unix_stream_recvmsg
0.28 ± 54% +476.1% 1.63 ± 40% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irq.__schedule.schedule.schedule_timeout
0.18 ± 40% +772.9% 1.53 ± 35% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.__wake_up_sync_key.sock_def_readable.unix_stream_sendmsg
1.98 ± 10% -81.1% 0.38 ± 47% perf-profile.cycles-pp.rw_verify_area.vfs_read.sys_read.entry_SYSCALL_64_fastpath
1.56 ± 7% -84.8% 0.24 ± 28% perf-profile.cycles-pp.rw_verify_area.vfs_write.sys_write.entry_SYSCALL_64_fastpath
0.70 ± 33% +427.7% 3.67 ± 36% perf-profile.cycles-pp.schedule.schedule_timeout.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg
0.71 ± 31% +431.1% 3.76 ± 37% perf-profile.cycles-pp.schedule_timeout.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter
1.67 ± 9% -86.5% 0.23 ± 38% perf-profile.cycles-pp.security_file_permission.rw_verify_area.vfs_read.sys_read.entry_SYSCALL_64_fastpath
1.31 ± 9% -89.3% 0.14 ± 15% perf-profile.cycles-pp.security_file_permission.rw_verify_area.vfs_write.sys_write.entry_SYSCALL_64_fastpath
0.98 ± 7% -95.4% 0.04 ± 59% perf-profile.cycles-pp.selinux_file_permission.security_file_permission.rw_verify_area.vfs_read.sys_read
0.97 ± 10% -93.3% 0.07 ± 31% perf-profile.cycles-pp.selinux_file_permission.security_file_permission.rw_verify_area.vfs_write.sys_write
1.94 ± 12% -85.4% 0.28 ± 42% perf-profile.cycles-pp.skb_copy_datagram_from_iter.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write
7.00 ± 2% -53.8% 3.23 ± 35% perf-profile.cycles-pp.skb_copy_datagram_iter.unix_stream_read_actor.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg
1.62 ± 6% -93.5% 0.11 ± 48% perf-profile.cycles-pp.skb_free_head.skb_release_data.skb_release_all.consume_skb.unix_stream_read_generic
6.70 ± 2% -48.3% 3.46 ± 32% perf-profile.cycles-pp.skb_release_all.consume_skb.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg
3.81 ± 4% -75.0% 0.95 ± 44% perf-profile.cycles-pp.skb_release_data.skb_release_all.consume_skb.unix_stream_read_generic.unix_stream_recvmsg
1.28 ± 7% -86.2% 0.18 ± 42% perf-profile.cycles-pp.skb_release_head_state.skb_release_all.consume_skb.unix_stream_read_generic.unix_stream_recvmsg
10.07 ± 1% -20.8% 7.97 ± 15% perf-profile.cycles-pp.sock_alloc_send_pskb.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write
3.42 ± 13% +365.6% 15.91 ± 30% perf-profile.cycles-pp.sock_def_readable.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write
40.53 ± 0% -12.0% 35.68 ± 4% perf-profile.cycles-pp.sock_read_iter.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
39.39 ± 0% -11.6% 34.83 ± 4% perf-profile.cycles-pp.sock_recvmsg.sock_read_iter.__vfs_read.vfs_read.sys_read
30.61 ± 1% +34.0% 41.01 ± 6% perf-profile.cycles-pp.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write.sys_write
31.75 ± 0% +32.1% 41.93 ± 6% perf-profile.cycles-pp.sock_write_iter.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
49.79 ± 0% -13.1% 43.25 ± 5% perf-profile.cycles-pp.sys_read.entry_SYSCALL_64_fastpath
40.87 ± 1% +22.6% 50.12 ± 4% perf-profile.cycles-pp.sys_write.entry_SYSCALL_64_fastpath
2.96 ± 11% +278.5% 11.20 ± 36% perf-profile.cycles-pp.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key
2.33 ± 12% +237.0% 7.87 ± 38% perf-profile.cycles-pp.ttwu_do_activate.constprop.85.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common
1.06 ± 7% -86.1% 0.15 ± 41% perf-profile.cycles-pp.unix_destruct_scm.skb_release_head_state.skb_release_all.consume_skb.unix_stream_read_generic
7.13 ± 2% -52.6% 3.38 ± 35% perf-profile.cycles-pp.unix_stream_read_actor.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter
36.07 ± 0% -11.1% 32.06 ± 3% perf-profile.cycles-pp.unix_stream_read_generic.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.__vfs_read
37.73 ± 0% -11.6% 33.35 ± 3% perf-profile.cycles-pp.unix_stream_recvmsg.sock_recvmsg.sock_read_iter.__vfs_read.vfs_read
27.66 ± 1% +40.0% 38.71 ± 7% perf-profile.cycles-pp.unix_stream_sendmsg.sock_sendmsg.sock_write_iter.__vfs_write.vfs_write
47.73 ± 0% -12.9% 41.56 ± 5% perf-profile.cycles-pp.vfs_read.sys_read.entry_SYSCALL_64_fastpath
38.65 ± 0% +24.2% 47.99 ± 4% perf-profile.cycles-pp.vfs_write.sys_write.entry_SYSCALL_64_fastpath
2763960 ± 7% +23.2% 3405876 ± 11% sched_debug.cfs_rq[0]:/.min_vruntime
1062 ± 13% +17.8% 1251 ± 2% sched_debug.cfs_rq[0]:/.tg_load_avg
1060 ± 13% +18.5% 1257 ± 2% sched_debug.cfs_rq[1]:/.tg_load_avg
17.25 ± 7% -27.5% 12.50 ± 29% sched_debug.cfs_rq[2]:/.nr_spread_over
2786477 ± 8% +25.8% 3504396 ± 5% sched_debug.cfs_rq[3]:/.min_vruntime
6.25 ± 28% +68.0% 10.50 ± 10% sched_debug.cfs_rq[3]:/.nr_spread_over
2910076 ± 8% +22.7% 3571629 ± 11% sched_debug.cfs_rq[4]:/.min_vruntime
2851625 ± 7% +32.2% 3769293 ± 4% sched_debug.cfs_rq[7]:/.min_vruntime
7.50 ± 29% +90.0% 14.25 ± 14% sched_debug.cfs_rq[7]:/.nr_spread_over
685452 ± 11% -35.2% 444246 ± 23% sched_debug.cpu#0.avg_idle
31.25 ± 13% +233.6% 104.25 ± 5% sched_debug.cpu#0.nr_running
4690362 ± 53% +265.6% 17147827 ± 10% sched_debug.cpu#0.nr_switches
4738489 ± 53% +262.9% 17195923 ± 10% sched_debug.cpu#0.sched_count
3528422 ± 36% +242.2% 12073723 ± 3% sched_debug.cpu#0.ttwu_count
711053 ± 4% -20.0% 568520 ± 19% sched_debug.cpu#1.avg_idle
32.00 ± 14% +207.0% 98.25 ± 7% sched_debug.cpu#1.nr_running
4515626 ± 49% +231.3% 14958200 ± 8% sched_debug.cpu#1.nr_switches
4515650 ± 49% +231.3% 14958228 ± 8% sched_debug.cpu#1.sched_count
33162 ± 18% -44.5% 18419 ± 6% sched_debug.cpu#1.sched_goidle
3546392 ± 30% +250.5% 12428531 ± 8% sched_debug.cpu#1.ttwu_count
712923 ± 6% -15.0% 606333 ± 5% sched_debug.cpu#2.avg_idle
29.25 ± 27% +263.2% 106.25 ± 6% sched_debug.cpu#2.nr_running
3175885 ± 4% +398.4% 15827946 ± 13% sched_debug.cpu#2.nr_switches
3175904 ± 4% +398.4% 15827972 ± 13% sched_debug.cpu#2.sched_count
2831946 ± 5% +337.9% 12400824 ± 5% sched_debug.cpu#2.ttwu_count
671235 ± 9% -19.8% 538230 ± 8% sched_debug.cpu#3.avg_idle
101.25 ± 15% +26.2% 127.75 ± 18% sched_debug.cpu#3.cpu_load[1]
17309 ± 3% -35.2% 11221 ± 38% sched_debug.cpu#3.curr->pid
31.50 ± 15% +224.6% 102.25 ± 9% sched_debug.cpu#3.nr_running
5870774 ± 48% +178.9% 16374035 ± 2% sched_debug.cpu#3.nr_switches
5870796 ± 48% +178.9% 16374064 ± 2% sched_debug.cpu#3.sched_count
37561 ± 44% -61.6% 14414 ± 15% sched_debug.cpu#3.sched_goidle
4219291 ± 35% +172.5% 11498279 ± 5% sched_debug.cpu#3.ttwu_count
3848777 ± 38% -44.6% 2132838 ± 3% sched_debug.cpu#3.ttwu_local
762115 ± 8% -29.0% 540876 ± 18% sched_debug.cpu#4.avg_idle
28.00 ± 17% +198.2% 83.50 ± 16% sched_debug.cpu#4.nr_running
3378378 ± 7% +357.6% 15459385 ± 5% sched_debug.cpu#4.nr_switches
3378398 ± 7% +357.6% 15459415 ± 5% sched_debug.cpu#4.sched_count
2926567 ± 7% +301.9% 11763330 ± 4% sched_debug.cpu#4.ttwu_count
2577652 ± 5% -11.5% 2282453 ± 3% sched_debug.cpu#4.ttwu_local
24.00 ± 7% +262.5% 87.00 ± 16% sched_debug.cpu#5.nr_running
3790760 ± 19% +289.2% 14753042 ± 3% sched_debug.cpu#5.nr_switches
-13.75 ±-71% -118.2% 2.50 ± 91% sched_debug.cpu#5.nr_uninterruptible
3790783 ± 19% +289.2% 14753074 ± 3% sched_debug.cpu#5.sched_count
3190949 ± 11% +303.2% 12866258 ± 10% sched_debug.cpu#5.ttwu_count
767337 ± 5% -17.9% 630139 ± 11% sched_debug.cpu#6.avg_idle
25.25 ± 20% +221.8% 81.25 ± 11% sched_debug.cpu#6.nr_running
3377066 ± 3% +346.5% 15077076 ± 11% sched_debug.cpu#6.nr_switches
3377088 ± 3% +346.5% 15077103 ± 11% sched_debug.cpu#6.sched_count
2920233 ± 4% +321.4% 12306595 ± 2% sched_debug.cpu#6.ttwu_count
2595947 ± 3% -11.6% 2295380 ± 8% sched_debug.cpu#6.ttwu_local
18.25 ± 17% +311.0% 75.00 ± 13% sched_debug.cpu#7.nr_running
4060347 ± 21% +288.6% 15779161 ± 5% sched_debug.cpu#7.nr_switches
4060369 ± 21% +288.6% 15779196 ± 5% sched_debug.cpu#7.sched_count
58321 ± 52% -73.7% 15320 ± 19% sched_debug.cpu#7.sched_goidle
3284695 ± 16% +261.8% 11884348 ± 4% sched_debug.cpu#7.ttwu_count
2891042 ± 14% -25.5% 2154619 ± 2% sched_debug.cpu#7.ttwu_local
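
(Same AF_UNIX pattern again, but in threads mode; with nr_threads at
1600% there are far more runnable threads than CPUs, and the
native_queued_spin_lock_slowpath time under _raw_spin_lock_irqsave in
__wake_up_sync_key() above suggests the wait-queue lock is getting
contended. A one-pair pthread sketch of the traffic, again illustrative
only, not the benchmark itself (build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#define DATASIZE 100
#define LOOPS    50000

static int sv[2];

static void *receiver(void *arg)
{
	char buf[DATASIZE];
	long done = 0;

	(void)arg;
	/* drain everything the sender thread pushes in */
	while (done < (long)DATASIZE * LOOPS) {
		ssize_t n = read(sv[1], buf, sizeof(buf));
		if (n <= 0)
			break;
		done += n;
	}
	return NULL;
}

int main(void)
{
	pthread_t rx;
	char buf[DATASIZE];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv)) {
		perror("socketpair");
		return 1;
	}
	memset(buf, 'x', sizeof(buf));

	if (pthread_create(&rx, NULL, receiver, NULL)) {
		perror("pthread_create");
		return 1;
	}

	for (int i = 0; i < LOOPS; i++)
		if (write(sv[0], buf, sizeof(buf)) != DATASIZE)
			break;

	close(sv[0]);		/* EOF so the receiver cannot hang */
	pthread_join(rx, NULL);
	return 0;
}

Scaled to hundreds of pairs, the per-wakeup lock acquisitions plausibly
account for the slowpath time seen above.)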

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test/cluster:
ivb42/netpipe/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/tcp/cs-localhost

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
%stddev %change %stddev
\ | \
51004 ± 0% -48.7% 26147 ± 1% netpipe.bigger_5M_Mbps.avg
8.54 ± 1% +35.6% 11.58 ± 1% netpipe.less_8K_usec.avg
90.94 ± 0% +86.4% 169.55 ± 0% netpipe.time.elapsed_time
90.94 ± 0% +86.4% 169.55 ± 0% netpipe.time.elapsed_time.max
82.25 ± 0% -7.0% 76.50 ± 0% netpipe.time.percent_of_cpu_this_job_got
75.08 ± 0% +73.6% 130.31 ± 0% netpipe.time.system_time
884329 ± 3% +58.7% 1403395 ± 2% netpipe.time.voluntary_context_switches
111670 ± 7% +31.3% 146580 ± 3% meminfo.Committed_AS
2262 ± 13% -44.7% 1251 ± 53% numa-meminfo.node0.PageTables
274787 ± 3% -16.2% 230168 ± 1% softirqs.TIMER
118.63 ± 3% +66.4% 197.37 ± 0% uptime.boot
5483 ± 3% +66.8% 9147 ± 0% uptime.idle
38673 ± 1% -13.2% 33557 ± 1% vmstat.system.cs
2245 ± 3% +11.6% 2506 ± 3% vmstat.system.in
22681 ± 56% -57.6% 9618 ± 10% proc-vmstat.numa_hint_faults
5251 ± 50% -100.0% 0.00 ± -1% proc-vmstat.numa_pages_migrated
43542 ± 59% -62.3% 16421 ± 7% proc-vmstat.numa_pte_updates
239188 ± 5% +62.6% 389005 ± 0% proc-vmstat.pgfault
5251 ± 50% -100.0% 0.00 ± -1% proc-vmstat.pgmigrate_success
90.94 ± 0% +86.4% 169.55 ± 0% time.elapsed_time
90.94 ± 0% +86.4% 169.55 ± 0% time.elapsed_time.max
75.08 ± 0% +73.6% 130.31 ± 0% time.system_time
0.23 ± 2% +16.0% 0.27 ± 3% time.user_time
884329 ± 3% +58.7% 1403395 ± 2% time.voluntary_context_switches
72133896 ± 10% +55.5% 1.121e+08 ± 7% cpuidle.C1-IVT.time
1742889 ± 1% +19.1% 2076365 ± 2% cpuidle.C1-IVT.usage
10968 ± 14% +570.6% 73551 ± 23% cpuidle.C1E-IVT.usage
1750 ± 58% +130.8% 4039 ± 15% cpuidle.C3-IVT.usage
4.173e+09 ± 0% +86.6% 7.785e+09 ± 0% cpuidle.C6-IVT.time
125.00 ± 50% +195.0% 368.75 ± 67% cpuidle.POLL.usage
578.25 ± 5% -15.1% 490.75 ± 3% numa-vmstat.node0.nr_alloc_batch
109.50 ± 30% -44.7% 60.50 ±100% numa-vmstat.node0.nr_dirtied
564.00 ± 13% -44.6% 312.25 ± 53% numa-vmstat.node0.nr_page_table_pages
50126 ± 52% -98.2% 913.50 ± 40% numa-vmstat.node0.numa_other
36.00 ±100% +186.1% 103.00 ± 49% numa-vmstat.node1.nr_dirtied
34.00 ±100% +178.7% 94.75 ± 50% numa-vmstat.node1.nr_written
29929 ± 87% +163.3% 78814 ± 0% numa-vmstat.node1.numa_other
3257 ± 2% -13.0% 2834 ± 2% slabinfo.kmalloc-2048.active_objs
3328 ± 2% -11.9% 2932 ± 2% slabinfo.kmalloc-2048.num_objs
14729 ± 4% -14.4% 12606 ± 7% slabinfo.kmalloc-256.active_objs
446.25 ± 4% -20.0% 357.00 ± 0% slabinfo.kmem_cache.active_objs
446.25 ± 4% -20.0% 357.00 ± 0% slabinfo.kmem_cache.num_objs
2058 ± 1% +11.2% 2288 ± 2% slabinfo.trace_event_file.active_objs
2058 ± 1% +11.2% 2288 ± 2% slabinfo.trace_event_file.num_objs
3.69 ± 2% -5.6% 3.48 ± 2% turbostat.%Busy
7.69 ± 3% -13.4% 6.66 ± 3% turbostat.CPU%c1
0.04 ± 61% -66.7% 0.01 ± 35% turbostat.CPU%c3
59.64 ± 1% +26.7% 75.56 ± 0% turbostat.CorWatt
62.50 ± 3% +16.0% 72.50 ± 3% turbostat.CoreTmp
31.21 ± 5% -99.2% 0.26 ± 7% turbostat.Pkg%pc2
61.75 ± 5% +17.0% 72.25 ± 4% turbostat.PkgTmp
88.34 ± 1% +20.1% 106.10 ± 0% turbostat.PkgWatt
3.94 ± 2% +90.2% 7.50 ± 5% turbostat.RAMWatt
54.50 ± 58% -81.2% 10.25 ±151% sched_debug.cfs_rq[10]:/.load_avg
54.50 ± 58% -81.2% 10.25 ±151% sched_debug.cfs_rq[10]:/.tg_load_avg_contrib
77.00 ± 42% -64.6% 27.25 ±147% sched_debug.cfs_rq[10]:/.util_avg
10968 ± 28% -91.9% 883.05 ± 34% sched_debug.cfs_rq[12]:/.exec_clock
24653 ± 17% -77.2% 5621 ± 38% sched_debug.cfs_rq[12]:/.min_vruntime
1.25 ± 66% +180.0% 3.50 ± 24% sched_debug.cfs_rq[12]:/.nr_spread_over
4137 ±142% -626.5% -21782 ±-97% sched_debug.cfs_rq[12]:/.spread0
6210 ± 90% +272.4% 23126 ± 88% sched_debug.cfs_rq[16]:/.min_vruntime
3.50 ±111% +1585.7% 59.00 ± 42% sched_debug.cfs_rq[17]:/.load_avg
3.50 ±111% +1585.7% 59.00 ± 42% sched_debug.cfs_rq[17]:/.tg_load_avg_contrib
4.50 ±147% +3766.7% 174.00 ± 63% sched_debug.cfs_rq[17]:/.util_avg
8373 ± 25% -65.7% 2868 ±108% sched_debug.cfs_rq[1]:/.exec_clock
90.75 ± 61% -90.4% 8.75 ±146% sched_debug.cfs_rq[1]:/.load_avg
91.25 ± 60% -90.4% 8.75 ±146% sched_debug.cfs_rq[1]:/.tg_load_avg_contrib
315.25 ± 83% -96.7% 10.50 ±136% sched_debug.cfs_rq[1]:/.util_avg
343.38 ± 37% +1061.0% 3986 ±111% sched_debug.cfs_rq[21]:/.exec_clock
5.75 ±120% +660.9% 43.75 ± 70% sched_debug.cfs_rq[21]:/.load_avg
4069 ± 71% +176.8% 11262 ± 74% sched_debug.cfs_rq[21]:/.min_vruntime
5.75 ±120% +660.9% 43.75 ± 70% sched_debug.cfs_rq[21]:/.tg_load_avg_contrib
9.00 ±147% +1105.6% 108.50 ± 93% sched_debug.cfs_rq[21]:/.util_avg
419.82 ± 41% +3356.1% 14509 ±122% sched_debug.cfs_rq[22]:/.exec_clock
-16629 ±-23% -122.1% 3673 ±361% sched_debug.cfs_rq[22]:/.spread0
230.54 ± 49% +131.2% 533.06 ± 30% sched_debug.cfs_rq[24]:/.exec_clock
3.75 ± 54% -46.7% 2.00 ± 50% sched_debug.cfs_rq[26]:/.nr_spread_over
39.75 ± 71% -73.6% 10.50 ±136% sched_debug.cfs_rq[28]:/.util_avg
45.25 ± 58% -91.7% 3.75 ±128% sched_debug.cfs_rq[2]:/.load_avg
6.50 ± 35% -53.8% 3.00 ±100% sched_debug.cfs_rq[2]:/.nr_spread_over
45.25 ± 58% -91.7% 3.75 ±128% sched_debug.cfs_rq[2]:/.tg_load_avg_contrib
83.00 ± 24% -84.0% 13.25 ±139% sched_debug.cfs_rq[2]:/.util_avg
162.98 ± 64% +114.1% 348.87 ± 36% sched_debug.cfs_rq[30]:/.exec_clock
40.25 ± 23% -77.6% 9.00 ± 62% sched_debug.cfs_rq[35]:/.load_avg
40.25 ± 23% -77.6% 9.00 ± 62% sched_debug.cfs_rq[35]:/.tg_load_avg_contrib
100.25 ± 20% -88.4% 11.67 ± 77% sched_debug.cfs_rq[35]:/.util_avg
1.00 ± 0% +1800.0% 19.00 ± 54% sched_debug.cfs_rq[37]:/.load_avg
1.00 ± 0% +1800.0% 19.00 ± 54% sched_debug.cfs_rq[37]:/.tg_load_avg_contrib
2.75 ± 47% -63.6% 1.00 ±100% sched_debug.cfs_rq[3]:/.nr_spread_over
3.00 ±137% +783.3% 26.50 ± 75% sched_debug.cfs_rq[41]:/.load_avg
3.00 ±137% +783.3% 26.50 ± 75% sched_debug.cfs_rq[41]:/.tg_load_avg_contrib
1.33 ± 93% +2468.8% 34.25 ± 59% sched_debug.cfs_rq[43]:/.util_avg
290.09 ± 47% +549.3% 1883 ± 7% sched_debug.cfs_rq[47]:/.exec_clock
38.75 ± 50% -78.1% 8.50 ±112% sched_debug.cfs_rq[4]:/.load_avg
6247 ± 45% +86.4% 11644 ± 33% sched_debug.cfs_rq[4]:/.min_vruntime
39.00 ± 49% -78.2% 8.50 ±112% sched_debug.cfs_rq[4]:/.tg_load_avg_contrib
99.75 ± 53% -86.5% 13.50 ±135% sched_debug.cfs_rq[4]:/.util_avg
42.25 ± 56% -86.4% 5.75 ±143% sched_debug.cfs_rq[7]:/.load_avg
42.25 ± 56% -86.4% 5.75 ±143% sched_debug.cfs_rq[7]:/.tg_load_avg_contrib
106.75 ± 65% -65.3% 37.00 ±159% sched_debug.cfs_rq[7]:/.util_avg
31.00 ± 52% -68.5% 9.75 ±126% sched_debug.cfs_rq[9]:/.load_avg
31.00 ± 52% -68.5% 9.75 ±126% sched_debug.cfs_rq[9]:/.tg_load_avg_contrib
73.50 ± 53% -73.8% 19.25 ±134% sched_debug.cfs_rq[9]:/.util_avg
57811 ± 7% +56.3% 90337 ± 0% sched_debug.cpu#0.clock
57811 ± 7% +56.3% 90337 ± 0% sched_debug.cpu#0.clock_task
-7.25 ±-46% -65.5% -2.50 ±-128% sched_debug.cpu#0.nr_uninterruptible
57811 ± 7% +56.3% 90335 ± 0% sched_debug.cpu#1.clock
57811 ± 7% +56.3% 90335 ± 0% sched_debug.cpu#1.clock_task
58.00 ± 94% -100.0% 0.00 ± -1% sched_debug.cpu#1.cpu_load[0]
57.25 ± 97% -100.0% 0.00 ± -1% sched_debug.cpu#1.cpu_load[1]
19125 ± 11% -60.7% 7510 ± 54% sched_debug.cpu#1.nr_load_updates
3064 ± 29% -66.0% 1043 ± 5% sched_debug.cpu#1.ttwu_local
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#10.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#10.clock_task
12292 ± 12% -64.4% 4377 ± 12% sched_debug.cpu#10.nr_load_updates
124.50 ± 23% +144.2% 304.00 ± 24% sched_debug.cpu#10.ttwu_local
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#11.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#11.clock_task
11997 ± 4% -65.5% 4134 ± 8% sched_debug.cpu#11.nr_load_updates
35749 ± 75% -81.2% 6733 ±117% sched_debug.cpu#11.ttwu_count
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#12.clock
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#12.clock_task
20997 ± 14% -81.5% 3875 ± 9% sched_debug.cpu#12.nr_load_updates
200703 ± 29% -87.3% 25459 ± 47% sched_debug.cpu#12.nr_switches
211710 ± 29% -87.8% 25783 ± 47% sched_debug.cpu#12.sched_count
99983 ± 29% -87.3% 12690 ± 47% sched_debug.cpu#12.sched_goidle
101421 ± 27% -89.3% 10884 ± 62% sched_debug.cpu#12.ttwu_count
5337 ± 38% -95.5% 242.75 ± 39% sched_debug.cpu#12.ttwu_local
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#13.clock
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#13.clock_task
4688 ± 26% -96.0% 186.75 ± 51% sched_debug.cpu#13.ttwu_local
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#14.clock
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#14.clock_task
14405 ± 26% -51.9% 6925 ± 65% sched_debug.cpu#14.nr_load_updates
3184 ± 86% -92.3% 246.50 ± 40% sched_debug.cpu#14.ttwu_local
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#15.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#15.clock_task
11551 ± 3% -58.4% 4804 ± 50% sched_debug.cpu#15.nr_load_updates
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#16.clock
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#16.clock_task
17762 ±139% +1247.7% 239393 ± 95% sched_debug.cpu#16.nr_switches
19045 ±128% +1198.7% 247333 ± 94% sched_debug.cpu#16.sched_count
8845 ±140% +1252.6% 119648 ± 95% sched_debug.cpu#16.sched_goidle
11908 ±117% +1012.6% 132494 ±108% sched_debug.cpu#16.ttwu_count
1000000 ± 0% -19.9% 800514 ± 15% sched_debug.cpu#17.avg_idle
57812 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#17.clock
57812 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#17.clock_task
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#18.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#18.clock_task
11453 ± 7% -61.4% 4419 ± 41% sched_debug.cpu#18.nr_load_updates
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#19.clock
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#19.clock_task
57810 ± 7% +56.2% 90311 ± 0% sched_debug.cpu#2.clock
57810 ± 7% +56.2% 90311 ± 0% sched_debug.cpu#2.clock_task
92432 ± 83% -60.1% 36916 ±127% sched_debug.cpu#2.nr_switches
95373 ± 81% -60.1% 38038 ±128% sched_debug.cpu#2.sched_count
45921 ± 84% -59.9% 18408 ±127% sched_debug.cpu#2.sched_goidle
46108 ± 82% -56.2% 20191 ±124% sched_debug.cpu#2.ttwu_count
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#20.clock
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#20.clock_task
11250 ± 1% -61.1% 4372 ± 51% sched_debug.cpu#20.nr_load_updates
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#21.clock
57812 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#21.clock_task
1896 ± 53% +4458.6% 86443 ± 88% sched_debug.cpu#21.nr_switches
914.50 ± 54% +4622.6% 43188 ± 88% sched_debug.cpu#21.sched_goidle
801.75 ± 65% +5447.3% 44475 ± 89% sched_debug.cpu#21.ttwu_count
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#22.clock
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#22.clock_task
1646 ± 34% +22511.3% 372295 ±114% sched_debug.cpu#22.nr_switches
1757 ± 25% +21611.8% 381639 ±114% sched_debug.cpu#22.sched_count
789.00 ± 37% +23483.2% 186071 ±114% sched_debug.cpu#22.sched_goidle
607.00 ± 35% +31533.1% 192013 ±116% sched_debug.cpu#22.ttwu_count
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#23.clock
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#23.clock_task
11382 ± 2% -65.0% 3987 ± 23% sched_debug.cpu#23.nr_load_updates
-3.50 ±-71% -128.6% 1.00 ± 0% sched_debug.cpu#23.nr_uninterruptible
446.25 ± 40% -55.2% 200.00 ± 21% sched_debug.cpu#23.ttwu_local
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#24.clock
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#24.clock_task
10709 ± 1% -71.5% 3051 ± 8% sched_debug.cpu#24.nr_load_updates
609.50 ± 5% +99.9% 1218 ± 30% sched_debug.cpu#24.nr_switches
613.00 ± 4% +100.0% 1225 ± 30% sched_debug.cpu#24.sched_count
280.75 ± 5% +102.6% 568.75 ± 32% sched_debug.cpu#24.sched_goidle
192.75 ± 4% +140.6% 463.75 ± 33% sched_debug.cpu#24.ttwu_count
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#25.clock
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#25.clock_task
10759 ± 2% -72.5% 2962 ± 3% sched_debug.cpu#25.nr_load_updates
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#26.clock
57813 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#26.clock_task
10769 ± 2% -70.4% 3183 ± 15% sched_debug.cpu#26.nr_load_updates
6.00 ± 81% -150.0% -3.00 ±-72% sched_debug.cpu#26.nr_uninterruptible
57814 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#27.clock
57814 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#27.clock_task
10762 ± 1% -70.2% 3205 ± 8% sched_debug.cpu#27.nr_load_updates
1.25 ±142% -200.0% -1.25 ±-118% sched_debug.cpu#27.nr_uninterruptible
57814 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#28.clock
57814 ± 7% +56.3% 90343 ± 0% sched_debug.cpu#28.clock_task
10744 ± 1% -72.6% 2948 ± 5% sched_debug.cpu#28.nr_load_updates
654.50 ± 22% +70.8% 1117 ± 20% sched_debug.cpu#28.nr_switches
657.75 ± 22% +70.9% 1124 ± 19% sched_debug.cpu#28.sched_count
297.25 ± 23% +76.6% 525.00 ± 20% sched_debug.cpu#28.sched_goidle
57814 ± 7% +56.3% 90344 ± 0% sched_debug.cpu#29.clock
57814 ± 7% +56.3% 90344 ± 0% sched_debug.cpu#29.clock_task
2.25 ±110% -144.4% -1.00 ±-141% sched_debug.cpu#29.nr_uninterruptible
57811 ± 7% +56.3% 90338 ± 0% sched_debug.cpu#3.clock
57811 ± 7% +56.3% 90338 ± 0% sched_debug.cpu#3.clock_task
57814 ± 7% +56.3% 90344 ± 0% sched_debug.cpu#30.clock
57814 ± 7% +56.3% 90344 ± 0% sched_debug.cpu#30.clock_task
10616 ± 1% -72.3% 2937 ± 4% sched_debug.cpu#30.nr_load_updates
716.00 ± 40% +1610.9% 12249 ±155% sched_debug.cpu#30.nr_switches
718.50 ± 40% +1606.1% 12258 ±155% sched_debug.cpu#30.sched_count
265.00 ± 15% +2190.5% 6069 ±157% sched_debug.cpu#30.sched_goidle
270.00 ± 27% +2120.5% 5995 ±131% sched_debug.cpu#30.ttwu_count
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#31.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#31.clock_task
10682 ± 1% -70.2% 3180 ± 11% sched_debug.cpu#31.nr_load_updates
610.25 ± 15% +2592.1% 16428 ±162% sched_debug.cpu#31.nr_switches
613.25 ± 15% +2579.8% 16434 ±161% sched_debug.cpu#31.sched_count
278.75 ± 17% +2831.7% 8172 ±162% sched_debug.cpu#31.sched_goidle
248.75 ± 25% +3406.6% 8722 ±156% sched_debug.cpu#31.ttwu_count
97.00 ± 5% +85.8% 180.25 ± 14% sched_debug.cpu#31.ttwu_local
57814 ± 7% +56.3% 90344 ± 0% sched_debug.cpu#32.clock
57814 ± 7% +56.3% 90344 ± 0% sched_debug.cpu#32.clock_task
10718 ± 2% -73.1% 2887 ± 5% sched_debug.cpu#32.nr_load_updates
183.25 ± 11% +312.7% 756.25 ± 90% sched_debug.cpu#32.ttwu_count
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#33.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#33.clock_task
10681 ± 3% -71.4% 3050 ± 10% sched_debug.cpu#33.nr_load_updates
79.25 ± 15% +406.0% 401.00 ±100% sched_debug.cpu#33.ttwu_local
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#34.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#34.clock_task
9764 ± 18% -70.5% 2885 ± 4% sched_debug.cpu#34.nr_load_updates
95.00 ± 19% +62.9% 154.75 ± 41% sched_debug.cpu#34.ttwu_local
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#35.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#35.clock_task
10680 ± 1% -73.6% 2819 ± 12% sched_debug.cpu#35.nr_load_updates
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#36.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#36.clock_task
10605 ± 1% -74.3% 2723 ± 2% sched_debug.cpu#36.nr_load_updates
126.50 ± 13% +147.2% 312.75 ± 29% sched_debug.cpu#36.ttwu_count
59.50 ± 14% +146.2% 146.50 ± 34% sched_debug.cpu#36.ttwu_local
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#37.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#37.clock_task
11015 ± 5% -75.8% 2670 ± 4% sched_debug.cpu#37.nr_load_updates
1.50 ±110% -188.9% -1.33 ±-35% sched_debug.cpu#37.nr_uninterruptible
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#38.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#38.clock_task
10614 ± 1% -75.7% 2575 ± 4% sched_debug.cpu#38.nr_load_updates
96.25 ± 13% +65.7% 159.50 ± 10% sched_debug.cpu#38.ttwu_local
57814 ± 7% +56.3% 90346 ± 0% sched_debug.cpu#39.clock
57814 ± 7% +56.3% 90346 ± 0% sched_debug.cpu#39.clock_task
9969 ± 11% -72.9% 2700 ± 6% sched_debug.cpu#39.nr_load_updates
590.75 ± 8% +131.7% 1368 ± 47% sched_debug.cpu#39.nr_switches
0.00 ± 0% +Inf% 2.75 ± 64% sched_debug.cpu#39.nr_uninterruptible
594.50 ± 8% +131.4% 1375 ± 47% sched_debug.cpu#39.sched_count
270.25 ± 7% +138.6% 644.75 ± 52% sched_debug.cpu#39.sched_goidle
194.00 ± 28% +419.8% 1008 ±102% sched_debug.cpu#39.ttwu_count
85.00 ± 25% +95.6% 166.25 ± 18% sched_debug.cpu#39.ttwu_local
57811 ± 7% +56.3% 90339 ± 0% sched_debug.cpu#4.clock
57811 ± 7% +56.3% 90339 ± 0% sched_debug.cpu#4.clock_task
12128 ± 3% -43.6% 6841 ± 39% sched_debug.cpu#4.nr_load_updates
185.75 ± 30% +346.8% 830.00 ± 92% sched_debug.cpu#4.ttwu_local
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#40.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#40.clock_task
10625 ± 2% -76.1% 2544 ± 6% sched_debug.cpu#40.nr_load_updates
-0.50 ±-300% -350.0% 1.25 ±118% sched_debug.cpu#40.nr_uninterruptible
269.75 ± 37% +66.0% 447.75 ± 13% sched_debug.cpu#40.sched_goidle
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#41.clock
57814 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#41.clock_task
10644 ± 1% -74.2% 2748 ± 11% sched_debug.cpu#41.nr_load_updates
57810 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#42.clock
57810 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#42.clock_task
9252 ± 25% -72.0% 2589 ± 4% sched_debug.cpu#42.nr_load_updates
57815 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#43.clock
57815 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#43.clock_task
10646 ± 3% -75.5% 2606 ± 6% sched_debug.cpu#43.nr_load_updates
57815 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#44.clock
57815 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#44.clock_task
10758 ± 2% -76.4% 2541 ± 5% sched_debug.cpu#44.nr_load_updates
57815 ± 7% +56.3% 90346 ± 0% sched_debug.cpu#45.clock
57815 ± 7% +56.3% 90346 ± 0% sched_debug.cpu#45.clock_task
10459 ± 1% -75.4% 2574 ± 3% sched_debug.cpu#45.nr_load_updates
145.50 ± 18% +174.6% 399.50 ± 37% sched_debug.cpu#45.ttwu_count
66.50 ± 11% +121.1% 147.00 ± 33% sched_debug.cpu#45.ttwu_local
57815 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#46.clock
57815 ± 7% +56.3% 90345 ± 0% sched_debug.cpu#46.clock_task
10475 ± 2% -75.0% 2616 ± 9% sched_debug.cpu#46.nr_load_updates
557.25 ± 50% +60.4% 893.75 ± 29% sched_debug.cpu#46.nr_switches
560.25 ± 50% +60.3% 898.00 ± 28% sched_debug.cpu#46.sched_count
256.50 ± 51% +55.1% 397.75 ± 28% sched_debug.cpu#46.sched_goidle
57815 ± 7% +56.3% 90346 ± 0% sched_debug.cpu#47.clock
57815 ± 7% +56.3% 90346 ± 0% sched_debug.cpu#47.clock_task
10449 ± 2% -76.9% 2416 ± 7% sched_debug.cpu#47.nr_load_updates
67.50 ± 25% +185.9% 193.00 ± 27% sched_debug.cpu#47.ttwu_local
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#5.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#5.clock_task
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#6.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#6.clock_task
731.25 ±122% +318.7% 3062 ± 88% sched_debug.cpu#6.ttwu_local
57811 ± 7% +56.3% 90339 ± 0% sched_debug.cpu#7.clock
57811 ± 7% +56.3% 90339 ± 0% sched_debug.cpu#7.clock_task
11629 ± 1% -64.3% 4148 ± 3% sched_debug.cpu#7.nr_load_updates
174.50 ± 25% +239.1% 591.75 ± 32% sched_debug.cpu#7.ttwu_local
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#8.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#8.clock_task
11571 ± 1% -63.7% 4202 ± 8% sched_debug.cpu#8.nr_load_updates
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#9.clock
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu#9.clock_task
12295 ± 10% -63.0% 4548 ± 16% sched_debug.cpu#9.nr_load_updates
151.75 ± 11% +405.1% 766.50 ± 51% sched_debug.cpu#9.ttwu_local
57811 ± 7% +56.3% 90342 ± 0% sched_debug.cpu_clk
56784 ± 7% +53.0% 86905 ± 0% sched_debug.ktime
57811 ± 7% +56.3% 90342 ± 0% sched_debug.sched_clk

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/blocksize:
xps2/pigz/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/128K

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
%stddev %change %stddev
\ | \
4815744 ± 5% -14.9% 4099045 ± 0% pigz.time.involuntary_context_switches
1502733 ± 0% -35.4% 970605 ± 0% pigz.time.voluntary_context_switches
3969 ± 16% +4889.7% 198080 ± 89% latency_stats.sum.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
35129 ± 5% -18.5% 28613 ± 0% vmstat.system.cs
154419 ± 11% -19.8% 123840 ± 4% cpuidle.C1-NHM.usage
176.50 ± 11% -28.5% 126.25 ± 13% cpuidle.C1E-NHM.usage
4815744 ± 5% -14.9% 4099045 ± 0% time.involuntary_context_switches
1502733 ± 0% -35.4% 970605 ± 0% time.voluntary_context_switches
6175 ± 2% +7.2% 6623 ± 2% slabinfo.kmalloc-32.active_objs
6175 ± 2% +7.2% 6623 ± 2% slabinfo.kmalloc-32.num_objs
1373 ± 9% -20.1% 1096 ± 5% slabinfo.kmalloc-512.num_objs
1204535 ± 4% -9.7% 1087512 ± 0% sched_debug.cfs_rq[0]:/.min_vruntime
958.00 ± 4% -6.7% 893.50 ± 3% sched_debug.cfs_rq[0]:/.util_avg
13243 ±476% +794.7% 118493 ± 5% sched_debug.cfs_rq[1]:/.spread0
-1668 ±-4317% -7180.2% 118167 ± 3% sched_debug.cfs_rq[2]:/.spread0
188.25 ± 26% -38.0% 116.75 ± 24% sched_debug.cfs_rq[3]:/.load
29536 ±168% +302.7% 118933 ± 4% sched_debug.cfs_rq[3]:/.spread0
96.75 ± 12% +12.1% 108.50 ± 8% sched_debug.cfs_rq[4]:/.load
29027 ±173% +321.2% 122268 ± 3% sched_debug.cfs_rq[4]:/.spread0
166.50 ± 6% +16.4% 193.75 ± 6% sched_debug.cfs_rq[5]:/.load_avg
22902 ±242% +414.7% 117874 ± 6% sched_debug.cfs_rq[5]:/.spread0
166.50 ± 6% +17.0% 194.75 ± 6% sched_debug.cfs_rq[5]:/.tg_load_avg_contrib
15851 ±373% +655.2% 119700 ± 4% sched_debug.cfs_rq[6]:/.spread0
161.25 ± 5% +23.6% 199.25 ± 12% sched_debug.cfs_rq[7]:/.load_avg
10.50 ± 15% -61.9% 4.00 ± 46% sched_debug.cfs_rq[7]:/.nr_spread_over
113.25 ± 4% +19.6% 135.50 ± 19% sched_debug.cfs_rq[7]:/.runnable_load_avg
31212 ±154% +279.9% 118576 ± 5% sched_debug.cfs_rq[7]:/.spread0
161.50 ± 5% +23.5% 199.50 ± 12% sched_debug.cfs_rq[7]:/.tg_load_avg_contrib
24884 ±115% +272.2% 92622 ± 16% sched_debug.cpu#0.sched_goidle
772814 ±146% +250.7% 2710566 ± 1% sched_debug.cpu#0.ttwu_count
947742 ±135% -83.5% 156048 ± 14% sched_debug.cpu#1.nr_switches
947833 ±135% -83.5% 156415 ± 14% sched_debug.cpu#1.sched_count
482248 ±133% -84.4% 75226 ± 11% sched_debug.cpu#1.ttwu_count
470550 ±136% -95.3% 22123 ± 17% sched_debug.cpu#1.ttwu_local
1804 ± 13% +18.2% 2133 ± 4% sched_debug.cpu#2.curr->pid
1700803 ± 88% -90.4% 162528 ± 19% sched_debug.cpu#2.nr_switches
1700897 ± 88% -90.4% 162850 ± 19% sched_debug.cpu#2.sched_count
859100 ± 87% -91.6% 72315 ± 10% sched_debug.cpu#2.ttwu_count
846365 ± 88% -97.7% 19706 ± 8% sched_debug.cpu#2.ttwu_local
188.25 ± 26% -38.0% 116.75 ± 24% sched_debug.cpu#3.load
208390 ± 5% -26.2% 153887 ± 19% sched_debug.cpu#3.nr_switches
208505 ± 5% -26.0% 154206 ± 19% sched_debug.cpu#3.sched_count
5087 ± 12% -60.0% 2035 ± 15% sched_debug.cpu#3.sched_goidle
110537 ± 2% -27.1% 80574 ± 15% sched_debug.cpu#3.ttwu_count
99352 ± 3% -80.5% 19364 ± 11% sched_debug.cpu#3.ttwu_local
96.75 ± 12% +12.4% 108.75 ± 9% sched_debug.cpu#4.load
106522 ± 20% -66.7% 35512 ± 77% sched_debug.cpu#4.ttwu_local
112.75 ± 3% +7.8% 121.50 ± 4% sched_debug.cpu#5.cpu_load[3]
112.25 ± 2% +8.2% 121.50 ± 4% sched_debug.cpu#5.cpu_load[4]
1055550 ±140% -84.8% 160666 ± 15% sched_debug.cpu#5.nr_switches
-9.00 ±-56% -180.6% 7.25 ± 57% sched_debug.cpu#5.nr_uninterruptible
1055658 ±140% -84.8% 160966 ± 15% sched_debug.cpu#5.sched_count
538573 ±138% -85.6% 77426 ± 20% sched_debug.cpu#5.ttwu_count
527280 ±141% -96.2% 19993 ± 22% sched_debug.cpu#5.ttwu_local
480670 ± 12% +25.7% 604142 ± 9% sched_debug.cpu#6.avg_idle
1881968 ± 89% -91.4% 162480 ± 15% sched_debug.cpu#6.nr_switches
1882052 ± 89% -91.3% 162951 ± 15% sched_debug.cpu#6.sched_count
37540 ± 97% -96.8% 1208 ± 24% sched_debug.cpu#6.sched_goidle
952428 ± 89% -92.2% 73822 ± 19% sched_debug.cpu#6.ttwu_count
939038 ± 90% -97.8% 20620 ± 19% sched_debug.cpu#6.ttwu_local
112.75 ± 5% +30.6% 147.25 ± 18% sched_debug.cpu#7.cpu_load[0]
112.50 ± 4% +25.8% 141.50 ± 17% sched_debug.cpu#7.cpu_load[1]
112.50 ± 2% +23.6% 139.00 ± 17% sched_debug.cpu#7.cpu_load[2]
112.25 ± 1% +22.5% 137.50 ± 16% sched_debug.cpu#7.cpu_load[3]
113.00 ± 1% +20.8% 136.50 ± 16% sched_debug.cpu#7.cpu_load[4]
203104 ± 3% -20.5% 161466 ± 9% sched_debug.cpu#7.nr_switches
203195 ± 3% -20.4% 161814 ± 9% sched_debug.cpu#7.sched_count
109093 ± 4% -26.6% 80094 ± 8% sched_debug.cpu#7.ttwu_count
99150 ± 3% -81.2% 18616 ± 15% sched_debug.cpu#7.ttwu_local
2.26 ±109% +768.5% 19.60 ± 79% sched_debug.rt_rq[0]:/.rt_time
11.04 ± 4% -44.3% 6.14 ± 3% perf-profile.cycles-pp.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate
2.45 ± 11% +29.8% 3.18 ± 9% perf-profile.cycles-pp.__alloc_pages_nodemask.alloc_pages_current.pipe_write.__vfs_write.vfs_write
5.74 ± 7% -50.2% 2.86 ± 4% perf-profile.cycles-pp.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency
1.21 ± 6% -71.3% 0.35 ± 24% perf-profile.cycles-pp.__module_text_address.is_module_text_address.__kernel_text_address.print_context_stack.dump_trace
0.71 ± 26% -39.2% 0.43 ± 8% perf-profile.cycles-pp.__schedule.schedule.pipe_wait.pipe_write.__vfs_write
0.21 ± 70% +585.5% 1.42 ± 23% perf-profile.cycles-pp.__schedule.schedule.prepare_exit_to_usermode.retint_user
1.35 ± 16% -82.9% 0.23 ± 98% perf-profile.cycles-pp.__schedule.schedule.prepare_exit_to_usermode.syscall_return_slowpath.int_ret_from_sys_call
18.32 ± 3% +15.0% 21.06 ± 4% perf-profile.cycles-pp.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
11.62 ± 6% -48.0% 6.05 ± 5% perf-profile.cycles-pp.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_process.wake_up_q
2.79 ± 10% +32.8% 3.70 ± 3% perf-profile.cycles-pp.alloc_pages_current.pipe_write.__vfs_write.vfs_write.sys_write
1.98 ± 21% +58.7% 3.14 ± 7% perf-profile.cycles-pp.anon_pipe_buf_release.pipe_read.__vfs_read.vfs_read.sys_read
0.58 ± 62% +187.9% 1.66 ± 31% perf-profile.cycles-pp.call_cpuidle.cpu_startup_entry.start_secondary
13.42 ± 5% +16.7% 15.66 ± 6% perf-profile.cycles-pp.copy_page_from_iter.pipe_write.__vfs_write.vfs_write.sys_write
11.72 ± 5% +20.9% 14.17 ± 6% perf-profile.cycles-pp.copy_user_generic_string.copy_page_from_iter.pipe_write.__vfs_write.vfs_write
0.60 ± 55% +181.2% 1.68 ± 31% perf-profile.cycles-pp.cpu_startup_entry.start_secondary
0.58 ± 62% +187.9% 1.66 ± 31% perf-profile.cycles-pp.cpuidle_enter.call_cpuidle.cpu_startup_entry.start_secondary
0.58 ± 62% +187.9% 1.66 ± 31% perf-profile.cycles-pp.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry.start_secondary
1.00 ± 24% -39.9% 0.60 ± 38% perf-profile.cycles-pp.dequeue_entity.dequeue_task_fair.deactivate_task.__schedule.schedule
0.92 ± 15% +50.3% 1.38 ± 17% perf-profile.cycles-pp.do_execveat_common.isra.29.sys_execve.return_from_execve.execve
18.85 ± 6% -39.5% 11.39 ± 5% perf-profile.cycles-pp.do_futex.sys_futex.entry_SYSCALL_64_fastpath
10.16 ± 5% -45.0% 5.59 ± 6% perf-profile.cycles-pp.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair
12.49 ± 3% -43.9% 7.01 ± 7% perf-profile.cycles-pp.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up
11.36 ± 6% -47.9% 5.92 ± 5% perf-profile.cycles-pp.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_process
0.92 ± 15% +52.2% 1.39 ± 16% perf-profile.cycles-pp.execve
0.90 ± 14% +155.2% 2.29 ± 5% perf-profile.cycles-pp.free_hot_cold_page.put_page.anon_pipe_buf_release.pipe_read.__vfs_read
0.05 ± 70% +3103.6% 1.49 ± 17% perf-profile.cycles-pp.free_pcppages_bulk.free_hot_cold_page.put_page.anon_pipe_buf_release.pipe_read
6.76 ± 5% -84.5% 1.04 ± 21% perf-profile.cycles-pp.futex_wake.do_futex.sys_futex.entry_SYSCALL_64_fastpath
1.81 ± 13% +33.9% 2.42 ± 11% perf-profile.cycles-pp.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.pipe_write.__vfs_write
1.78 ± 16% -82.8% 0.31 ± 73% perf-profile.cycles-pp.int_ret_from_sys_call
0.58 ± 62% +187.9% 1.66 ± 31% perf-profile.cycles-pp.intel_idle.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry
0.67 ± 23% +50.2% 1.00 ± 13% perf-profile.cycles-pp.irq_entries_start
2.17 ± 15% -64.8% 0.76 ± 12% perf-profile.cycles-pp.is_module_text_address.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk
0.17 ± 65% +417.6% 0.88 ± 51% perf-profile.cycles-pp.pick_next_task_fair.__schedule.schedule.prepare_exit_to_usermode.retint_user
0.94 ± 22% -71.3% 0.27 ±100% perf-profile.cycles-pp.pick_next_task_fair.__schedule.schedule.prepare_exit_to_usermode.syscall_return_slowpath
15.43 ± 4% +19.8% 18.49 ± 10% perf-profile.cycles-pp.pipe_read.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
0.78 ± 18% -34.9% 0.51 ± 28% perf-profile.cycles-pp.pipe_wait.pipe_write.__vfs_write.vfs_write.sys_write
18.17 ± 3% +15.6% 21.00 ± 4% perf-profile.cycles-pp.pipe_write.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
0.41 ± 47% +339.6% 1.80 ± 26% perf-profile.cycles-pp.prepare_exit_to_usermode.retint_user
1.56 ± 14% -82.3% 0.28 ± 78% perf-profile.cycles-pp.prepare_exit_to_usermode.syscall_return_slowpath.int_ret_from_sys_call
9.41 ± 6% -45.7% 5.10 ± 6% perf-profile.cycles-pp.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
1.30 ± 16% +107.7% 2.71 ± 2% perf-profile.cycles-pp.put_page.anon_pipe_buf_release.pipe_read.__vfs_read.vfs_read
0.43 ± 47% +343.3% 1.89 ± 27% perf-profile.cycles-pp.retint_user
0.92 ± 15% +52.2% 1.39 ± 16% perf-profile.cycles-pp.return_from_execve.execve
10.18 ± 6% -44.5% 5.65 ± 5% perf-profile.cycles-pp.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task
0.76 ± 20% -45.7% 0.41 ± 39% perf-profile.cycles-pp.schedule.pipe_wait.pipe_write.__vfs_write.vfs_write
0.26 ± 77% +472.4% 1.50 ± 26% perf-profile.cycles-pp.schedule.prepare_exit_to_usermode.retint_user
1.34 ± 13% -83.1% 0.23 ± 98% perf-profile.cycles-pp.schedule.prepare_exit_to_usermode.syscall_return_slowpath.int_ret_from_sys_call
7.82 ± 8% -13.9% 6.73 ± 8% perf-profile.cycles-pp.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
0.77 ± 20% -57.9% 0.33 ± 53% perf-profile.cycles-pp.select_idle_sibling.select_task_rq_fair.try_to_wake_up.wake_up_process.wake_up_q
0.60 ± 55% +181.2% 1.68 ± 31% perf-profile.cycles-pp.start_secondary
0.92 ± 15% +52.2% 1.39 ± 16% perf-profile.cycles-pp.sys_execve.return_from_execve.execve
19.05 ± 6% -39.8% 11.47 ± 6% perf-profile.cycles-pp.sys_futex.entry_SYSCALL_64_fastpath
18.65 ± 5% +14.9% 21.43 ± 4% perf-profile.cycles-pp.sys_write.entry_SYSCALL_64_fastpath
1.71 ± 17% -83.6% 0.28 ± 78% perf-profile.cycles-pp.syscall_return_slowpath.int_ret_from_sys_call
8.13 ± 8% -15.7% 6.86 ± 4% perf-profile.cycles-pp.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.hpet_interrupt_handler.handle_irq_event_percpu
6.42 ± 6% -86.7% 0.86 ± 15% perf-profile.cycles-pp.try_to_wake_up.wake_up_process.wake_up_q.futex_wake.do_futex
5.71 ± 6% -88.3% 0.67 ± 16% perf-profile.cycles-pp.ttwu_do_activate.constprop.85.try_to_wake_up.wake_up_process.wake_up_q.futex_wake
18.56 ± 4% +14.7% 21.29 ± 4% perf-profile.cycles-pp.vfs_write.sys_write.entry_SYSCALL_64_fastpath
6.48 ± 6% -86.7% 0.86 ± 15% perf-profile.cycles-pp.wake_up_process.wake_up_q.futex_wake.do_futex.sys_futex
7.83 ± 5% -12.0% 6.90 ± 6% perf-profile.cycles-pp.wake_up_q.futex_requeue.do_futex.sys_futex.entry_SYSCALL_64_fastpath
6.50 ± 6% -86.8% 0.86 ± 15% perf-profile.cycles-pp.wake_up_q.futex_wake.do_futex.sys_futex.entry_SYSCALL_64_fastpath

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/blocksize:
xps2/pigz/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/512K

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
%stddev %change %stddev
\ | \
421289 ± 0% -41.5% 246276 ± 0% pigz.time.voluntary_context_switches
29846 ± 2% -7.7% 27554 ± 7% vmstat.system.cs
20.45 ± 0% +13.2% 23.14 ± 2% time.system_time
421289 ± 0% -41.5% 246276 ± 0% time.voluntary_context_switches
45714285 ±124% -95.7% 1984224 ±141% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
48348210 ±115% -93.5% 3139821 ±141% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
49349347 ±111% -92.0% 3968449 ±141% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
1375 ± 14% -27.1% 1002 ± 6% slabinfo.kmalloc-512.active_objs
1383 ± 13% -22.9% 1066 ± 4% slabinfo.kmalloc-512.num_objs
288.00 ± 11% -27.8% 208.00 ± 13% slabinfo.kmem_cache_node.active_objs
288.00 ± 11% -27.8% 208.00 ± 13% slabinfo.kmem_cache_node.num_objs
185.25 ± 20% -31.8% 126.25 ± 27% sched_debug.cfs_rq[1]:/.load
197.75 ± 38% -46.9% 105.00 ± 7% sched_debug.cfs_rq[4]:/.load
612137 ± 10% -30.7% 424430 ± 39% sched_debug.cpu#0.avg_idle
185.50 ± 20% -31.9% 126.25 ± 27% sched_debug.cpu#1.load
4279 ± 38% +433.7% 22836 ± 90% sched_debug.cpu#1.sched_goidle
3.75 ± 66% -293.3% -7.25 ±-71% sched_debug.cpu#2.nr_uninterruptible
49454 ± 11% -33.7% 32794 ± 21% sched_debug.cpu#2.ttwu_count
46367 ± 10% -73.0% 12537 ± 36% sched_debug.cpu#2.ttwu_local
2101 ± 2% -19.6% 1689 ± 31% sched_debug.cpu#3.curr->pid
3172457 ± 13% -75.8% 768326 ±159% sched_debug.cpu#3.nr_switches
3172792 ± 13% -75.8% 768559 ±159% sched_debug.cpu#3.sched_count
31071 ± 63% -70.6% 9145 ±152% sched_debug.cpu#3.sched_goidle
1591262 ± 13% -75.3% 393479 ±154% sched_debug.cpu#3.ttwu_count
1585043 ± 13% -77.0% 363933 ±168% sched_debug.cpu#3.ttwu_local
1563 ± 3% +24.7% 1950 ± 2% sched_debug.cpu#4.curr->pid
197.75 ± 38% -46.9% 105.00 ± 7% sched_debug.cpu#4.load
40124 ± 11% -67.6% 12997 ± 52% sched_debug.cpu#4.ttwu_local
-7.75 ±-64% -116.1% 1.25 ±261% sched_debug.cpu#5.nr_uninterruptible
43049 ± 9% -36.9% 27181 ± 23% sched_debug.cpu#6.ttwu_count
39979 ± 9% -79.5% 8203 ± 27% sched_debug.cpu#6.ttwu_local
503206 ± 8% +27.6% 642256 ± 14% sched_debug.cpu#7.avg_idle
108.50 ± 2% +19.6% 129.75 ± 19% sched_debug.cpu#7.cpu_load[4]
3350838 ± 19% -71.4% 958032 ±162% sched_debug.cpu#7.nr_switches
3351130 ± 19% -71.4% 958871 ±162% sched_debug.cpu#7.sched_count
44871 ± 85% -69.4% 13748 ±166% sched_debug.cpu#7.sched_goidle
1679394 ± 19% -71.1% 486037 ±159% sched_debug.cpu#7.ttwu_count
1674846 ± 19% -72.2% 465439 ±167% sched_debug.cpu#7.ttwu_local
0.11 ± 60% +15333.0% 16.37 ±121% sched_debug.rt_rq[1]:/.rt_time
4.84 ± 14% -46.4% 2.59 ± 22% perf-profile.cycles-pp.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate
1.31 ± 17% +40.8% 1.84 ± 14% perf-profile.cycles-pp.__do_softirq.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
2.50 ± 8% -48.0% 1.30 ± 9% perf-profile.cycles-pp.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency
0.03 ±141% +5341.7% 1.63 ± 16% perf-profile.cycles-pp.__mutex_lock_slowpath.mutex_lock.pipe_read.__vfs_read.vfs_read
0.84 ± 15% -61.1% 0.33 ± 15% perf-profile.cycles-pp.__schedule.schedule.futex_wait_queue_me.futex_wait.do_futex
0.24 ± 48% +238.8% 0.83 ± 20% perf-profile.cycles-pp.__schedule.schedule.prepare_exit_to_usermode.retint_user
16.31 ± 5% +13.3% 18.48 ± 1% perf-profile.cycles-pp.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
20.50 ± 4% +15.7% 23.72 ± 0% perf-profile.cycles-pp.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
2.24 ± 7% -33.1% 1.50 ± 41% perf-profile.cycles-pp.__wake_up_common.__wake_up_sync_key.pipe_write.__vfs_write.vfs_write
2.42 ± 8% -32.1% 1.64 ± 38% perf-profile.cycles-pp.__wake_up_sync_key.pipe_write.__vfs_write.vfs_write.sys_write
0.55 ± 23% +80.5% 1.00 ± 12% perf-profile.cycles-pp.account_process_tick.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
0.48 ± 36% +68.9% 0.82 ± 27% perf-profile.cycles-pp.account_user_time.account_process_tick.update_process_times.tick_sched_handle.tick_sched_timer
3.63 ± 22% -57.9% 1.53 ± 14% perf-profile.cycles-pp.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_process.wake_up_q
2.94 ± 18% +22.4% 3.60 ± 2% perf-profile.cycles-pp.anon_pipe_buf_release.pipe_read.__vfs_read.vfs_read.sys_read
15.36 ± 5% +18.1% 18.14 ± 1% perf-profile.cycles-pp.copy_page_from_iter.pipe_write.__vfs_write.vfs_write.sys_write
13.54 ± 4% +19.4% 16.17 ± 1% perf-profile.cycles-pp.copy_user_generic_string.copy_page_from_iter.pipe_write.__vfs_write.vfs_write
27.96 ± 1% -7.2% 25.94 ± 4% perf-profile.cycles-pp.do_IRQ.ret_from_intr
6.18 ± 11% -51.2% 3.02 ± 15% perf-profile.cycles-pp.do_futex.sys_futex.entry_SYSCALL_64_fastpath
4.37 ± 12% -42.6% 2.51 ± 23% perf-profile.cycles-pp.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair
5.66 ± 18% -41.2% 3.33 ± 25% perf-profile.cycles-pp.enqueue_entity.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up
3.68 ± 22% -55.5% 1.64 ± 10% perf-profile.cycles-pp.enqueue_task_fair.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_process
1.95 ± 21% +45.2% 2.83 ± 9% perf-profile.cycles-pp.free_hot_cold_page.put_page.anon_pipe_buf_release.pipe_read.__vfs_read
1.02 ± 36% +88.7% 1.92 ± 3% perf-profile.cycles-pp.free_pcppages_bulk.free_hot_cold_page.put_page.anon_pipe_buf_release.pipe_read
1.19 ± 17% -52.2% 0.57 ± 15% perf-profile.cycles-pp.futex_wait.do_futex.sys_futex.entry_SYSCALL_64_fastpath
0.95 ± 11% -52.9% 0.45 ± 13% perf-profile.cycles-pp.futex_wait_queue_me.futex_wait.do_futex.sys_futex.entry_SYSCALL_64_fastpath
2.33 ± 22% -87.9% 0.28 ± 42% perf-profile.cycles-pp.futex_wake.do_futex.sys_futex.entry_SYSCALL_64_fastpath
1.26 ± 11% -24.1% 0.96 ± 21% perf-profile.cycles-pp.irq_exit.do_IRQ.ret_from_intr
1.59 ± 17% +42.1% 2.26 ± 7% perf-profile.cycles-pp.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
1.10 ± 16% -50.5% 0.55 ± 28% perf-profile.cycles-pp.is_module_text_address.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk
0.06 ±141% +2762.5% 1.72 ± 15% perf-profile.cycles-pp.mutex_lock.pipe_read.__vfs_read.vfs_read.sys_read
0.03 ±141% +5341.7% 1.63 ± 16% perf-profile.cycles-pp.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_read.__vfs_read
0.03 ±141% +4725.0% 1.45 ± 17% perf-profile.cycles-pp.mutex_spin_on_owner.isra.4.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_read
16.84 ± 4% +14.3% 19.26 ± 3% perf-profile.cycles-pp.pipe_read.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
23.56 ± 4% +10.8% 26.09 ± 2% perf-profile.cycles-pp.pipe_write.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
0.34 ± 51% +197.0% 1.00 ± 17% perf-profile.cycles-pp.prepare_exit_to_usermode.retint_user
4.00 ± 9% -44.8% 2.21 ± 29% perf-profile.cycles-pp.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
0.36 ± 43% +181.9% 1.02 ± 14% perf-profile.cycles-pp.retint_user
0.44 ± 24% +121.5% 0.98 ± 29% perf-profile.cycles-pp.run_timer_softirq.__do_softirq.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
4.42 ± 12% -43.2% 2.51 ± 23% perf-profile.cycles-pp.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.activate_task
0.86 ± 13% -57.3% 0.37 ± 16% perf-profile.cycles-pp.schedule.futex_wait_queue_me.futex_wait.do_futex.sys_futex
0.27 ± 58% +217.8% 0.85 ± 19% perf-profile.cycles-pp.schedule.prepare_exit_to_usermode.retint_user
6.31 ± 10% -50.9% 3.10 ± 17% perf-profile.cycles-pp.sys_futex.entry_SYSCALL_64_fastpath
20.72 ± 4% +15.2% 23.87 ± 0% perf-profile.cycles-pp.sys_write.entry_SYSCALL_64_fastpath
2.28 ± 23% -90.2% 0.22 ± 28% perf-profile.cycles-pp.try_to_wake_up.wake_up_process.wake_up_q.futex_wake.do_futex
2.10 ± 23% -91.3% 0.18 ± 18% perf-profile.cycles-pp.ttwu_do_activate.constprop.85.try_to_wake_up.wake_up_process.wake_up_q.futex_wake
20.70 ± 4% +15.2% 23.85 ± 0% perf-profile.cycles-pp.vfs_write.sys_write.entry_SYSCALL_64_fastpath
2.28 ± 23% -88.5% 0.26 ± 33% perf-profile.cycles-pp.wake_up_process.wake_up_q.futex_wake.do_futex.sys_futex
2.28 ± 23% -88.5% 0.26 ± 33% perf-profile.cycles-pp.wake_up_q.futex_wake.do_futex.sys_futex.entry_SYSCALL_64_fastpath

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
ivb42/unixbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/context1

commit:
1fda6948fbb2e756c8f7cabef40395cee2984298
9b7aaf11b8d61eeb87f3b99fb5ae59e61bb35f27

1fda6948fbb2e756 9b7aaf11b8d61eeb87f3b99fb5
---------------- --------------------------
%stddev %change %stddev
\ | \
288.03 ± 0% -25.6% 214.25 ± 0% unixbench.score
58.00 ± 1% -43.1% 33.00 ± 9% unixbench.time.percent_of_cpu_this_job_got
72.34 ± 0% -35.7% 46.50 ± 1% unixbench.time.system_time
11420136 ± 0% -26.3% 8421203 ± 0% unixbench.time.voluntary_context_switches
344407 ± 0% -32.4% 232953 ± 9% vmstat.system.cs
254673 ± 1% +11.1% 282981 ± 6% proc-vmstat.pgalloc_normal
269784 ± 1% +9.0% 294162 ± 8% proc-vmstat.pgfree
45666 ± 5% -21.8% 35703 ± 0% softirqs.RCU
98669 ± 0% -32.6% 66493 ± 0% softirqs.SCHED
255585 ± 2% -41.6% 149165 ± 0% softirqs.TIMER
1.737e+08 ± 8% +73.6% 3.016e+08 ± 15% cpuidle.C1-IVT.time
22888771 ± 0% -26.2% 16889065 ± 0% cpuidle.C1-IVT.usage
2819263 ± 37% +78.3% 5026446 ± 36% cpuidle.POLL.time
215.75 ± 7% +27.6% 275.25 ± 13% cpuidle.POLL.usage
48303 ± 52% -98.5% 746.00 ± 13% numa-vmstat.node0.numa_other
625.25 ± 2% -11.6% 552.50 ± 1% numa-vmstat.node1.nr_alloc_batch
27.00 ±117% +263.9% 98.25 ± 32% numa-vmstat.node1.nr_dirtied
26.00 ±119% +274.0% 97.25 ± 32% numa-vmstat.node1.nr_written
29792 ± 84% +158.6% 77047 ± 0% numa-vmstat.node1.numa_other
186.00 ± 3% -12.4% 163.00 ± 2% time.involuntary_context_switches
58.00 ± 1% -43.1% 33.00 ± 9% time.percent_of_cpu_this_job_got
72.34 ± 0% -35.7% 46.50 ± 1% time.system_time
4.30 ± 3% -67.0% 1.42 ± 2% time.user_time
11420136 ± 0% -26.3% 8421203 ± 0% time.voluntary_context_switches
1068857 ± 0% +2337.3% 26051614 ± 21% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
25448 ±169% +2538.9% 671565 ±107% latency_stats.avg.wait_on_page_bit.filemap_fdatawait_range.filemap_fdatawait.sync_inodes_sb.sync_inodes_one_sb.iterate_supers.sys_sync.entry_SYSCALL_64_fastpath
1068857 ± 0% +2337.3% 26051614 ± 21% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
501985 ±171% +2586.6% 13486271 ±104% latency_stats.max.wait_on_page_bit.filemap_fdatawait_range.filemap_fdatawait.sync_inodes_sb.sync_inodes_one_sb.iterate_supers.sys_sync.entry_SYSCALL_64_fastpath
1068857 ± 0% +2337.3% 26051614 ± 21% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
508200 ±169% +2555.0% 13492792 ±103% latency_stats.sum.wait_on_page_bit.filemap_fdatawait_range.filemap_fdatawait.sync_inodes_sb.sync_inodes_one_sb.iterate_supers.sys_sync.entry_SYSCALL_64_fastpath
2.25 ± 1% -11.8% 1.99 ± 10% turbostat.%Busy
7.92 ± 5% +37.7% 10.91 ± 7% turbostat.CPU%c1
44.22 ± 3% +26.4% 55.89 ± 9% turbostat.CorWatt
36.77 ± 8% -71.7% 10.39 ± 32% turbostat.Pkg%pc2
70.59 ± 2% +16.5% 82.22 ± 7% turbostat.PkgWatt
3.17 ± 0% +42.5% 4.52 ± 4% turbostat.RAMWatt
4.00 ±-25% +6.2% 4.25 ± 34% sched_debug.cfs_rq[12]:/.nr_spread_over
12636 ± 45% -46.1% 6808 ± 18% sched_debug.cfs_rq[13]:/.min_vruntime
13202 ± 46% -57.5% 5615 ± 17% sched_debug.cfs_rq[14]:/.min_vruntime
10421 ± 31% -29.5% 7347 ± 8% sched_debug.cfs_rq[18]:/.min_vruntime
3380 ± 52% -50.3% 1680 ± 72% sched_debug.cfs_rq[19]:/.exec_clock
6209 ± 42% -74.2% 1603 ± 26% sched_debug.cfs_rq[1]:/.exec_clock
32.75 ± 24% -76.3% 7.75 ± 69% sched_debug.cfs_rq[1]:/.load_avg
16499 ± 28% -56.5% 7182 ± 21% sched_debug.cfs_rq[1]:/.min_vruntime
-35.06 ±-10948% +18331.7% -6461 ±-20% sched_debug.cfs_rq[1]:/.spread0
33.00 ± 24% -76.5% 7.75 ± 69% sched_debug.cfs_rq[1]:/.tg_load_avg_contrib
7.50 ±142% +243.3% 25.75 ± 72% sched_debug.cfs_rq[21]:/.load_avg
5687 ± 24% +101.6% 11466 ± 44% sched_debug.cfs_rq[21]:/.min_vruntime
-10848 ±-58% -79.9% -2178 ±-281% sched_debug.cfs_rq[21]:/.spread0
7.50 ±142% +243.3% 25.75 ± 72% sched_debug.cfs_rq[21]:/.tg_load_avg_contrib
20.25 ±156% +328.4% 86.75 ± 52% sched_debug.cfs_rq[21]:/.util_avg
692.16 ± 30% +39.6% 966.37 ± 19% sched_debug.cfs_rq[23]:/.exec_clock
4978 ± 24% -60.2% 1983 ± 16% sched_debug.cfs_rq[2]:/.exec_clock
15545 ± 29% -48.2% 8048 ± 26% sched_debug.cfs_rq[2]:/.min_vruntime
7.00 ± 78% -71.4% 2.00 ±100% sched_debug.cfs_rq[2]:/.nr_spread_over
-13574 ±-39% -54.5% -6170 ±-55% sched_debug.cfs_rq[35]:/.spread0
4.75 ±112% +436.8% 25.50 ± 86% sched_debug.cfs_rq[36]:/.load_avg
1.00 ±-100% +125.0% 2.25 ± 36% sched_debug.cfs_rq[36]:/.nr_spread_over
4.75 ±112% +436.8% 25.50 ± 86% sched_debug.cfs_rq[36]:/.tg_load_avg_contrib
3410 ± 18% -51.0% 1672 ± 40% sched_debug.cfs_rq[3]:/.exec_clock
17.50 ± 47% -65.7% 6.00 ±115% sched_debug.cfs_rq[3]:/.load_avg
11719 ± 21% -56.7% 5070 ± 28% sched_debug.cfs_rq[3]:/.min_vruntime
17.50 ± 47% -67.1% 5.75 ±123% sched_debug.cfs_rq[3]:/.tg_load_avg_contrib
10.00 ±-10% +170.0% 27.00 ± 59% sched_debug.cfs_rq[42]:/.load_avg
10.00 ±-10% +170.0% 27.00 ± 59% sched_debug.cfs_rq[42]:/.tg_load_avg_contrib
10.00 ±-10% +385.0% 48.50 ± 60% sched_debug.cfs_rq[42]:/.util_avg
17.00 ± -5% +97.1% 33.50 ± 58% sched_debug.cfs_rq[47]:/.load_avg
17.00 ± -5% +97.1% 33.50 ± 58% sched_debug.cfs_rq[47]:/.tg_load_avg_contrib
21.00 ±100% +135.7% 49.50 ± 65% sched_debug.cfs_rq[47]:/.util_avg
3465 ± 59% -54.7% 1570 ± 17% sched_debug.cfs_rq[4]:/.exec_clock
11228 ± 27% -37.8% 6988 ± 15% sched_debug.cfs_rq[4]:/.min_vruntime
10.25 ± 95% -96.7% 0.33 ±141% sched_debug.cfs_rq[4]:/.nr_spread_over
3900 ± 11% -63.2% 1434 ± 77% sched_debug.cfs_rq[6]:/.exec_clock
12352 ± 16% -45.7% 6710 ± 46% sched_debug.cfs_rq[6]:/.min_vruntime
10852 ± 34% -51.4% 5275 ± 20% sched_debug.cfs_rq[8]:/.min_vruntime
157.75 ± 66% -86.1% 22.00 ±155% sched_debug.cfs_rq[9]:/.util_avg
11194 ± 34% -33.5% 7438 ± 7% sched_debug.cpu#0.nr_load_updates
1005965 ± 97% -80.7% 193952 ± 78% sched_debug.cpu#0.nr_switches
-16.50 ±-38% -65.2% -5.75 ±-14% sched_debug.cpu#0.nr_uninterruptible
501957 ± 98% -81.1% 94817 ± 80% sched_debug.cpu#0.sched_goidle
502829 ± 98% -80.4% 98361 ± 78% sched_debug.cpu#0.ttwu_count
13584 ± 24% -51.7% 6565 ± 15% sched_debug.cpu#1.nr_load_updates
1561606 ± 48% -79.6% 319093 ± 60% sched_debug.cpu#1.nr_switches
-23.25 ±-14% -87.1% -3.00 ±-33% sched_debug.cpu#1.nr_uninterruptible
1562351 ± 48% -79.6% 319325 ± 60% sched_debug.cpu#1.sched_count
780735 ± 48% -79.6% 159485 ± 60% sched_debug.cpu#1.sched_goidle
779956 ± 48% -79.6% 159415 ± 60% sched_debug.cpu#1.ttwu_count
9568 ± 15% -34.0% 6314 ± 25% sched_debug.cpu#11.nr_load_updates
695.25 ±107% -73.9% 181.75 ± 20% sched_debug.cpu#12.ttwu_local
8113 ± 34% -38.8% 4962 ± 22% sched_debug.cpu#13.nr_load_updates
0.00 ± 1% -Inf% -1.75 ±-102% sched_debug.cpu#14.nr_uninterruptible
794942 ± 18% +20.5% 957681 ± 4% sched_debug.cpu#15.avg_idle
441.75 ± 62% -61.6% 169.50 ± 27% sched_debug.cpu#15.ttwu_local
6234 ± 7% -37.4% 3902 ± 10% sched_debug.cpu#16.nr_load_updates
363.00 ± 36% -53.2% 169.75 ± 15% sched_debug.cpu#17.ttwu_local
12142 ± 10% -40.8% 7188 ± 11% sched_debug.cpu#2.nr_load_updates
1272905 ± 24% -64.9% 446977 ± 31% sched_debug.cpu#2.nr_switches
1274639 ± 24% -64.9% 447088 ± 31% sched_debug.cpu#2.sched_count
636392 ± 24% -64.9% 223377 ± 31% sched_debug.cpu#2.sched_goidle
632056 ± 24% -64.7% 223188 ± 31% sched_debug.cpu#2.ttwu_count
671.50 ± 71% -72.9% 182.00 ± 14% sched_debug.cpu#22.ttwu_local
5901 ± 13% -32.3% 3993 ± 9% sched_debug.cpu#23.nr_load_updates
47101 ±149% +261.0% 170053 ± 34% sched_debug.cpu#23.nr_switches
47106 ±149% +261.4% 170223 ± 34% sched_debug.cpu#23.sched_count
23517 ±149% +261.4% 84984 ± 34% sched_debug.cpu#23.sched_goidle
23219 ±151% +266.8% 85180 ± 34% sched_debug.cpu#23.ttwu_count
476.50 ± 92% -60.3% 189.25 ± 15% sched_debug.cpu#23.ttwu_local
1.25 ±103% -260.0% -2.00 ±-35% sched_debug.cpu#24.nr_uninterruptible
6066 ± 20% -49.3% 3077 ± 7% sched_debug.cpu#25.nr_load_updates
2.75 ±141% -109.1% -0.25 ±-435% sched_debug.cpu#27.nr_uninterruptible
1373 ± 87% +44629.3% 614132 ±106% sched_debug.cpu#28.nr_switches
651.25 ± 91% +47043.5% 307021 ±106% sched_debug.cpu#28.sched_goidle
1585 ± 83% +19416.5% 309385 ±104% sched_debug.cpu#28.ttwu_count
5461 ± 19% -44.7% 3022 ± 12% sched_debug.cpu#29.nr_load_updates
0.00 ± 0% +Inf% 6.00 ± 88% sched_debug.cpu#29.nr_uninterruptible
9976 ± 6% -34.5% 6536 ± 28% sched_debug.cpu#3.nr_load_updates
879948 ± 21% -51.1% 430031 ± 68% sched_debug.cpu#3.nr_switches
880415 ± 21% -51.1% 430197 ± 68% sched_debug.cpu#3.sched_count
439907 ± 21% -51.1% 214947 ± 68% sched_debug.cpu#3.sched_goidle
438872 ± 21% -51.1% 214556 ± 68% sched_debug.cpu#3.ttwu_count
5336 ± 16% -45.3% 2917 ± 12% sched_debug.cpu#30.nr_load_updates
6028 ± 25% -45.4% 3290 ± 30% sched_debug.cpu#31.nr_load_updates
5256 ± 10% -45.9% 2843 ± 8% sched_debug.cpu#32.nr_load_updates
6489 ± 42% -55.0% 2923 ± 10% sched_debug.cpu#34.nr_load_updates
165.25 ± 11% +48.7% 245.75 ± 21% sched_debug.cpu#34.ttwu_local
4703 ± 5% -41.6% 2746 ± 2% sched_debug.cpu#36.nr_load_updates
4641 ± 4% -31.9% 3158 ± 26% sched_debug.cpu#37.nr_load_updates
1.75 ± 24% -128.6% -0.50 ±-300% sched_debug.cpu#37.nr_uninterruptible
148.50 ± 4% +24.4% 184.75 ± 15% sched_debug.cpu#37.ttwu_local
3485 ±138% +17598.4% 616876 ± 82% sched_debug.cpu#38.nr_switches
5.00 ± 16% -130.0% -1.50 ±-57% sched_debug.cpu#38.nr_uninterruptible
3491 ±138% +17600.5% 617925 ± 82% sched_debug.cpu#38.sched_count
1698 ±141% +18057.6% 308406 ± 82% sched_debug.cpu#38.sched_goidle
9765 ± 28% -33.6% 6488 ± 9% sched_debug.cpu#4.nr_load_updates
284.50 ± 13% +101.8% 574.25 ± 42% sched_debug.cpu#4.ttwu_local
5003 ± 9% -46.9% 2656 ± 10% sched_debug.cpu#40.nr_load_updates
180.00 ± 7% -17.6% 148.25 ± 13% sched_debug.cpu#40.ttwu_local
6289 ± 46% -55.9% 2772 ± 24% sched_debug.cpu#41.nr_load_updates
5385 ± 21% -55.1% 2419 ± 3% sched_debug.cpu#42.nr_load_updates
5249 ± 22% -53.9% 2418 ± 3% sched_debug.cpu#43.nr_load_updates
4.50 ± 57% -116.7% -0.75 ±-145% sched_debug.cpu#43.nr_uninterruptible
4694 ± 3% -46.0% 2533 ± 11% sched_debug.cpu#44.nr_load_updates
1.50 ±100% -66.7% 0.50 ±100% sched_debug.cpu#44.nr_uninterruptible
157.25 ± 6% -17.2% 130.25 ± 5% sched_debug.cpu#44.ttwu_local
4699 ± 7% -49.6% 2368 ± 5% sched_debug.cpu#45.nr_load_updates
4498 ± 5% -47.7% 2352 ± 3% sched_debug.cpu#46.nr_load_updates
322.00 ± 27% +355.0% 1465 ± 70% sched_debug.cpu#46.ttwu_count
988085 ± 2% -14.3% 846764 ± 9% sched_debug.cpu#47.avg_idle
5610 ± 26% -48.3% 2901 ± 32% sched_debug.cpu#47.nr_load_updates
132.00 ± 7% +55.3% 205.00 ± 8% sched_debug.cpu#47.ttwu_local
10304 ± 3% -43.8% 5788 ± 29% sched_debug.cpu#6.nr_load_updates
1041518 ± 12% -66.0% 354166 ± 96% sched_debug.cpu#6.nr_switches
1043597 ± 12% -66.0% 354370 ± 96% sched_debug.cpu#6.sched_count
520703 ± 12% -66.0% 177028 ± 96% sched_debug.cpu#6.sched_goidle
520372 ± 12% -66.1% 176504 ± 97% sched_debug.cpu#6.ttwu_count
10457 ± 29% -51.3% 5090 ± 13% sched_debug.cpu#7.nr_load_updates
1086551 ± 71% -78.7% 231433 ± 66% sched_debug.cpu#7.nr_switches
1092986 ± 71% -78.8% 231539 ± 66% sched_debug.cpu#7.sched_count
543145 ± 71% -78.7% 115671 ± 66% sched_debug.cpu#7.sched_goidle
543672 ± 71% -78.9% 114535 ± 67% sched_debug.cpu#7.ttwu_count
9814 ± 26% -38.1% 6078 ± 18% sched_debug.cpu#8.nr_load_updates


ivb43: Ivytown Ivy Bridge-EP
Memory: 64G

vm-vp-quantal-x86_64: qemu-system-x86_64 -enable-kvm
Memory: 360M

lkp-ne04: Nehalem-EP
Memory: 12G

wsm: Westmere
Memory: 6G

xps2: Nehalem
Memory: 4G

ivb42: Ivytown Ivy Bridge-EP
Memory: 64G




hackbench.throughput

64000 ++-------------------------*-*----*---------------------------------+
62000 *+*..*.*.*.*..*.*.*.*..*.* * *..*. .*..*.*.*. .*.*.* |
| *.* *. |
60000 ++ |
58000 ++ |
56000 ++ |
54000 ++ |
| |
52000 ++ |
50000 ++ |
48000 ++ |
46000 ++ O O O O O O O |
| O O O O O O O O O O O O O O O O O O O O O
44000 O+O O |
42000 ++------------------------------------------------------------------+


hackbench.time.minor_page_faults

8e+06 ++---------*--------------*--*-*-*---*----------------------------+
*.*.*.. .* + .*..*.*.*.* * .*.*. .*.*. .* |
7.5e+06 ++ * * *.* *. *.* |
7e+06 ++ |
| |
6.5e+06 ++ |
| |
6e+06 ++ O O O O O O O O O O O O O O O O |
| O O O O O O O O |
5.5e+06 O+O O O O O |
5e+06 ++ |
| |
4.5e+06 ++ |
| O
4e+06 ++----------------------------------------------------------------+


hackbench.time.voluntary_context_switches

2.6e+08 ++O---------------------------------------------------------------+
2.4e+08 O+ O O O O O O O O O O O O O O O O O O |
| O O O O O O O O O O |
2.2e+08 ++ |
2e+08 ++ |
| O
1.8e+08 ++ |
1.6e+08 ++ |
1.4e+08 ++ |
| |
1.2e+08 ++ |
1e+08 ++ |
| |
8e+07 *+*.*..*.*.*.*.*..*.*.*.*.*..*.*.*.*.*..*.*.*.*.*..*.*.*.*.* |
6e+07 ++----------------------------------------------------------------+


hackbench.time.involuntary_context_switches

7e+07 ++------------------------------------------------------------------+
| O |
6e+07 O+ |
| O O O O O O O O O O O O O O O O O O O O O |
| O O O O O O |
5e+07 ++ O O
| |
4e+07 ++ |
| |
3e+07 ++ |
| |
| |
2e+07 ++ * |
|.*.. .*. .*.. + + .*.*.*..*.*. .*. .*. .*.*. |
1e+07 *+---*---*-*--*-*-*----*---*-*-------------*-*----*---*------*------+


time.file_system_outputs

220 ++--------------------------------------------------------------------+
210 *+ .*. .*. .*.. .*.*..*.*.*..*.*.*..*. .* |
| *. *.*. * * *.*..*.*.*..*.*.*..* |
200 ++ |
190 ++ |
| |
180 ++ |
170 ++ |
160 ++ O O O O O O O O O O O O O O O O O O O O O O |
| |
150 O+O O O O O O O |
140 ++ |
| |
130 ++ |
120 ++--------------------------------------------------------------------O


vmstat.system.in

80000 ++------------------------------------------------------------------+
O O |
70000 ++ O O O O O O O O O O O O O O O O |
| O O O O O O O O O O O |
60000 ++ O O
| |
50000 ++ |
| |
40000 ++ |
| |
30000 ++ |
| |
20000 *+*..*.*.*.*..*.*.*.*..*.*.*.*..*.*.*..*.*.*.*..*.*.*.*..*.*.* |
| |
10000 ++------------------------------------------------------------------+


vmstat.system.cs

550000 ++-----------------------------------------------------------------+
O O O O O |
500000 ++ O O O O O O O O O O O O O O O O O O O O |
450000 ++ O O O O O |
| |
400000 ++ O
350000 ++ |
| |
300000 ++ |
250000 ++ |
| |
200000 ++ |
150000 ++*.*..*.*.*.*.. .*.*.*..*.*.*.*..*.*.*.*. .*.*.*.*..*.*.*.* |
* * *. |
100000 ++-----------------------------------------------------------------+


proc-vmstat.numa_hit

9.5e+06 ++----------------------------------------------------------------+
| .*.*. .*. .*. .*.. |
9e+06 *+ .*.. .*. .*.. .* *. * * *. .*.*. .* |
8.5e+06 ++* *.* * * * *..*.*.*.* |
| |
8e+06 ++ |
7.5e+06 ++ |
| |
7e+06 ++ O O O O O O O O O O O O O O O O O O O O O O |
6.5e+06 O+O O O O O O O |
| |
6e+06 ++ |
5.5e+06 ++ |
| O
5e+06 ++----------------------------------------------------------------+


proc-vmstat.numa_local

9.5e+06 ++----------------------------------------------------------------+
| .*.*. .*. .*. .*.. |
9e+06 *+ .*.. .*. .*.. .* *. * * *. .*.*. .* |
8.5e+06 ++* *.* * * * *..*.*.*.* |
| |
8e+06 ++ |
7.5e+06 ++ |
| |
7e+06 ++ O O O O O O O O O O O O O O O O O O O O O O |
6.5e+06 O+O O O O O O O |
| |
6e+06 ++ |
5.5e+06 ++ |
| O
5e+06 ++----------------------------------------------------------------+


proc-vmstat.pgalloc_dma32

7.5e+06 ++----------------------------------------------------------------+
| |
7e+06 *+ .*.. .*. .*.. .*.*.*..*.*.*.*.*.. .*. .* |
| * *.* * .* *.*.* *..*.*. .* |
6.5e+06 ++ * * |
| |
6e+06 ++ |
| |
5.5e+06 ++ O O O O O O O O O O O O O O O O O O O O O |
O O O O O O O |
5e+06 ++ O O |
| |
4.5e+06 ++ |
| O
4e+06 ++----------------------------------------------------------------+


proc-vmstat.pgalloc_normal

6.5e+06 ++----------------------------------------------------------------+
| |
6e+06 *+ .*.. * .*.*.*..*. .*.*.*.. .* |
| * + + .*.. .* * *.*.*.*.*..*.*. .* |
| *.* * * * |
5.5e+06 ++ |
| |
5e+06 ++ |
| O O O O O O O O O |
4.5e+06 ++O O O O O O O O O O O O O O O O |
O O O O O |
| |
4e+06 ++ |
| |
3.5e+06 ++----------------------------------------------------------------O


proc-vmstat.pgfree

1.4e+07 ++----------------------------------------------------------------+
| |
1.3e+07 *+ *. .*.. .*.*.*..*. .*.*.*.. .*. .* |
| *.*..*. + * .* * *.*.* *..*.*. .* |
1.2e+07 ++ * * * |
| |
1.1e+07 ++ |
| |
1e+07 ++ O O O O O O O O O O O O O O O O O O O O O O |
O O O O O O O O |
9e+06 ++ |
| |
8e+06 ++ |
| O
7e+06 ++----------------------------------------------------------------+


proc-vmstat.pgfault

8.5e+06 ++----------------------------------------------------------------+
| .*.. |
8e+06 *+ .*.. .*. .*.. .*.*.* *.*.*.*.*.. .*.*. .*. .* |
7.5e+06 ++* *.* * * *.* *..* *.* |
| |
7e+06 ++ |
| |
6.5e+06 ++ |
| O |
6e+06 ++ O O O O O O O O O O O O O O O O O O O O O |
5.5e+06 O+O O O O O O O |
| |
5e+06 ++ |
| O
4.5e+06 ++----------------------------------------------------------------+

[*] bisect-good sample
[O] bisect-bad sample

To reproduce:

git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
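
For a quick manual A/B check outside the harness, a minimal sketch (assuming
kernels built from the two commits above are bootable and hackbench is
installed; the group and loop counts mirror the invocations logged at the end
of this report):

# boot the parent commit 1fda6948fbb2, then run the workload a few times:
for i in 1 2 3 4 5; do /usr/bin/hackbench -g 192 --process -l 1875; done
# reboot into the hperf_hmp commit 9b7aaf11b8d6, repeat, and compare the
# "Time:" values hackbench prints for the two kernels.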


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: hackbench
default-monitors:
wait: activate-monitor
kmsg:
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
interval: 10
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 60
cpufreq_governor: performance
default-watchdogs:
oom-killer:
watchdog:
commit: ede36133dba1b529d2cd1dac5efb3ac63a7f45eb
model: Westmere
memory: 6G
nr_hdd_partitions: 1
hdd_partitions: "/dev/disk/by-id/ata-ATAPI_iHES208_2_3782208017_250915404"
swap_partitions:
rootfs_partition:
netconsole_port: 6667
category: benchmark
nr_threads: 1600%
perf-profile:
freq: 800
hackbench:
mode: process
ipc: socket
queue: cyclic
testbox: wsm
tbox_group: wsm
kconfig: x86_64-rhel
enqueue_time: 2015-11-06 15:40:33.457988319 +08:00
id: 60774267aeec02a2baa15ea2493345479fda2031
user: lkp
compiler: gcc-4.9
head_commit: ede36133dba1b529d2cd1dac5efb3ac63a7f45eb
base_commit: 6a13feb9c82803e2b815eca72fa7a9f5561d7861
branch: linux-devel/devel-hourly-2015110701
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/ede36133dba1b529d2cd1dac5efb3ac63a7f45eb/vmlinuz-4.3.0-bochs-virtio-gpu-wl-ath-13116-gede3613"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/hackbench/performance-1600%-process-socket/wsm/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ede36133dba1b529d2cd1dac5efb3ac63a7f45eb/0"
job_file: "/lkp/scheduled/wsm/cyclic_hackbench-performance-1600%-process-socket-x86_64-rhel-CYCLIC_HEAD-ede36133dba1b529d2cd1dac5efb3ac63a7f45eb-20151106-6185-1o555co-0.yaml"
dequeue_time: 2015-11-07 07:14:47.907201157 +08:00
nr_cpu: "$(nproc)"
max_uptime: 2400
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/wsm/cyclic_hackbench-performance-1600%-process-socket-x86_64-rhel-CYCLIC_HEAD-ede36133dba1b529d2cd1dac5efb3ac63a7f45eb-20151106-6185-1o555co-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015110701
- commit=ede36133dba1b529d2cd1dac5efb3ac63a7f45eb
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/ede36133dba1b529d2cd1dac5efb3ac63a7f45eb/vmlinuz-4.3.0-bochs-virtio-gpu-wl-ath-13116-gede3613
- max_uptime=2400
- RESULT_ROOT=/result/hackbench/performance-1600%-process-socket/wsm/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ede36133dba1b529d2cd1dac5efb3ac63a7f45eb/0
- LKP_SERVER=inn
- |2-


earlyprintk=ttyS0,115200 systemd.log_level=err
debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
console=ttyS0,115200 console=tty0 vga=normal

rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/ede36133dba1b529d2cd1dac5efb3ac63a7f45eb/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz"
job_state: finished
loadavg: 1216.88 1083.35 608.00 1/188 19294
start_time: '1446851715'
end_time: '1446852333'
version: "/lkp/lkp/.src-20151106-190021"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875
/usr/bin/hackbench -g 192 --process -l 1875