Subject: [lkp] [x86/entry/64] 62c79204783: 5.9% aim7.jobs-per-min
FYI, we noticed the below changes on

git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/entry
commit 62c79204783e188291d880f23d49c02d8c8f498b ("x86/entry/64: When returning via SYSRET, POP regs instead of using MOV")
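
For readers unfamiliar with the change: the commit title refers to the 64-bit syscall exit fast path, where the saved user registers can be reloaded either with MOVs from their pt_regs slots on the stack (followed by one RSP adjustment) or with a sequence of POPs. The fragment below is only an illustrative sketch of the two styles, not the actual entry_64.S diff; the slot layout and register order are simplified.

    /* MOV style: load each saved register from its stack slot,
       then drop the whole frame with a single RSP adjustment. */
    movq    4*8(%rsp), %r11     /* user RFLAGS, consumed by SYSRET */
    movq    3*8(%rsp), %r10
    movq    2*8(%rsp), %r9
    movq    1*8(%rsp), %r8
    movq    0*8(%rsp), %rcx     /* user RIP, consumed by SYSRET */
    addq    $5*8, %rsp

    /* POP style: the same restores expressed as POPs, which advance
       RSP one slot at a time as a side effect. */
    popq    %rcx                /* user RIP */
    popq    %r8
    popq    %r9
    popq    %r10
    popq    %r11                /* user RFLAGS */
    /* ... stack switch and SYSRETQ follow in the real code ... */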


=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/misc_rtns_1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
108044 ± 0% +5.9% 114416 ± 0% aim7.jobs-per-min
223.70 ± 0% -5.6% 211.11 ± 0% aim7.time.elapsed_time
223.70 ± 0% -5.6% 211.11 ± 0% aim7.time.elapsed_time.max
2113772 ± 0% -83.5% 349283 ± 2% aim7.time.involuntary_context_switches
599.10 ± 0% -2.2% 585.90 ± 0% aim7.time.system_time
203.87 ± 1% -4.7% 194.25 ± 0% aim7.time.user_time
2113772 ± 0% -83.5% 349283 ± 2% time.involuntary_context_switches

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/new_raph

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
184357 ± 0% +10.8% 204299 ± 0% aim7.jobs-per-min
131.71 ± 0% -9.8% 118.82 ± 0% aim7.time.elapsed_time
131.71 ± 0% -9.8% 118.82 ± 0% aim7.time.elapsed_time.max
2193692 ± 0% -92.5% 164787 ± 0% aim7.time.involuntary_context_switches
18.86 ± 2% -68.5% 5.94 ± 4% aim7.time.system_time
435.63 ± 0% -2.7% 424.04 ± 0% aim7.time.user_time
58879 ± 2% -19.5% 47402 ± 2% aim7.time.voluntary_context_switches
131.71 ± 0% -9.8% 118.82 ± 0% time.elapsed_time
131.71 ± 0% -9.8% 118.82 ± 0% time.elapsed_time.max
2193692 ± 0% -92.5% 164787 ± 0% time.involuntary_context_switches
18.86 ± 2% -68.5% 5.94 ± 4% time.system_time
58879 ± 2% -19.5% 47402 ± 2% time.voluntary_context_switches

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/pipe_cpy

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
240551 ± 0% +13.9% 273904 ± 2% aim7.jobs-per-min
101.42 ± 0% -12.2% 89.04 ± 2% aim7.time.elapsed_time
101.42 ± 0% -12.2% 89.04 ± 2% aim7.time.elapsed_time.max
1981819 ± 0% -93.0% 138430 ± 3% aim7.time.involuntary_context_switches
278.03 ± 0% -6.5% 259.97 ± 2% aim7.time.system_time
54936 ± 1% -19.6% 44147 ± 2% aim7.time.voluntary_context_switches
101.42 ± 0% -12.2% 89.04 ± 2% time.elapsed_time
101.42 ± 0% -12.2% 89.04 ± 2% time.elapsed_time.max
1981819 ± 0% -93.0% 138430 ± 3% time.involuntary_context_switches
54936 ± 1% -19.6% 44147 ± 2% time.voluntary_context_switches

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
lkp-sb02/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/btrfs/9B/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
2554844 ± 1% -13.1% 2221395 ± 1% fsmark.app_overhead
223789 ± 0% -57.2% 95777 ± 6% fsmark.time.involuntary_context_switches
32.50 ± 1% +5.4% 34.25 ± 1% fsmark.time.percent_of_cpu_this_job_got
2118350 ± 0% +5.2% 2229178 ± 0% fsmark.time.voluntary_context_switches
223789 ± 0% -57.2% 95777 ± 6% time.involuntary_context_switches
39831 ± 0% -9.9% 35903 ± 1% softirqs.SCHED
59651 ± 1% -8.7% 54478 ± 0% softirqs.TIMER
64594 ± 0% -69.9% 19433 ± 0% vmstat.system.cs
25161 ± 0% -94.2% 1465 ± 3% vmstat.system.in
4366124 ± 0% -90.0% 434486 ± 4% cpuidle.C1-SNB.usage
17993557 ± 4% +17.6% 21163962 ± 3% cpuidle.C1E-SNB.time
42154 ± 3% +15.4% 48646 ± 6% cpuidle.C1E-SNB.usage
9261281 ± 5% +28.4% 11895838 ± 4% cpuidle.C3-SNB.time
6639 ± 2% +14.5% 7601 ± 3% cpuidle.C3-SNB.usage
9194 ± 2% -97.7% 208.25 ± 6% cpuidle.POLL.usage
11.44 ± 0% -20.0% 9.14 ± 0% turbostat.%Busy
333.25 ± 0% -21.0% 263.25 ± 1% turbostat.Avg_MHz
1.32 ± 5% +28.4% 1.70 ± 4% turbostat.CPU%c3
11.04 ± 1% -13.0% 9.61 ± 1% turbostat.CorWatt
1.47 ± 3% +26.7% 1.86 ± 4% turbostat.Pkg%pc2
1.19 ± 8% +58.6% 1.89 ± 5% turbostat.Pkg%pc3
19.86 ± 1% +35.2% 26.86 ± 1% turbostat.Pkg%pc6
14.61 ± 0% -9.9% 13.16 ± 1% turbostat.PkgWatt
2143346 ± 57% -100.0% 0.00 ± -1% latency_stats.avg.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
30524 ± 3% +20.3% 36728 ± 1% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].btrfs_insert_delayed_items.[btrfs].btrfs_commit_inode_delayed_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync
18837 ± 4% +56.0% 29390 ± 7% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
12096 ± 6% +46.5% 17718 ± 4% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
62486 ± 1% +19.6% 74731 ± 4% latency_stats.hits.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
52038 ± 2% +16.7% 60742 ± 3% latency_stats.hits.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
3558104 ± 57% -100.0% 0.00 ± -1% latency_stats.max.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
4.111e+08 ± 57% -100.0% 0.00 ± -1% latency_stats.sum.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1403516 ± 2% +31.1% 1840040 ± 2% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].btrfs_insert_delayed_items.[btrfs].btrfs_commit_inode_delayed_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync
3349730 ± 2% +19.6% 4005849 ± 2% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].btrfs_new_inode.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
178060 ± 6% +74.3% 310377 ± 11% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
1322666 ± 4% +27.8% 1690982 ± 1% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].insert_with_overflow.[btrfs].btrfs_insert_dir_item.[btrfs].btrfs_add_link.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open
42794 ± 5% +63.1% 69810 ± 11% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_truncate_inode_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
122546 ± 7% +60.4% 196523 ± 7% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
24192 ± 3% +29.8% 31401 ± 6% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
697815 ± 1% +11.1% 775306 ± 2% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_lookup_inode.[btrfs].__btrfs_update_delayed_inode.[btrfs].btrfs_commit_inode_delayed_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
566092 ± 2% +29.3% 732008 ± 7% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
283356 ± 4% +33.1% 377256 ± 7% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_truncate_inode_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
525841 ± 2% +24.2% 653227 ± 4% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
759116 ± 2% -9.4% 687617 ± 1% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_lookup_dir_item.[btrfs].btrfs_lookup_dentry.[btrfs].btrfs_lookup.[btrfs].lookup_real.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
12244 ± 8% -11.0% 10892 ± 3% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
8078 ± 6% +20.1% 9700 ± 5% sched_debug.cfs_rq[0]:/.exec_clock
12997 ± 8% +25.1% 16258 ± 2% sched_debug.cfs_rq[0]:/.min_vruntime
269.00 ± 8% -11.3% 238.50 ± 3% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
553.11 ±128% -403.8% -1680 ±-72% sched_debug.cfs_rq[2]:/.spread0
28.75 ± 35% -43.5% 16.25 ± 42% sched_debug.cfs_rq[3]:/.nr_spread_over
42568 ± 29% -52.2% 20355 ± 4% sched_debug.cpu#0.nr_load_updates
4438150 ± 51% -92.6% 328982 ± 4% sched_debug.cpu#0.nr_switches
4438484 ± 51% -92.6% 329332 ± 4% sched_debug.cpu#0.sched_count
2060550 ± 53% -96.5% 72130 ± 7% sched_debug.cpu#0.sched_goidle
2323810 ± 48% -87.5% 290944 ± 5% sched_debug.cpu#0.ttwu_count
2136144 ± 53% -97.7% 48220 ± 2% sched_debug.cpu#0.ttwu_local
1705455 ±133% -80.1% 339746 ± 5% sched_debug.cpu#1.nr_switches
1705520 ±133% -80.1% 339819 ± 5% sched_debug.cpu#1.sched_count
739354 ±148% -89.5% 77619 ± 11% sched_debug.cpu#1.sched_goidle
926939 ±122% -74.2% 238908 ± 6% sched_debug.cpu#1.ttwu_count
742260 ±155% -95.8% 31432 ± 1% sched_debug.cpu#1.ttwu_local
968.25 ± 11% -89.3% 104.00 ±102% sched_debug.cpu#2.nr_uninterruptible
63733 ± 9% +21.4% 77348 ± 12% sched_debug.cpu#2.sched_goidle
33525 ± 3% -13.7% 28932 ± 1% sched_debug.cpu#2.ttwu_local
1264 ± 17% -101.5% -18.50 ±-378% sched_debug.cpu#3.nr_uninterruptible

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/btrfs/9B/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
3294133 ± 2% -6.2% 3088824 ± 2% fsmark.app_overhead
463365 ± 1% -51.7% 223905 ± 7% fsmark.time.involuntary_context_switches
140.50 ± 1% +8.2% 152.00 ± 1% fsmark.time.percent_of_cpu_this_job_got
213.09 ± 1% +7.2% 228.37 ± 1% fsmark.time.system_time
4278018 ± 1% +3.6% 4432123 ± 1% fsmark.time.voluntary_context_switches
463365 ± 1% -51.7% 223905 ± 7% time.involuntary_context_switches
7.75 ± 5% +18.4% 9.17 ± 0% turbostat.CPU%c6
5214507 ± 0% -70.1% 1561193 ± 2% cpuidle.C1-NHM.usage
23195 ± 5% -97.2% 641.50 ± 5% cpuidle.POLL.usage
96711 ± 1% -51.9% 46555 ± 0% vmstat.system.cs
30013 ± 1% -87.8% 3649 ± 3% vmstat.system.in
5154 ± 4% -10.8% 4599 ± 3% slabinfo.btrfs_extent_buffer.active_objs
5154 ± 4% -10.8% 4599 ± 3% slabinfo.btrfs_extent_buffer.num_objs
1674 ± 7% -13.2% 1453 ± 5% slabinfo.buffer_head.active_objs
1680 ± 7% -13.1% 1459 ± 5% slabinfo.buffer_head.num_objs
1145192 ± 70% -100.0% 0.00 ± -1% latency_stats.avg.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
137982 ± 2% +73.7% 239720 ± 75% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].insert_with_overflow.[btrfs].btrfs_insert_dir_item.[btrfs].btrfs_add_link.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
1540569 ± 70% -100.0% 0.00 ± -1% latency_stats.max.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1.741e+08 ± 70% -100.0% 0.00 ± -1% latency_stats.sum.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
2427124 ± 2% +60.9% 3906177 ± 78% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].insert_with_overflow.[btrfs].btrfs_insert_dir_item.[btrfs].btrfs_add_link.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
47047 ± 9% +15.6% 54373 ± 2% sched_debug.cfs_rq[0]:/.min_vruntime
23933 ± 18% +24.7% 29834 ± 2% sched_debug.cfs_rq[0]:/.tg_load_avg
788.25 ± 74% +312.2% 3249 ± 37% sched_debug.cfs_rq[1]:/.blocked_load_avg
-297.49 ±-1471% +989.4% -3240 ±-11% sched_debug.cfs_rq[1]:/.spread0
23934 ± 18% +24.7% 29840 ± 2% sched_debug.cfs_rq[1]:/.tg_load_avg
815.00 ± 73% +303.0% 3284 ± 37% sched_debug.cfs_rq[1]:/.tg_load_contrib
42646 ± 8% +20.4% 51331 ± 2% sched_debug.cfs_rq[2]:/.min_vruntime
23848 ± 17% +23.9% 29555 ± 4% sched_debug.cfs_rq[2]:/.tg_load_avg
4349 ± 24% -41.6% 2541 ± 40% sched_debug.cfs_rq[3]:/.blocked_load_avg
119.25 ± 42% -86.6% 16.00 ±141% sched_debug.cfs_rq[3]:/.load
133.50 ± 32% -93.0% 9.33 ±141% sched_debug.cfs_rq[3]:/.runnable_load_avg
23820 ± 17% +23.6% 29446 ± 3% sched_debug.cfs_rq[3]:/.tg_load_avg
4486 ± 24% -41.7% 2617 ± 42% sched_debug.cfs_rq[3]:/.tg_load_contrib
23822 ± 17% +23.5% 29431 ± 3% sched_debug.cfs_rq[4]:/.tg_load_avg
43836 ± 2% +10.4% 48383 ± 2% sched_debug.cfs_rq[5]:/.min_vruntime
23750 ± 17% +23.9% 29419 ± 3% sched_debug.cfs_rq[5]:/.tg_load_avg
41692 ± 4% +15.9% 48332 ± 3% sched_debug.cfs_rq[6]:/.min_vruntime
23751 ± 17% +23.8% 29398 ± 3% sched_debug.cfs_rq[6]:/.tg_load_avg
42923 ± 6% +13.2% 48585 ± 3% sched_debug.cfs_rq[7]:/.min_vruntime
23749 ± 17% +23.7% 29373 ± 3% sched_debug.cfs_rq[7]:/.tg_load_avg
253.00 ± 69% -180.0% -202.50 ±-55% sched_debug.cpu#1.nr_uninterruptible
150.00 ± 43% -81.6% 27.67 ±141% sched_debug.cpu#3.cpu_load[0]
88.50 ± 32% -80.8% 17.00 ±136% sched_debug.cpu#3.cpu_load[1]
59.50 ± 30% -70.2% 17.75 ± 92% sched_debug.cpu#3.cpu_load[2]
46.50 ± 34% -53.2% 21.75 ± 55% sched_debug.cpu#3.cpu_load[3]
1208 ± 19% -91.8% 99.33 ±141% sched_debug.cpu#3.curr->pid
127.50 ± 49% -87.5% 16.00 ±141% sched_debug.cpu#3.load
1447 ± 34% -50.6% 715.00 ± 21% sched_debug.cpu#4.nr_uninterruptible
1029 ± 18% -38.7% 631.00 ± 10% sched_debug.cpu#5.nr_uninterruptible
1244821 ± 70% -70.6% 366365 ± 3% sched_debug.cpu#6.nr_switches
1136 ± 14% -60.6% 447.75 ± 15% sched_debug.cpu#6.nr_uninterruptible
1244958 ± 70% -70.6% 366498 ± 3% sched_debug.cpu#6.sched_count
845.25 ± 17% -30.1% 591.00 ± 7% sched_debug.cpu#7.nr_uninterruptible

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/16MB/60G/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
98844 ± 2% -75.2% 24496 ± 1% fsmark.time.involuntary_context_switches
98844 ± 2% -75.2% 24496 ± 1% time.involuntary_context_switches
22613 ± 0% -78.7% 4810 ± 0% vmstat.system.cs
9686 ± 0% -93.5% 631.00 ± 2% vmstat.system.in
1.703e+08 ± 3% -21.2% 1.342e+08 ± 5% cpuidle.C1-NHM.time
4972587 ± 0% -88.7% 560540 ± 2% cpuidle.C1-NHM.usage
2092 ± 5% -91.8% 171.75 ± 6% cpuidle.POLL.usage
3.44 ± 0% -11.9% 3.04 ± 0% turbostat.%Busy
112.00 ± 0% -12.9% 97.50 ± 0% turbostat.Avg_MHz
12.01 ± 1% -15.8% 10.12 ± 3% turbostat.CPU%c1
42309 ±125% -92.6% 3129 ± 72% latency_stats.avg.wait_on_page_bit.f2fs_wait_on_page_writeback.[f2fs].f2fs_wait_on_page_writeback.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write
153873 ±159% -90.8% 14192 ± 51% latency_stats.max.alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
1458401 ±133% -69.6% 444016 ± 59% latency_stats.sum.alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
20021 ± 29% +6098.0% 1240911 ±137% latency_stats.sum.wait_on_page_bit.f2fs_wait_on_page_writeback.[f2fs].f2fs_wait_on_page_writeback.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
13943 ±132% -53.5% 6479 ±100% latency_stats.sum.wait_on_page_bit.find_data_page.[f2fs].f2fs_find_entry.[f2fs].f2fs_lookup.[f2fs].lookup_real.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
4129 ± 27% -34.1% 2721 ± 21% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
89.00 ± 27% -34.0% 58.75 ± 22% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
30.50 ± 36% -59.0% 12.50 ± 74% sched_debug.cfs_rq[3]:/.runnable_load_avg
3952 ± 34% -41.6% 2306 ± 39% sched_debug.cfs_rq[6]:/.avg->runnable_avg_sum
85.25 ± 34% -41.9% 49.50 ± 40% sched_debug.cfs_rq[6]:/.tg_runnable_contrib
3552 ± 36% -44.5% 1971 ± 23% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
207.00 ± 47% -72.9% 56.00 ± 93% sched_debug.cfs_rq[7]:/.load
76.50 ± 36% -45.1% 42.00 ± 23% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
3.67 ± 89% +445.5% 20.00 ± 23% sched_debug.cpu#0.cpu_load[0]
4.75 ±102% +163.2% 12.50 ± 6% sched_debug.cpu#0.cpu_load[1]
16.50 ± 53% -78.8% 3.50 ±140% sched_debug.cpu#1.cpu_load[1]
12.25 ± 49% -77.6% 2.75 ± 39% sched_debug.cpu#1.cpu_load[4]
33933 ± 41% -54.9% 15287 ± 1% sched_debug.cpu#2.nr_load_updates
3238629 ± 74% -96.6% 108909 ± 12% sched_debug.cpu#2.nr_switches
3238752 ± 74% -96.6% 109056 ± 12% sched_debug.cpu#2.sched_count
1570598 ± 75% -97.8% 35193 ± 18% sched_debug.cpu#2.sched_goidle
1582367 ± 76% -98.6% 21962 ± 2% sched_debug.cpu#2.ttwu_local
3380681 ± 73% -96.1% 132270 ± 13% sched_debug.cpu#6.nr_switches
3380800 ± 73% -96.1% 132424 ± 13% sched_debug.cpu#6.sched_count
1648415 ± 74% -97.0% 50035 ± 17% sched_debug.cpu#6.sched_goidle
203.25 ± 47% -72.4% 56.00 ± 93% sched_debug.cpu#7.load

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/5K/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
35663 ± 8% -44.4% 19828 ± 0% fsmark.time.involuntary_context_switches
13.00 ± 0% +7.7% 14.00 ± 0% fsmark.time.percent_of_cpu_this_job_got
645071 ± 0% +1.9% 657186 ± 0% fsmark.time.voluntary_context_switches
35663 ± 8% -44.4% 19828 ± 0% time.involuntary_context_switches
69916 ± 0% -83.1% 11789 ± 0% vmstat.system.cs
30442 ± 0% -96.3% 1137 ± 1% vmstat.system.in
3.49 ± 0% -37.3% 2.19 ± 0% turbostat.%Busy
110.50 ± 1% -42.8% 63.25 ± 0% turbostat.Avg_MHz
41.09 ± 1% -11.4% 36.40 ± 0% turbostat.CPU%c1
7.23 ± 4% +22.1% 8.83 ± 5% turbostat.CPU%c6
20352 ± 83% -59.3% 8281 ± 49% latency_stats.sum.call_rwsem_down_read_failed.f2fs_mkdir.[f2fs].vfs_mkdir.SyS_mkdir.entry_SYSCALL_64_fastpath
6985436 ± 1% +13.7% 7940904 ± 3% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_write
3675763 ± 1% +12.9% 4149821 ± 4% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write
5179210 ± 1% +13.6% 5882087 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].get_new_data_page.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open
412246 ± 0% +8.4% 446769 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].write_data_page.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].write_cache_pages.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs]
2.715e+08 ± 2% -11.1% 2.414e+08 ± 1% cpuidle.C1-NHM.time
4779213 ± 0% -93.2% 324049 ± 2% cpuidle.C1-NHM.usage
2.285e+08 ± 2% +13.2% 2.587e+08 ± 2% cpuidle.C6-NHM.time
105567 ± 1% +13.5% 119864 ± 2% cpuidle.C6-NHM.usage
31639 ± 11% -87.1% 4072 ± 57% cpuidle.POLL.time
8304 ± 10% -98.8% 101.00 ± 7% cpuidle.POLL.usage
-1817 ± -4% +37.1% -2491 ±-25% sched_debug.cfs_rq[3]:/.spread0
1313 ± 3% +12.7% 1480 ± 3% sched_debug.cfs_rq[5]:/.exec_clock
-1878 ± -5% +16.6% -2190 ± -1% sched_debug.cpu#0.nr_uninterruptible
375233 ±118% -90.3% 36371 ± 4% sched_debug.cpu#3.ttwu_count
503266 ± 30% +63.0% 820371 ± 8% sched_debug.cpu#5.avg_idle
15.50 ± 29% -59.7% 6.25 ± 70% sched_debug.cpu#5.cpu_load[3]
13.75 ± 18% -47.3% 7.25 ± 44% sched_debug.cpu#5.cpu_load[4]
743.00 ± 7% -18.3% 607.25 ± 5% sched_debug.cpu#5.nr_uninterruptible
774.25 ± 15% -21.1% 610.50 ± 6% sched_debug.cpu#6.nr_uninterruptible
23789 ± 58% -59.0% 9764 ± 2% sched_debug.cpu#7.nr_load_updates

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/8K/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
28399 ± 12% -57.0% 12208 ± 1% fsmark.time.involuntary_context_switches
12.50 ± 4% +12.0% 14.00 ± 0% fsmark.time.percent_of_cpu_this_job_got
400598 ± 0% +1.9% 408272 ± 0% fsmark.time.voluntary_context_switches
104101 ± 0% -88.9% 11556 ± 0% vmstat.system.cs
47821 ± 0% -97.6% 1125 ± 0% vmstat.system.in
27507 ± 3% -7.3% 25490 ± 3% meminfo.Active(anon)
27135 ± 3% -7.4% 25120 ± 3% meminfo.AnonPages
13576 ± 0% -11.3% 12048 ± 0% meminfo.Mapped
6878 ± 3% -7.4% 6370 ± 3% proc-vmstat.nr_active_anon
6796 ± 3% -7.5% 6288 ± 3% proc-vmstat.nr_anon_pages
3392 ± 0% -11.2% 3011 ± 0% proc-vmstat.nr_mapped
19880 ± 6% -17.3% 16446 ± 4% softirqs.RCU
17934 ± 2% -16.1% 15052 ± 4% softirqs.SCHED
29061 ± 3% -13.6% 25095 ± 6% softirqs.TIMER
28399 ± 12% -57.0% 12208 ± 1% time.involuntary_context_switches
11.65 ± 1% +12.1% 13.06 ± 0% time.system_time
0.49 ± 2% +20.7% 0.60 ± 3% time.user_time
1.821e+08 ± 1% -17.5% 1.502e+08 ± 2% cpuidle.C1-NHM.time
4650494 ± 0% -95.7% 199638 ± 1% cpuidle.C1-NHM.usage
1.391e+08 ± 1% +14.3% 1.59e+08 ± 1% cpuidle.C6-NHM.time
61904 ± 1% +20.1% 74370 ± 1% cpuidle.C6-NHM.usage
34499 ± 12% -90.5% 3270 ±113% cpuidle.POLL.time
8367 ± 16% -99.2% 64.00 ± 9% cpuidle.POLL.usage
4.26 ± 0% -48.8% 2.18 ± 0% turbostat.%Busy
139.00 ± 0% -54.7% 63.00 ± 0% turbostat.Avg_MHz
3255 ± 0% -11.1% 2892 ± 0% turbostat.Bzy_MHz
43.70 ± 1% -16.5% 36.48 ± 1% turbostat.CPU%c1
44.77 ± 1% +18.3% 52.95 ± 1% turbostat.CPU%c3
7.27 ± 2% +15.3% 8.38 ± 3% turbostat.CPU%c6
8144 ± 20% +112.7% 17320 ± 59% latency_stats.sum.call_rwsem_down_read_failed.f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
161602 ± 5% +27.7% 206366 ± 6% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1263747 ± 3% +19.4% 1508845 ± 4% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
662165 ± 2% +17.5% 777739 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].f2fs_convert_inline_inode.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write
3956363 ± 1% +22.3% 4840014 ± 1% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_write
436691 ± 1% +15.4% 504042 ± 0% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].write_cache_pages.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range
2101052 ± 1% +20.7% 2536800 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write
2931560 ± 2% +21.5% 3562381 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].get_new_data_page.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open
882333 ± 1% +18.1% 1042172 ± 1% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].get_read_data_page.[f2fs].find_data_page.[f2fs].f2fs_find_entry.[f2fs].f2fs_lookup.[f2fs].lookup_real.path_openat.do_filp_open
86763 ± 3% +24.8% 108290 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].update_inode_page.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
1293620 ± 2% +18.0% 1526759 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].update_inode_page.[f2fs].f2fs_write_end.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write
241754 ± 1% +14.7% 277276 ± 3% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].write_data_page.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].write_cache_pages.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs]
285314 ± 4% +31.5% 375080 ± 6% latency_stats.sum.call_rwsem_down_read_failed.need_dentry_mark.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
298289 ± 4% +36.2% 406142 ± 7% latency_stats.sum.call_rwsem_down_read_failed.need_inode_block_update.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1885703 ± 2% +17.4% 2213119 ± 2% latency_stats.sum.call_rwsem_down_write_failed.get_node_info.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
198811 ± 3% +34.9% 268119 ± 3% latency_stats.sum.call_rwsem_down_write_failed.set_node_addr.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
910989 ± 2% +17.8% 1072972 ± 2% latency_stats.sum.call_rwsem_down_write_failed.set_node_addr.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
363528 ± 0% +3.4% 375985 ± 0% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
9590 ± 9% -20.1% 7663 ± 5% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
211.25 ± 9% -21.1% 166.75 ± 6% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
7864 ± 14% -22.2% 6117 ± 10% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
172.00 ± 13% -22.7% 133.00 ± 11% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
6243 ± 10% +32.8% 8292 ± 12% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
1910 ± 14% +19.0% 2273 ± 4% sched_debug.cfs_rq[4]:/.min_vruntime
136.50 ± 11% +32.8% 181.25 ± 13% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
726.92 ± 6% +13.6% 825.51 ± 5% sched_debug.cfs_rq[6]:/.exec_clock
1844 ± 9% +17.6% 2169 ± 6% sched_debug.cfs_rq[6]:/.min_vruntime
8.00 ± 45% +209.4% 24.75 ± 11% sched_debug.cpu#0.cpu_load[3]
9.00 ± 22% +141.7% 21.75 ± 8% sched_debug.cpu#0.cpu_load[4]
-701.75 ±-39% +56.6% -1098 ± -2% sched_debug.cpu#0.nr_uninterruptible
546261 ± 90% -85.3% 80139 ± 3% sched_debug.cpu#0.ttwu_count
483716 ±101% -95.6% 21310 ± 2% sched_debug.cpu#0.ttwu_local
547901 ±166% -96.5% 19272 ± 14% sched_debug.cpu#3.ttwu_count
24.00 ± 59% -64.6% 8.50 ± 55% sched_debug.cpu#4.cpu_load[3]
20.50 ± 32% -57.3% 8.75 ± 38% sched_debug.cpu#4.cpu_load[4]
13541 ± 61% -62.1% 5134 ± 3% sched_debug.cpu#4.nr_load_updates
1533535 ± 99% -97.1% 43964 ± 29% sched_debug.cpu#4.nr_switches
1533560 ± 99% -97.1% 43984 ± 29% sched_debug.cpu#4.sched_count
760621 ± 99% -97.5% 19182 ± 34% sched_debug.cpu#4.sched_goidle
750180 ±102% -99.8% 1305 ± 5% sched_debug.cpu#4.ttwu_local
414.75 ± 6% -23.3% 318.25 ± 5% sched_debug.cpu#6.nr_uninterruptible
20.25 ± 30% -56.8% 8.75 ± 74% sched_debug.cpu#7.cpu_load[4]

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/ftq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
7640 ± 1% +5.5% 8056 ± 0% ftq.counts
0.17 ± 11% -82.1% 0.03 ± 6% ftq.stddev
1806627 ± 4% -98.6% 25037 ± 32% ftq.time.involuntary_context_switches
1437 ± 2% +5.6% 1518 ± 0% ftq.time.percent_of_cpu_this_job_got
546.95 ± 0% +3.5% 566.01 ± 0% ftq.time.user_time
16653 ± 0% -13.2% 14452 ± 0% meminfo.Mapped
4163 ± 0% -13.3% 3611 ± 0% proc-vmstat.nr_mapped
9329 ± 4% -17.0% 7746 ± 9% softirqs.SCHED
1.20 ± 2% -52.7% 0.57 ± 12% turbostat.CPU%c1
330.20 ± 5% -7.2% 306.54 ± 4% uptime.idle
96844 ± 0% -97.0% 2925 ± 4% vmstat.system.cs
61693 ± 0% -76.0% 14816 ± 1% vmstat.system.in
1806627 ± 4% -98.6% 25037 ± 32% time.involuntary_context_switches
2.15 ± 8% -50.9% 1.06 ± 1% time.system_time
820.50 ± 33% +45.4% 1192 ± 3% time.voluntary_context_switches
1857689 ± 17% -85.0% 278914 ± 73% cpuidle.C1-HSW.time
110169 ± 31% -99.3% 786.25 ± 4% cpuidle.C1-HSW.usage
779.50 ± 69% +78.4% 1390 ± 4% cpuidle.C3-HSW.usage
2663 ± 41% -48.2% 1380 ± 1% cpuidle.C6-HSW.usage
193.50 ± 95% -99.6% 0.75 ±173% cpuidle.POLL.time
125.00 ± 94% -99.8% 0.25 ±173% cpuidle.POLL.usage
6.25 ± 17% -36.0% 4.00 ± 17% sched_debug.cfs_rq[0]:/.nr_spread_over
118.31 ± 19% +50.0% 177.51 ± 18% sched_debug.cfs_rq[11]:/.exec_clock
1410 ± 23% -32.8% 948.03 ± 23% sched_debug.cfs_rq[14]:/.min_vruntime
16706 ± 38% -72.2% 4641 ± 40% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
367.50 ± 38% -72.2% 102.00 ± 41% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
6553 ± 29% -34.8% 4269 ± 12% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
143.25 ± 30% -35.1% 93.00 ± 12% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
1304 ± 17% +43.7% 1873 ± 28% sched_debug.cfs_rq[3]:/.min_vruntime
3651 ±139% +234.7% 12223 ± 54% sched_debug.cfs_rq[6]:/.blocked_load_avg
3651 ±139% +240.9% 12449 ± 53% sched_debug.cfs_rq[6]:/.tg_load_contrib
1316 ± 70% +500.7% 7907 ± 51% sched_debug.cfs_rq[8]:/.blocked_load_avg
1330 ± 71% +494.4% 7907 ± 51% sched_debug.cfs_rq[8]:/.tg_load_contrib
111.43 ± 6% -14.1% 95.77 ± 7% sched_debug.cfs_rq[9]:/.exec_clock
238565 ± 59% +271.1% 885357 ± 15% sched_debug.cpu#0.avg_idle
5.75 ± 85% +152.2% 14.50 ± 55% sched_debug.cpu#0.cpu_load[3]
3.25 ± 95% +1215.4% 42.75 ± 78% sched_debug.cpu#1.cpu_load[1]
5.25 ± 78% -119.0% -1.00 ±-300% sched_debug.cpu#10.nr_uninterruptible
2.50 ± 34% +230.0% 8.25 ± 40% sched_debug.cpu#12.nr_uninterruptible
468.25 ± 12% -21.3% 368.50 ± 8% sched_debug.cpu#13.sched_goidle
1.25 ±131% -420.0% -4.00 ±-20% sched_debug.cpu#14.nr_uninterruptible
73.25 ± 4% +39.6% 102.25 ± 13% sched_debug.cpu#14.ttwu_local
-3.50 ±-76% -228.6% 4.50 ± 57% sched_debug.cpu#15.nr_uninterruptible
319447 ± 52% +169.7% 861647 ± 13% sched_debug.cpu#2.avg_idle
3181 ± 38% -65.7% 1090 ± 20% sched_debug.cpu#4.nr_load_updates
787.75 ± 15% -23.5% 602.50 ± 6% sched_debug.cpu#5.sched_goidle
4.25 ± 45% +494.1% 25.25 ± 41% sched_debug.cpu#8.cpu_load[3]
9.00 ± 54% +200.0% 27.00 ± 27% sched_debug.cpu#8.cpu_load[4]
2045 ± 34% -41.3% 1201 ± 14% sched_debug.cpu#8.nr_switches
2048 ± 33% -41.2% 1203 ± 14% sched_debug.cpu#8.sched_count
789.00 ± 24% -47.5% 414.25 ± 26% sched_debug.cpu#8.sched_goidle
20.59 ± 92% -74.0% 5.36 ± 12% sched_debug.rt_rq[0]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/ftq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/powersave/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
7580 ± 0% +6.1% 8044 ± 0% ftq.counts
0.18 ± 7% -82.1% 0.03 ± 8% ftq.stddev
1761823 ± 3% -98.3% 29477 ± 38% ftq.time.involuntary_context_switches
1454 ± 1% +4.3% 1517 ± 0% ftq.time.percent_of_cpu_this_job_got
547.59 ± 0% +3.4% 566.04 ± 0% ftq.time.user_time
562.25 ± 23% +96.8% 1106 ± 5% ftq.time.voluntary_context_switches
16809 ± 0% -13.8% 14482 ± 0% meminfo.Mapped
4198 ± 0% -13.7% 3623 ± 0% proc-vmstat.nr_mapped
484.00 ± 6% +18.2% 572.00 ± 7% slabinfo.blkdev_requests.active_objs
484.00 ± 6% +18.2% 572.00 ± 7% slabinfo.blkdev_requests.num_objs
1.17 ± 2% -50.9% 0.57 ± 8% turbostat.CPU%c1
0.21 ±127% +215.5% 0.66 ± 33% turbostat.RAMWatt
96716 ± 0% -96.9% 2979 ± 10% vmstat.system.cs
61578 ± 0% -75.7% 14933 ± 1% vmstat.system.in
1761823 ± 3% -98.3% 29477 ± 38% time.involuntary_context_switches
1.52 ± 5% -28.3% 1.09 ± 2% time.system_time
562.25 ± 23% +96.8% 1106 ± 5% time.voluntary_context_switches
1633723 ± 55% -89.4% 172951 ± 3% cpuidle.C1-HSW.time
124268 ± 20% -99.4% 713.50 ± 8% cpuidle.C1-HSW.usage
104432 ± 22% +110.9% 220296 ± 63% cpuidle.C1E-HSW.time
552.75 ± 33% +33.6% 738.50 ± 3% cpuidle.C1E-HSW.usage
23.00 ± 43% -100.0% 0.00 ± 0% cpuidle.POLL.time
11.25 ± 51% -100.0% 0.00 ± 0% cpuidle.POLL.usage
49106 ± 3% +32.3% 64986 ± 8% sched_debug.cfs_rq[0]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[10]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[11]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[12]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[13]:/.tg_load_avg
860.75 ±116% +570.6% 5772 ± 87% sched_debug.cfs_rq[14]:/.blocked_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[14]:/.tg_load_avg
860.75 ±116% +617.8% 6178 ± 92% sched_debug.cfs_rq[14]:/.tg_load_contrib
2268 ± 20% -42.9% 1296 ± 24% sched_debug.cfs_rq[15]:/.min_vruntime
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[15]:/.tg_load_avg
48510 ± 5% +34.0% 64986 ± 8% sched_debug.cfs_rq[1]:/.tg_load_avg
48510 ± 5% +33.9% 64934 ± 8% sched_debug.cfs_rq[2]:/.tg_load_avg
48510 ± 5% +33.9% 64934 ± 8% sched_debug.cfs_rq[3]:/.tg_load_avg
48510 ± 5% +33.9% 64934 ± 8% sched_debug.cfs_rq[4]:/.tg_load_avg
48494 ± 5% +33.9% 64916 ± 8% sched_debug.cfs_rq[5]:/.tg_load_avg
48494 ± 5% +33.9% 64916 ± 8% sched_debug.cfs_rq[6]:/.tg_load_avg
48455 ± 5% +33.9% 64905 ± 8% sched_debug.cfs_rq[7]:/.tg_load_avg
0.50 ±100% +300.0% 2.00 ± 0% sched_debug.cfs_rq[8]:/.nr_spread_over
48404 ± 5% +34.1% 64905 ± 8% sched_debug.cfs_rq[8]:/.tg_load_avg
47692 ± 6% +35.5% 64637 ± 8% sched_debug.cfs_rq[9]:/.tg_load_avg
12.00 ± 27% -52.1% 5.75 ± 43% sched_debug.cpu#1.nr_uninterruptible
347.00 ± 7% +42.0% 492.75 ± 20% sched_debug.cpu#10.sched_goidle
518.00 ± 8% +24.0% 642.50 ± 6% sched_debug.cpu#11.sched_goidle
9.25 ± 62% +278.4% 35.00 ± 67% sched_debug.cpu#12.cpu_load[4]
5.75 ± 74% -100.0% 0.00 ± 0% sched_debug.cpu#14.nr_uninterruptible
1.00 ± 70% +2800.0% 29.00 ± 88% sched_debug.cpu#15.cpu_load[2]
3.00 ± 91% +608.3% 21.25 ± 62% sched_debug.cpu#15.cpu_load[3]
4.00 ±117% +275.0% 15.00 ± 54% sched_debug.cpu#15.cpu_load[4]
3.00 ±131% +91.7% 5.75 ±125% sched_debug.cpu#2.nr_uninterruptible
2443 ± 35% -46.9% 1296 ± 14% sched_debug.cpu#3.nr_load_updates
6257 ± 21% -47.1% 3309 ± 36% sched_debug.cpu#3.nr_switches
6262 ± 21% -47.1% 3315 ± 36% sched_debug.cpu#3.sched_count
2859 ± 24% -54.1% 1313 ± 35% sched_debug.cpu#3.sched_goidle
3022 ± 19% -46.9% 1604 ± 19% sched_debug.cpu#3.ttwu_count
1491 ± 55% -78.3% 323.00 ± 69% sched_debug.cpu#3.ttwu_local
994307 ± 0% -27.9% 716731 ± 21% sched_debug.cpu#6.avg_idle

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/fwq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
0.11 ± 3% -43.2% 0.06 ± 1% fwq.stddev
3230702 ± 1% -86.7% 430932 ± 3% fwq.time.involuntary_context_switches
159977 ± 11% -25.2% 119739 ± 0% latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
3230702 ± 1% -86.7% 430932 ± 3% time.involuntary_context_switches
1.28 ± 35% -32.7% 0.86 ± 2% time.system_time
0.21 ± 42% -70.6% 0.06 ± 17% turbostat.CPU%c1
0.73 ± 23% -31.1% 0.51 ± 15% turbostat.CPU%c6
16514 ± 0% -79.4% 3404 ± 2% vmstat.system.cs
23255 ± 0% -28.0% 16754 ± 0% vmstat.system.in
1081329 ± 53% -93.7% 67884 ± 33% cpuidle.C1-HSW.time
45064 ± 0% -99.4% 278.25 ± 22% cpuidle.C1-HSW.usage
64741983 ± 8% -39.7% 39054828 ± 14% cpuidle.C6-HSW.time
2214 ± 19% -45.3% 1211 ± 11% cpuidle.C6-HSW.usage
4.50 ± 74% -100.0% 0.00 ± -1% cpuidle.POLL.time
2.50 ± 60% -100.0% 0.00 ± -1% cpuidle.POLL.usage
152.75 ±127% +449.4% 839.25 ± 60% sched_debug.cfs_rq[10]:/.blocked_load_avg
204.75 ± 94% +339.8% 900.50 ± 56% sched_debug.cfs_rq[10]:/.tg_load_contrib
-73145 ±-135% -141.7% 30484 ± 48% sched_debug.cfs_rq[14]:/.spread0
51.50 ± 0% +14.1% 58.75 ± 9% sched_debug.cpu#0.cpu_load[1]
51.75 ± 1% +13.0% 58.50 ± 7% sched_debug.cpu#0.cpu_load[2]
51.75 ± 1% +11.1% 57.50 ± 6% sched_debug.cpu#0.cpu_load[3]
11.25 ± 9% -64.4% 4.00 ± 63% sched_debug.cpu#11.nr_uninterruptible
-7.50 ±-35% -153.3% 4.00 ± 93% sched_debug.cpu#2.nr_uninterruptible
1419 ± 26% +119.7% 3117 ± 22% sched_debug.cpu#3.sched_goidle
124974 ± 61% -72.4% 34432 ±108% sched_debug.cpu#5.nr_switches
4.50 ± 71% -177.8% -3.50 ±-123% sched_debug.cpu#5.nr_uninterruptible
125045 ± 61% -72.4% 34489 ±108% sched_debug.cpu#5.sched_count
47153 ± 75% -52.2% 22527 ±124% sched_debug.cpu#5.ttwu_count
33719 ± 76% -70.3% 10005 ±131% sched_debug.cpu#5.ttwu_local
53.75 ± 4% +18.1% 63.50 ± 18% sched_debug.cpu#7.cpu_load[1]
5925 ± 34% +483.7% 34588 ± 82% sched_debug.cpu#8.sched_count
56.25 ± 8% -10.2% 50.50 ± 0% sched_debug.cpu#9.cpu_load[1]
58.75 ± 10% -13.2% 51.00 ± 1% sched_debug.cpu#9.cpu_load[2]
61.50 ± 14% -16.3% 51.50 ± 2% sched_debug.cpu#9.cpu_load[3]
63.00 ± 17% -18.3% 51.50 ± 2% sched_debug.cpu#9.cpu_load[4]

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/fwq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/powersave/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
0.11 ± 2% -41.0% 0.06 ± 1% fwq.stddev
3249189 ± 1% -86.8% 428991 ± 8% fwq.time.involuntary_context_switches
0.19 ± 24% -58.1% 0.08 ± 10% turbostat.CPU%c1
3249189 ± 1% -86.8% 428991 ± 8% time.involuntary_context_switches
1.81 ± 12% -48.6% 0.93 ± 2% time.system_time
16527 ± 1% -79.2% 3430 ± 3% vmstat.system.cs
23258 ± 0% -28.0% 16746 ± 0% vmstat.system.in
41773 ± 2% -99.2% 318.75 ± 13% cpuidle.C1-HSW.usage
61756882 ± 15% -30.4% 43002599 ± 8% cpuidle.C6-HSW.time
2117 ± 9% -33.5% 1407 ± 3% cpuidle.C6-HSW.usage
66.00 ± 26% -21.6% 51.75 ± 5% sched_debug.cfs_rq[0]:/.load
0.75 ± 57% +633.3% 5.50 ± 58% sched_debug.cfs_rq[13]:/.nr_spread_over
181.50 ±139% +229.6% 598.25 ± 44% sched_debug.cfs_rq[15]:/.blocked_load_avg
239.00 ±104% +175.8% 659.25 ± 42% sched_debug.cfs_rq[15]:/.tg_load_contrib
893.00 ± 0% +14.5% 1022 ± 12% sched_debug.cfs_rq[1]:/.utilization_load_avg
-66095 ±-174% -144.5% 29384 ± 74% sched_debug.cfs_rq[4]:/.spread0
-48436 ±-144% -146.3% 22425 ± 36% sched_debug.cfs_rq[5]:/.spread0
66.00 ± 26% -21.6% 51.75 ± 5% sched_debug.cpu#0.load
807.00 ± 0% +7.3% 866.00 ± 6% sched_debug.cpu#11.curr->pid
4.25 ± 76% -84.3% 0.67 ±604% sched_debug.cpu#14.nr_uninterruptible
370327 ±158% -95.9% 15153 ± 80% sched_debug.cpu#15.nr_switches
370400 ±158% -95.9% 15239 ± 80% sched_debug.cpu#15.sched_count
186416 ±158% -97.0% 5614 ± 81% sched_debug.cpu#15.ttwu_count
-4.00 ±-81% -187.5% 3.50 ± 24% sched_debug.cpu#5.nr_uninterruptible
0.00 ± 71% +10603.7% 0.19 ± 13% sched_debug.rt_rq[2]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/runtime:
wsm/ku-latency/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/300s

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
1207 ± 4% -27.5% 876.00 ± 22% proc-vmstat.pgactivate
26029 ± 2% -21.2% 20524 ± 2% softirqs.SCHED
12.52 ± 5% -19.2% 10.12 ± 9% turbostat.CPU%c1
46.75 ± 2% -7.0% 43.50 ± 3% turbostat.CoreTmp
31128 ± 0% -92.1% 2449 ± 1% vmstat.system.cs
15692 ± 0% -92.3% 1214 ± 0% vmstat.system.in
5760097 ±125% -100.0% 0.00 ± -1% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
5760097 ±125% -100.0% 0.00 ± -1% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
5760097 ±125% -100.0% 0.00 ± -1% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
55981055 ± 4% -65.3% 19428998 ± 21% cpuidle.C1-NHM.time
4373782 ± 0% -99.5% 22463 ± 9% cpuidle.C1-NHM.usage
312.50 ± 78% -72.3% 86.50 ± 14% cpuidle.C1E-NHM.usage
2690 ± 28% -37.2% 1689 ± 16% cpuidle.C3-NHM.usage
1071 ± 23% -80.9% 204.25 ± 63% cpuidle.POLL.usage
97.00 ± 45% -83.8% 15.67 ± 70% sched_debug.cfs_rq[10]:/.runnable_load_avg
425.75 ± 34% -74.4% 109.00 ± 70% sched_debug.cfs_rq[10]:/.utilization_load_avg
-6567 ±-65% -1590.9% 97909 ±167% sched_debug.cfs_rq[11]:/.spread0
85.25 ± 74% +2038.1% 1822 ± 58% sched_debug.cfs_rq[2]:/.blocked_load_avg
217733 ± 95% -57.1% 93407 ±164% sched_debug.cfs_rq[2]:/.min_vruntime
209.25 ± 66% +813.3% 1911 ± 58% sched_debug.cfs_rq[2]:/.tg_load_contrib
2123 ± 49% -68.8% 662.75 ±100% sched_debug.cfs_rq[4]:/.blocked_load_avg
86.00 ± 53% -100.0% 0.00 ± -1% sched_debug.cfs_rq[4]:/.load
58.50 ± 51% -100.0% 0.00 ± -1% sched_debug.cfs_rq[4]:/.runnable_load_avg
2197 ± 48% -69.5% 671.25 ± 98% sched_debug.cfs_rq[4]:/.tg_load_contrib
300.25 ± 48% -100.0% 0.00 ± -1% sched_debug.cfs_rq[4]:/.utilization_load_avg
3463 ± 55% -48.3% 1789 ± 4% sched_debug.cfs_rq[6]:/.min_vruntime
1227502 ± 99% -98.3% 20386 ± 42% sched_debug.cpu#0.ttwu_count
62332 ± 85% -77.9% 13783 ± 3% sched_debug.cpu#1.nr_load_updates
14318 ± 18% +37.4% 19675 ± 19% sched_debug.cpu#1.ttwu_count
4361 ± 11% -20.1% 3483 ± 7% sched_debug.cpu#1.ttwu_local
146.25 ± 33% -82.2% 26.00 ± 13% sched_debug.cpu#10.cpu_load[0]
79.00 ± 36% -81.4% 14.67 ± 21% sched_debug.cpu#10.cpu_load[1]
44.00 ± 36% -83.5% 7.25 ± 58% sched_debug.cpu#10.cpu_load[2]
25.50 ± 37% -77.5% 5.75 ± 33% sched_debug.cpu#10.cpu_load[3]
15.00 ± 41% -70.0% 4.50 ± 24% sched_debug.cpu#10.cpu_load[4]
89157 ± 69% -47.0% 47258 ±126% sched_debug.cpu#2.nr_load_updates
9.50 ± 33% -76.3% 2.25 ±148% sched_debug.cpu#4.cpu_load[3]
7.25 ± 36% -82.8% 1.25 ±131% sched_debug.cpu#4.cpu_load[4]
1030 ± 36% -100.0% 0.00 ± -1% sched_debug.cpu#4.curr->pid
86.00 ± 53% -100.0% 0.00 ± -1% sched_debug.cpu#4.load
850245 ± 5% +11.7% 949380 ± 3% sched_debug.cpu#5.avg_idle
3578 ± 8% -48.9% 1830 ± 64% sched_debug.cpu#5.ttwu_local
925565 ± 7% -17.7% 761616 ± 6% sched_debug.cpu#6.avg_idle
649243 ± 95% -98.0% 12905 ± 31% sched_debug.cpu#6.ttwu_count
2529 ± 49% -63.9% 911.75 ± 83% sched_debug.cpu#9.ttwu_local

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/runtime/nr_threads/cluster/test:
lkp-t410/netperf/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/300s/200%/cs-localhost/TCP_SENDFILE

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
4404 ± 0% +6.5% 4689 ± 0% netperf.Throughput_Mbps
1902815 ± 4% -98.0% 38389 ± 9% netperf.time.involuntary_context_switches
208.00 ± 0% +2.3% 212.75 ± 0% netperf.time.percent_of_cpu_this_job_got
610.08 ± 0% +1.6% 619.92 ± 0% netperf.time.system_time
291566 ± 0% +12.0% 326523 ± 0% netperf.time.voluntary_context_switches
1.03 ± 4% +10.4% 1.14 ± 3% perf-profile.cpu-cycles.rw_verify_area.do_splice_to.splice_direct_to_actor.do_splice_direct.do_sendfile
2545 ± 2% -17.3% 2103 ± 1% proc-vmstat.pgactivate
0.21 ± 3% -95.2% 0.01 ± 0% turbostat.CPU%c1
0.22 ± 5% +146.0% 0.54 ± 3% turbostat.CPU%c6
21522 ± 1% -76.5% 5056 ± 0% vmstat.system.cs
11704 ± 1% -62.7% 4363 ± 0% vmstat.system.in
1902815 ± 4% -98.0% 38389 ± 9% time.involuntary_context_switches
17.50 ± 0% +19.8% 20.97 ± 0% time.user_time
291566 ± 0% +12.0% 326523 ± 0% time.voluntary_context_switches
291365 ± 0% +12.0% 326363 ± 0% latency_stats.hits.sk_stream_wait_memory.tcp_sendpage.inet_sendpage.kernel_sendpage.sock_sendpage.pipe_to_sendpage.__splice_from_pipe.splice_from_pipe.generic_splice_sendpage.direct_splice_actor.splice_direct_to_actor.do_splice_direct
975206 ± 4% -61.5% 374990 ± 1% latency_stats.hits.sk_wait_data.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom.SyS_recvfrom.entry_SYSCALL_64_fastpath
52419094 ± 1% +7.2% 56170711 ± 0% latency_stats.sum.sk_stream_wait_memory.tcp_sendpage.inet_sendpage.kernel_sendpage.sock_sendpage.pipe_to_sendpage.__splice_from_pipe.splice_from_pipe.generic_splice_sendpage.direct_splice_actor.splice_direct_to_actor.do_splice_direct
1.009e+09 ± 0% +5.3% 1.062e+09 ± 0% latency_stats.sum.sk_wait_data.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom.SyS_recvfrom.entry_SYSCALL_64_fastpath
794782 ± 8% -99.4% 4498 ± 40% cpuidle.C1-NHM.time
104769 ± 2% -99.8% 166.25 ± 31% cpuidle.C1-NHM.usage
482872 ± 14% -32.6% 325328 ± 28% cpuidle.C3-NHM.time
4632646 ± 1% +44.6% 6697379 ± 1% cpuidle.C6-NHM.time
337.25 ± 8% +35.4% 456.75 ± 9% cpuidle.C6-NHM.usage
1156 ± 15% -100.0% 0.00 ± 0% cpuidle.POLL.time
180.50 ± 17% -100.0% 0.00 ± 0% cpuidle.POLL.usage
505.75 ± 19% -25.4% 377.25 ± 7% sched_debug.cfs_rq[0]:/.load
443.50 ± 5% -13.0% 386.00 ± 6% sched_debug.cfs_rq[0]:/.runnable_load_avg
6167 ± 13% +18.1% 7285 ± 16% sched_debug.cfs_rq[0]:/.tg_load_avg
6057 ± 13% +17.6% 7121 ± 15% sched_debug.cfs_rq[1]:/.tg_load_avg
431.50 ± 14% +228.2% 1416 ± 5% sched_debug.cfs_rq[2]:/.blocked_load_avg
33.75 ± 11% -18.5% 27.50 ± 11% sched_debug.cfs_rq[2]:/.nr_spread_over
818.50 ± 6% +118.3% 1786 ± 5% sched_debug.cfs_rq[2]:/.tg_load_contrib
301.25 ± 14% +38.5% 417.25 ± 16% sched_debug.cfs_rq[3]:/.load
5609 ± 10% +21.4% 6807 ± 16% sched_debug.cfs_rq[3]:/.tg_load_avg
607848 ± 32% +35.5% 823868 ± 6% sched_debug.cpu#0.avg_idle
435380 ± 40% -52.0% 208833 ± 35% sched_debug.cpu#0.nr_switches
435948 ± 40% -52.0% 209079 ± 35% sched_debug.cpu#0.sched_count
1118781 ±122% -82.2% 198660 ± 26% sched_debug.cpu#1.nr_switches
1119297 ±122% -82.2% 198883 ± 26% sched_debug.cpu#1.sched_count
649029 ±121% -77.9% 143430 ± 19% sched_debug.cpu#1.ttwu_count
594549 ±120% -77.7% 132447 ± 20% sched_debug.cpu#1.ttwu_local
273528 ± 94% +214.0% 858787 ± 11% sched_debug.cpu#2.avg_idle
2797014 ± 51% -92.5% 209149 ± 21% sched_debug.cpu#2.nr_switches
2797284 ± 51% -92.5% 209354 ± 21% sched_debug.cpu#2.sched_count
1635953 ± 51% -90.9% 148709 ± 15% sched_debug.cpu#2.ttwu_count
1457289 ± 50% -90.5% 137841 ± 15% sched_debug.cpu#2.ttwu_local
331.25 ± 11% +26.0% 417.50 ± 15% sched_debug.cpu#3.load
617159 ± 28% -70.0% 185105 ± 33% sched_debug.cpu#3.nr_switches
617564 ± 28% -70.0% 185318 ± 33% sched_debug.cpu#3.sched_count
271272 ± 20% -49.9% 135909 ± 21% sched_debug.cpu#3.ttwu_count
254215 ± 19% -50.3% 126227 ± 22% sched_debug.cpu#3.ttwu_local

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test/cluster:
wsm/netpipe/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/tcp/cs-localhost

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
9.39 ± 1% -7.6% 8.68 ± 1% netpipe.less_8K_usec.avg
17994 ± 3% -19.9% 14406 ± 5% softirqs.RCU
0.29 ± 4% -13.7% 0.25 ± 6% time.user_time
106713 ± 1% -67.9% 34294 ± 4% vmstat.system.cs
37772 ± 0% -95.5% 1693 ± 0% vmstat.system.in
87490550 ± 4% -54.1% 40120043 ± 2% cpuidle.C1-NHM.time
6115692 ± 1% -72.2% 1698584 ± 4% cpuidle.C1-NHM.usage
2583 ± 5% -93.4% 170.00 ± 23% cpuidle.POLL.usage
16.39 ± 3% -10.9% 14.59 ± 2% turbostat.%Busy
585.25 ± 2% -9.7% 528.25 ± 2% turbostat.Avg_MHz
26.28 ± 2% -25.9% 19.47 ± 2% turbostat.CPU%c1
56.33 ± 0% +14.6% 64.54 ± 1% turbostat.CPU%c6
2320 ± 6% -10.0% 2087 ± 2% sched_debug.cfs_rq[0]:/.tg->runnable_avg
2328 ± 5% -9.6% 2104 ± 2% sched_debug.cfs_rq[10]:/.tg->runnable_avg
2329 ± 5% -9.4% 2109 ± 2% sched_debug.cfs_rq[11]:/.tg->runnable_avg
8.50 ± 50% -64.7% 3.00 ±-33% sched_debug.cfs_rq[1]:/.nr_spread_over
3012 ±687% +900.1% 30128 ±106% sched_debug.cfs_rq[1]:/.spread0
2326 ± 6% -10.2% 2088 ± 2% sched_debug.cfs_rq[1]:/.tg->runnable_avg
2326 ± 6% -10.0% 2094 ± 2% sched_debug.cfs_rq[2]:/.tg->runnable_avg
2327 ± 6% -9.9% 2096 ± 2% sched_debug.cfs_rq[3]:/.tg->runnable_avg
2330 ± 6% -10.0% 2096 ± 2% sched_debug.cfs_rq[4]:/.tg->runnable_avg
2326 ± 6% -9.8% 2097 ± 2% sched_debug.cfs_rq[5]:/.tg->runnable_avg
2326 ± 6% -9.6% 2102 ± 2% sched_debug.cfs_rq[6]:/.tg->runnable_avg
34300 ± 99% -96.8% 1105 ± 16% sched_debug.cfs_rq[7]:/.min_vruntime
2326 ± 6% -9.6% 2102 ± 2% sched_debug.cfs_rq[7]:/.tg->runnable_avg
2326 ± 6% -9.6% 2103 ± 2% sched_debug.cfs_rq[8]:/.tg->runnable_avg
2328 ± 5% -9.7% 2103 ± 2% sched_debug.cfs_rq[9]:/.tg->runnable_avg
29.00 ±141% +330.2% 124.75 ± 65% sched_debug.cpu#2.cpu_load[1]
520151 ±141% -87.1% 66855 ± 50% sched_debug.cpu#2.ttwu_count
52023 ± 71% -88.3% 6101 ± 92% sched_debug.cpu#6.nr_switches
52035 ± 71% -88.3% 6112 ± 92% sched_debug.cpu#6.sched_count
25756 ± 72% -89.0% 2837 ±100% sched_debug.cpu#6.sched_goidle

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/group:
lkp-t410/piglit/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/igt-044

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
value ± %stddev      %change      value ± %stddev      metric
83.99 ± 0% -14.7% 71.66 ± 0% piglit.time.elapsed_time
83.99 ± 0% -14.7% 71.66 ± 0% piglit.time.elapsed_time.max
3034690 ± 0% -97.2% 85516 ± 2% piglit.time.involuntary_context_switches
352.25 ± 0% +8.9% 383.75 ± 0% piglit.time.percent_of_cpu_this_job_got
285.25 ± 0% -5.8% 268.60 ± 0% piglit.time.user_time
16.75 ± 13% -25.4% 12.50 ± 12% vmstat.procs.r
85019 ± 0% -86.1% 11835 ± 3% vmstat.system.cs
42758 ± 0% -88.2% 5029 ± 0% vmstat.system.in
83.99 ± 0% -14.7% 71.66 ± 0% time.elapsed_time
83.99 ± 0% -14.7% 71.66 ± 0% time.elapsed_time.max
3034690 ± 0% -97.2% 85516 ± 2% time.involuntary_context_switches
11.15 ± 0% -40.4% 6.65 ± 0% time.system_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/blocksize:
lkp-sb02/pigz/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/512K

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
65777721 ± 0% +1.5% 66758347 ± 0% pigz.throughput
4731453 ± 0% -95.3% 223426 ± 2% pigz.time.involuntary_context_switches
17391 ± 1% +2.3% 17800 ± 0% pigz.time.minor_page_faults
394.00 ± 0% +1.0% 398.00 ± 0% pigz.time.percent_of_cpu_this_job_got
1170 ± 0% +1.3% 1185 ± 0% pigz.time.user_time
244236 ± 0% +2.8% 250954 ± 0% pigz.time.voluntary_context_switches
34446 ± 32% -99.6% 129.50 ± 60% latency_stats.sum.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
13004 ± 2% -6.2% 12197 ± 4% meminfo.AnonHugePages
4731453 ± 0% -95.3% 223426 ± 2% time.involuntary_context_switches
13.98 ± 0% -13.6% 12.07 ± 0% time.system_time
0.06 ± 9% -81.8% 0.01 ± 0% turbostat.CPU%c1
0.14 ± 3% +27.3% 0.18 ± 2% turbostat.CPU%c6
33869 ± 0% -89.4% 3581 ± 2% vmstat.system.cs
19774 ± 0% -76.7% 4609 ± 0% vmstat.system.in
591398 ± 9% -94.5% 32236 ± 4% cpuidle.C1-SNB.time
59726 ± 10% -97.7% 1370 ± 84% cpuidle.C1-SNB.usage
2880102 ± 1% +17.3% 3377630 ± 0% cpuidle.C6-SNB.time
76.75 ± 10% -100.0% 0.00 ± 0% cpuidle.POLL.time
43.25 ± 18% -100.0% 0.00 ± 0% cpuidle.POLL.usage
0.74 ± 33% -59.1% 0.30 ± 75% perf-profile.cpu-cycles.call_timer_fn.run_timer_softirq.__do_softirq.irq_exit.smp_apic_timer_interrupt
1.14 ± 72% -100.0% 0.00 ± -1% perf-profile.cpu-cycles.copy_process.part.27._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 0.56 ± 56% perf-profile.cpu-cycles.copy_process.part.28._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.95 ± 21% +33.9% 1.27 ± 3% perf-profile.cpu-cycles.free_hot_cold_page.put_page.anon_pipe_buf_release.pipe_read.__vfs_read
0.51 ± 43% +243.1% 1.75 ± 34% perf-profile.cpu-cycles.kthread.ret_from_fork
0.26 ± 66% +316.3% 1.08 ± 39% perf-profile.cpu-cycles.rcu_nocb_kthread.kthread.ret_from_fork
0.51 ± 43% +243.1% 1.75 ± 34% perf-profile.cpu-cycles.ret_from_fork
198.25 ± 10% +26.1% 250.00 ± 9% sched_debug.cfs_rq[1]:/.runnable_load_avg
26484 ± 49% -107.5% -1995 ±-124% sched_debug.cfs_rq[2]:/.spread0
26370 ± 45% -105.4% -1415 ±-116% sched_debug.cfs_rq[3]:/.spread0
5582478 ± 33% -97.5% 142112 ± 16% sched_debug.cpu#0.nr_switches
5582876 ± 33% -97.4% 142504 ± 16% sched_debug.cpu#0.sched_count
40451 ± 39% -87.2% 5176 ± 26% sched_debug.cpu#0.sched_goidle
2803309 ± 33% -97.2% 77183 ± 14% sched_debug.cpu#0.ttwu_count
2781915 ± 33% -98.0% 55288 ± 16% sched_debug.cpu#0.ttwu_local
2354598 ± 80% -93.5% 153545 ± 7% sched_debug.cpu#1.nr_switches
2354718 ± 80% -93.5% 153683 ± 7% sched_debug.cpu#1.sched_count
1185125 ± 79% -93.2% 80529 ± 10% sched_debug.cpu#1.ttwu_count
1164849 ± 81% -94.8% 60184 ± 14% sched_debug.cpu#1.ttwu_local
3834 ± 21% -35.2% 2483 ± 8% sched_debug.cpu#2.sched_goidle

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/test:
nhm-white/unixbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/shell1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
2494800 ± 0% -6.0% 2345617 ± 0% unixbench.time.involuntary_context_switches
1.085e+09 ± 0% -2.4% 1.059e+09 ± 0% latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
52548 ± 5% -33.6% 34875 ± 0% vmstat.system.cs
14831 ± 9% -59.3% 6034 ± 0% vmstat.system.in
47.37 ± 0% -2.0% 46.41 ± 0% turbostat.%Busy
1360 ± 0% -2.9% 1321 ± 0% turbostat.Avg_MHz
7.69 ± 0% +16.3% 8.94 ± 3% turbostat.CPU%c6
3.51 ± 0% +31.3% 4.61 ± 6% turbostat.Pkg%pc6
65744708 ± 2% -50.6% 32458256 ± 0% cpuidle.C1-NHM.time
3629185 ± 14% -88.0% 435580 ± 2% cpuidle.C1-NHM.usage
4.728e+08 ± 0% +13.3% 5.358e+08 ± 2% cpuidle.C6-NHM.time
602583 ± 0% +11.6% 672654 ± 0% cpuidle.C6-NHM.usage
141592 ± 20% -75.6% 34556 ± 25% cpuidle.POLL.time
14080 ± 14% -91.1% 1252 ± 2% cpuidle.POLL.usage
18338 ± 14% +16.4% 21343 ± 4% sched_debug.cfs_rq[2]:/.blocked_load_avg
18489 ± 14% +16.1% 21467 ± 4% sched_debug.cfs_rq[2]:/.tg_load_contrib
146020 ± 5% -7.3% 135370 ± 0% sched_debug.cpu#0.nr_load_updates
3000951 ± 78% -78.9% 633028 ± 2% sched_debug.cpu#0.nr_switches
3001380 ± 78% -78.9% 633584 ± 2% sched_debug.cpu#0.sched_count
1354868 ± 82% -82.8% 233428 ± 3% sched_debug.cpu#0.sched_goidle
1426794 ± 82% -83.9% 230146 ± 1% sched_debug.cpu#0.ttwu_count
1287294 ± 92% -93.0% 90490 ± 0% sched_debug.cpu#0.ttwu_local
7751 ± 16% +37.1% 10628 ± 6% sched_debug.cpu#1.curr->pid
685476 ± 64% -65.6% 235696 ± 4% sched_debug.cpu#1.ttwu_count
535746 ± 83% -83.4% 88784 ± 1% sched_debug.cpu#1.ttwu_local
69.00 ± 4% -15.9% 58.00 ± 13% sched_debug.cpu#2.cpu_load[2]
69.00 ± 3% -17.8% 56.75 ± 10% sched_debug.cpu#2.cpu_load[3]
68.25 ± 4% -17.2% 56.50 ± 10% sched_debug.cpu#2.cpu_load[4]
-69.25 ±-14% -33.2% -46.25 ±-20% sched_debug.cpu#3.nr_uninterruptible
7500 ± 19% +62.1% 12161 ± 17% sched_debug.cpu#4.curr->pid
219010 ± 3% -8.1% 201264 ± 3% sched_debug.cpu#4.sched_goidle
93751 ± 5% -7.3% 86883 ± 0% sched_debug.cpu#4.ttwu_local
137580 ± 4% -5.7% 129702 ± 0% sched_debug.cpu#5.nr_load_updates
2993388 ± 79% -80.6% 581607 ± 1% sched_debug.cpu#5.nr_switches
2993731 ± 79% -80.6% 581990 ± 1% sched_debug.cpu#5.sched_count
1373366 ± 84% -85.1% 204662 ± 1% sched_debug.cpu#5.sched_goidle
1296037 ± 93% -93.0% 90170 ± 0% sched_debug.cpu#5.ttwu_local
78.50 ± 28% -43.0% 44.75 ± 45% sched_debug.cpu#7.nr_uninterruptible
0.01 ± 59% +21923.0% 1.25 ±171% sched_debug.rt_rq[3]:/.rt_time
0.00 ± 58% +7898.3% 0.38 ±169% sched_debug.rt_rq[7]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/test:
nhm-white/unixbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/shell8

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
5856190 ± 1% -25.5% 4360557 ± 0% unixbench.time.involuntary_context_switches
5856190 ± 1% -25.5% 4360557 ± 0% time.involuntary_context_switches
3.14 ± 3% +29.0% 4.05 ± 2% turbostat.Pkg%pc6
61840 ± 2% -26.2% 45627 ± 0% vmstat.system.cs
18904 ± 3% -44.2% 10543 ± 0% vmstat.system.in
44596831 ± 2% -34.1% 29385365 ± 1% cpuidle.C1-NHM.time
1853539 ± 8% -87.6% 229126 ± 1% cpuidle.C1-NHM.usage
246889 ± 17% -90.1% 24414 ± 38% cpuidle.POLL.time
31710 ± 8% -96.8% 1025 ± 3% cpuidle.POLL.usage
24168 ± 12% +18.9% 28748 ± 4% sched_debug.cfs_rq[0]:/.blocked_load_avg
24325 ± 13% +18.8% 28899 ± 3% sched_debug.cfs_rq[0]:/.tg_load_contrib
91.50 ± 13% +31.7% 120.50 ± 21% sched_debug.cfs_rq[1]:/.runnable_load_avg
83.00 ± 7% +29.2% 107.25 ± 14% sched_debug.cfs_rq[5]:/.runnable_load_avg
70.75 ± 18% +31.1% 92.75 ± 11% sched_debug.cfs_rq[7]:/.runnable_load_avg
7677 ± 34% +46.0% 11210 ± 10% sched_debug.cpu#2.curr->pid
389892 ± 17% +27.3% 496226 ± 13% sched_debug.cpu#4.avg_idle
6249 ± 27% +87.1% 11693 ± 7% sched_debug.cpu#6.curr->pid

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/getppid1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
18473072 ± 3% -11.3% 16379556 ± 0% will-it-scale.per_process_ops
18119608 ± 1% -9.5% 16402159 ± 0% will-it-scale.per_thread_ops
0.74 ± 7% +18.5% 0.87 ± 2% will-it-scale.scalability
628.18 ± 0% +4.4% 655.83 ± 0% will-it-scale.time.system_time
434.65 ± 0% -6.3% 407.30 ± 0% will-it-scale.time.user_time
22243 ± 4% -3.3% 21501 ± 4% meminfo.AnonPages
5560 ± 4% -3.3% 5374 ± 4% proc-vmstat.nr_anon_pages
23934 ± 4% -22.1% 18653 ± 3% softirqs.SCHED
18.03 ± 7% +14.8% 20.69 ± 0% turbostat.CPU%c6
1802 ± 2% +21.0% 2181 ± 4% slabinfo.kmalloc-512.active_objs
1802 ± 2% +22.4% 2206 ± 3% slabinfo.kmalloc-512.num_objs
30351 ± 0% -93.9% 1837 ± 0% vmstat.system.cs
21766 ± 0% -65.9% 7418 ± 0% vmstat.system.in
49070159 ± 12% -85.0% 7354447 ± 24% cpuidle.C1-NHM.time
4289833 ± 8% -97.5% 107531 ± 5% cpuidle.C1-NHM.usage
1606 ± 7% -27.1% 1171 ± 10% cpuidle.POLL.usage
21.60 ± 1% -22.8% 16.68 ± 0% perf-profile.cpu-cycles.entry_SYSCALL_64
30.96 ± 2% -22.8% 23.90 ± 0% perf-profile.cpu-cycles.entry_SYSCALL_64_after_swapgs
44.96 ± 2% +25.8% 56.58 ± 2% perf-profile.cpu-cycles.entry_SYSCALL_64_fastpath
9.80 ± 16% +156.6% 25.16 ± 2% perf-profile.cpu-cycles.pid_vnr.entry_SYSCALL_64_fastpath
6.40 ± 13% -50.6% 3.16 ± 4% perf-profile.cpu-cycles.pid_vnr.sys_getppid.entry_SYSCALL_64_fastpath
11.04 ± 7% +15.1% 12.70 ± 2% perf-profile.cpu-cycles.sys_getppid.entry_SYSCALL_64_fastpath
13943 ± 10% +25.4% 17491 ± 6% sched_debug.cfs_rq[0]:/.tg_load_avg
13886 ± 10% +25.1% 17373 ± 6% sched_debug.cfs_rq[10]:/.tg_load_avg
13888 ± 10% +24.9% 17352 ± 6% sched_debug.cfs_rq[11]:/.tg_load_avg
2.25 ± 19% +66.7% 3.75 ± 22% sched_debug.cfs_rq[1]:/.nr_spread_over
13949 ± 10% +25.1% 17452 ± 6% sched_debug.cfs_rq[1]:/.tg_load_avg
13949 ± 10% +25.1% 17456 ± 6% sched_debug.cfs_rq[2]:/.tg_load_avg
13948 ± 10% +25.1% 17447 ± 6% sched_debug.cfs_rq[3]:/.tg_load_avg
13929 ± 10% +25.2% 17441 ± 6% sched_debug.cfs_rq[4]:/.tg_load_avg
13931 ± 10% +24.9% 17406 ± 6% sched_debug.cfs_rq[5]:/.tg_load_avg
13929 ± 10% +24.8% 17379 ± 6% sched_debug.cfs_rq[6]:/.tg_load_avg
13910 ± 10% +24.9% 17379 ± 6% sched_debug.cfs_rq[7]:/.tg_load_avg
13890 ± 10% +25.1% 17371 ± 6% sched_debug.cfs_rq[8]:/.tg_load_avg
13888 ± 10% +25.1% 17370 ± 6% sched_debug.cfs_rq[9]:/.tg_load_avg
144.75 ± 52% -47.8% 75.50 ± 31% sched_debug.cpu#1.cpu_load[0]
128.25 ± 38% -43.3% 72.75 ± 14% sched_debug.cpu#1.cpu_load[1]
106.25 ± 25% -33.4% 70.75 ± 7% sched_debug.cpu#1.cpu_load[2]
90.25 ± 15% -23.0% 69.50 ± 4% sched_debug.cpu#1.cpu_load[3]
82.00 ± 7% -15.9% 69.00 ± 3% sched_debug.cpu#1.cpu_load[4]
1005269 ±166% -98.4% 16346 ± 25% sched_debug.cpu#1.nr_switches
1005304 ±166% -98.4% 16385 ± 25% sched_debug.cpu#1.sched_count
499712 ±168% -98.6% 6885 ± 37% sched_debug.cpu#1.sched_goidle
1.50 ±145% +83.3% 2.75 ±150% sched_debug.cpu#10.nr_uninterruptible
2154 ± 14% -38.3% 1330 ± 32% sched_debug.cpu#3.curr->pid
1562 ± 33% +40.0% 2187 ± 9% sched_debug.cpu#4.curr->pid
42.25 ± 2% +31.4% 55.50 ± 20% sched_debug.cpu#7.cpu_load[1]
42.75 ± 3% +19.3% 51.00 ± 12% sched_debug.cpu#7.cpu_load[2]
43.50 ± 5% +13.2% 49.25 ± 10% sched_debug.cpu#7.cpu_load[3]
441090 ±167% -99.5% 2153 ± 51% sched_debug.cpu#7.ttwu_count
72.75 ± 32% -39.9% 43.75 ± 9% sched_debug.cpu#8.cpu_load[0]

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/open1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
801101 ± 4% +6.5% 853345 ± 0% will-it-scale.per_process_ops
0.56 ± 10% -11.5% 0.50 ± 0% will-it-scale.scalability
2004 ±100% +233.7% 6688 ±100% latency_stats.sum.wait_woken.inotify_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
40.09 ± 1% -9.6% 36.25 ± 0% time.user_time
357.00 ± 13% +32.4% 472.50 ± 7% slabinfo.mnt_cache.active_objs
357.00 ± 13% +32.4% 472.50 ± 7% slabinfo.mnt_cache.num_objs
90485 ± 2% -7.4% 83796 ± 0% softirqs.RCU
49819 ± 2% -10.7% 44472 ± 1% softirqs.SCHED
30306 ± 2% -91.1% 2685 ± 1% vmstat.system.cs
21928 ± 1% -63.7% 7960 ± 0% vmstat.system.in
41567129 ± 11% -90.4% 3994402 ± 30% cpuidle.C1-NHM.time
3844950 ± 16% -97.0% 115267 ± 3% cpuidle.C1-NHM.usage
16389120 ± 54% -56.8% 7074969 ± 15% cpuidle.C3-NHM.time
1491 ± 49% -91.1% 132.00 ± 17% cpuidle.POLL.usage
1.54 ± 3% -18.8% 1.26 ± 4% perf-profile.cpu-cycles.__call_rcu.call_rcu_sched.__fput.____fput.task_work_run
1.21 ± 5% -18.7% 0.98 ± 2% perf-profile.cpu-cycles.__fd_install.fd_install.do_sys_open.sys_open.entry_SYSCALL_64_fastpath
3.01 ± 4% +11.0% 3.34 ± 3% perf-profile.cpu-cycles.__inode_permission.inode_permission.may_open.path_openat.do_filp_open
1.81 ± 2% +11.1% 2.01 ± 3% perf-profile.cpu-cycles.__memset.get_empty_filp.path_openat.do_filp_open.do_sys_open
1.47 ± 2% -11.4% 1.31 ± 3% perf-profile.cpu-cycles.__slab_alloc.kmem_cache_alloc.get_empty_filp.path_openat.do_filp_open
3.61 ± 2% -10.2% 3.25 ± 1% perf-profile.cpu-cycles.call_rcu_sched.__fput.____fput.task_work_run.prepare_exit_to_usermode
10.03 ± 1% -7.9% 9.24 ± 1% perf-profile.cpu-cycles.do_dentry_open.vfs_open.path_openat.do_filp_open.do_sys_open
1.39 ± 4% -18.2% 1.14 ± 3% perf-profile.cpu-cycles.fd_install.do_sys_open.sys_open.entry_SYSCALL_64_fastpath
2.50 ± 2% +8.9% 2.72 ± 2% perf-profile.cpu-cycles.get_unused_fd_flags.do_sys_open.sys_open.entry_SYSCALL_64_fastpath
3.31 ± 5% +10.4% 3.66 ± 2% perf-profile.cpu-cycles.inode_permission.may_open.path_openat.do_filp_open.do_sys_open
1.15 ± 5% +9.8% 1.26 ± 3% perf-profile.cpu-cycles.kfree.selinux_file_free_security.security_file_free.__fput.____fput
2.14 ± 2% -13.6% 1.85 ± 2% perf-profile.cpu-cycles.kmem_cache_alloc_trace.selinux_file_alloc_security.security_file_alloc.get_empty_filp.path_openat
3.62 ± 2% -8.8% 3.30 ± 1% perf-profile.cpu-cycles.security_file_alloc.get_empty_filp.path_openat.do_filp_open.do_sys_open
1.45 ± 5% +11.9% 1.62 ± 3% perf-profile.cpu-cycles.security_file_free.__fput.____fput.task_work_run.prepare_exit_to_usermode
2.13 ± 6% +15.2% 2.46 ± 4% perf-profile.cpu-cycles.security_inode_permission.__inode_permission.inode_permission.may_open.path_openat
3.30 ± 2% -8.7% 3.01 ± 2% perf-profile.cpu-cycles.selinux_file_alloc_security.security_file_alloc.get_empty_filp.path_openat.do_filp_open
1.18 ± 5% +8.7% 1.29 ± 2% perf-profile.cpu-cycles.selinux_file_free_security.security_file_free.__fput.____fput.task_work_run
3.21 ± 3% +12.8% 3.63 ± 1% perf-profile.cpu-cycles.selinux_inode_permission.security_inode_permission.__inode_permission.inode_permission.link_path_walk
1.83 ± 7% +17.2% 2.15 ± 5% perf-profile.cpu-cycles.selinux_inode_permission.security_inode_permission.__inode_permission.inode_permission.may_open
10.79 ± 1% -9.4% 9.78 ± 1% perf-profile.cpu-cycles.vfs_open.path_openat.do_filp_open.do_sys_open.sys_open
1138 ± 30% +58.8% 1807 ± 9% sched_debug.cfs_rq[11]:/.blocked_load_avg
1199 ± 27% +57.1% 1884 ± 8% sched_debug.cfs_rq[11]:/.tg_load_contrib
3.00 ± 84% +233.3% 10.00 ± 7% sched_debug.cfs_rq[2]:/.nr_spread_over
30468 ± 13% -17.6% 25115 ± 3% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
663.25 ± 13% -17.4% 547.75 ± 3% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
3028 ± 59% -57.3% 1291 ± 56% sched_debug.cfs_rq[8]:/.blocked_load_avg
3.25 ± 50% +169.2% 8.75 ± 21% sched_debug.cfs_rq[8]:/.nr_spread_over
3092 ± 57% -56.7% 1340 ± 54% sched_debug.cfs_rq[8]:/.tg_load_contrib
109897 ± 9% -16.2% 92140 ± 3% sched_debug.cpu#0.nr_load_updates
97.75 ± 23% -32.7% 65.75 ± 14% sched_debug.cpu#1.cpu_load[0]
88.00 ± 10% -26.7% 64.50 ± 7% sched_debug.cpu#1.cpu_load[1]
2.00 ± 93% -325.0% -4.50 ±-114% sched_debug.cpu#1.nr_uninterruptible
62.25 ± 8% -12.9% 54.25 ± 13% sched_debug.cpu#11.cpu_load[4]
24550 ± 18% -31.1% 16923 ± 24% sched_debug.cpu#2.ttwu_count
12972 ± 9% -37.0% 8171 ± 30% sched_debug.cpu#2.ttwu_local
1038 ± 41% +92.4% 1998 ± 9% sched_debug.cpu#4.curr->pid
15880 ± 17% +19.8% 19022 ± 4% sched_debug.cpu#4.sched_goidle
1492076 ± 99% -99.4% 8330 ± 50% sched_debug.cpu#6.nr_switches
1492358 ± 99% -99.4% 8627 ± 48% sched_debug.cpu#6.sched_count
614310 ±100% -99.6% 2432 ± 64% sched_debug.cpu#6.sched_goidle
62.75 ± 16% -23.9% 47.75 ± 18% sched_debug.cpu#7.cpu_load[2]
59.50 ± 16% -22.7% 46.00 ± 12% sched_debug.cpu#7.cpu_load[3]
1.00 ±212% +775.0% 8.75 ± 28% sched_debug.cpu#8.nr_uninterruptible

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/pthread_mutex2

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
25290 ± 8% -28.3% 18121 ± 3% softirqs.SCHED
30175 ± 0% -94.6% 1620 ± 5% vmstat.system.cs
21733 ± 0% -66.0% 7399 ± 0% vmstat.system.in
50171812 ± 9% -80.7% 9701068 ± 55% cpuidle.C1-NHM.time
4178650 ± 12% -98.0% 83770 ± 12% cpuidle.C1-NHM.usage
1465 ± 6% -27.9% 1057 ± 17% cpuidle.POLL.usage
60.19 ± 0% -1.5% 59.27 ± 0% turbostat.%Busy
2118 ± 0% -1.6% 2085 ± 0% turbostat.Avg_MHz
17.82 ± 6% +15.7% 20.61 ± 0% turbostat.CPU%c6
0.07 ±133% +1864.3% 1.38 ± 73% perf-profile.cpu-cycles.__libc_start_main
0.07 ±133% +1814.3% 1.34 ± 75% perf-profile.cpu-cycles._start.main.__libc_start_main
1.72 ±110% -100.0% 0.00 ± -1% perf-profile.cpu-cycles.copy_process.part.27._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.07 ±133% +1814.3% 1.34 ± 75% perf-profile.cpu-cycles.main.__libc_start_main
807.25 ± 17% -26.5% 593.25 ± 24% sched_debug.cfs_rq[3]:/.utilization_load_avg
2069 ± 45% -80.7% 398.50 ± 40% sched_debug.cfs_rq[4]:/.blocked_load_avg
2162 ± 45% -78.0% 475.25 ± 38% sched_debug.cfs_rq[4]:/.tg_load_contrib
419067 ±169% -98.3% 7041 ± 4% sched_debug.cpu#0.sched_goidle
81.50 ± 3% -22.1% 63.50 ± 22% sched_debug.cpu#1.cpu_load[2]
16437 ± 46% -76.4% 3884 ± 57% sched_debug.cpu#10.ttwu_count
80953 ± 18% -24.0% 61501 ± 14% sched_debug.cpu#2.nr_load_updates
68.50 ± 15% -27.7% 49.50 ± 18% sched_debug.cpu#3.cpu_load[0]
67.25 ± 8% -15.2% 57.00 ± 12% sched_debug.cpu#4.cpu_load[3]
65.75 ± 8% -16.7% 54.75 ± 10% sched_debug.cpu#4.cpu_load[4]
39032 ± 14% -38.8% 23893 ± 40% sched_debug.cpu#4.nr_switches
39071 ± 14% -38.8% 23926 ± 40% sched_debug.cpu#4.sched_count
19248 ± 12% -42.4% 11089 ± 26% sched_debug.cpu#4.ttwu_count
9496 ± 6% -67.8% 3057 ± 30% sched_debug.cpu#4.ttwu_local

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/readseek3

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
2439605 ± 0% +1.8% 2484631 ± 0% will-it-scale.per_process_ops
1842733 ± 1% +5.8% 1949535 ± 1% will-it-scale.per_thread_ops
0.54 ± 0% -1.8% 0.53 ± 1% will-it-scale.scalability
14706 ± 31% +52.6% 22440 ± 16% will-it-scale.time.involuntary_context_switches
136239 ± 2% +44.4% 196725 ± 3% latency_stats.hits.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
7.40 ± 3% -9.7% 6.68 ± 2% perf-profile.cpu-cycles.rw_verify_area.vfs_read.sys_read.entry_SYSCALL_64_fastpath
25530 ± 1% -19.8% 20467 ± 1% softirqs.SCHED
14706 ± 31% +52.6% 22440 ± 16% time.involuntary_context_switches
30169 ± 0% -93.6% 1945 ± 2% vmstat.system.cs
21810 ± 0% -65.8% 7449 ± 0% vmstat.system.in
59.90 ± 0% -1.5% 59.02 ± 0% turbostat.%Busy
2108 ± 0% -1.5% 2076 ± 0% turbostat.Avg_MHz
17.43 ± 0% +17.6% 20.50 ± 0% turbostat.CPU%c6
46658026 ± 4% -77.3% 10588249 ± 14% cpuidle.C1-NHM.time
4461627 ± 0% -97.5% 112631 ± 5% cpuidle.C1-NHM.usage
135274 ± 85% +363.4% 626895 ± 80% cpuidle.C1E-NHM.time
103.25 ± 12% +71.4% 177.00 ± 20% cpuidle.C1E-NHM.usage
38945629 ± 6% -29.3% 27546659 ± 19% cpuidle.POLL.time
1523 ± 10% -26.2% 1124 ± 3% cpuidle.POLL.usage
14514 ± 6% +21.2% 17593 ± 6% sched_debug.cfs_rq[0]:/.tg_load_avg
-69524 ±-63% +63.0% -113345 ±-40% sched_debug.cfs_rq[10]:/.spread0
14486 ± 6% +20.9% 17508 ± 6% sched_debug.cfs_rq[10]:/.tg_load_avg
14468 ± 6% +21.0% 17508 ± 6% sched_debug.cfs_rq[11]:/.tg_load_avg
58.25 ± 7% +32.6% 77.25 ± 25% sched_debug.cfs_rq[1]:/.runnable_load_avg
14520 ± 6% +21.1% 17583 ± 6% sched_debug.cfs_rq[1]:/.tg_load_avg
14468 ± 6% +21.4% 17560 ± 6% sched_debug.cfs_rq[2]:/.tg_load_avg
14471 ± 6% +21.3% 17557 ± 6% sched_debug.cfs_rq[3]:/.tg_load_avg
14483 ± 6% +21.1% 17545 ± 6% sched_debug.cfs_rq[4]:/.tg_load_avg
14489 ± 6% +20.8% 17503 ± 6% sched_debug.cfs_rq[5]:/.tg_load_avg
14493 ± 6% +20.7% 17487 ± 6% sched_debug.cfs_rq[6]:/.tg_load_avg
52254 ± 84% -94.8% 2722 ±1818% sched_debug.cfs_rq[7]:/.spread0
14492 ± 6% +20.6% 17481 ± 6% sched_debug.cfs_rq[7]:/.tg_load_avg
14492 ± 6% +20.9% 17523 ± 6% sched_debug.cfs_rq[8]:/.tg_load_avg
-34317 ±-276% +228.6% -112775 ±-40% sched_debug.cfs_rq[9]:/.spread0
14486 ± 6% +20.9% 17508 ± 6% sched_debug.cfs_rq[9]:/.tg_load_avg
14454 ± 6% +32.2% 19105 ± 13% sched_debug.cpu#0.ttwu_count
11838 ± 20% -37.9% 7351 ± 30% sched_debug.cpu#1.ttwu_count
-6.75 ±-36% -96.3% -0.25 ±-1424% sched_debug.cpu#4.nr_uninterruptible
6151 ± 23% +113.3% 13119 ± 24% sched_debug.cpu#6.nr_switches
6185 ± 23% +112.6% 13147 ± 24% sched_debug.cpu#6.sched_count
1375 ± 29% +242.6% 4711 ± 35% sched_debug.cpu#6.sched_goidle
4277 ± 21% +88.5% 8063 ± 19% sched_debug.cpu#6.ttwu_count
2263 ± 12% -19.2% 1829 ± 0% sched_debug.cpu#9.curr->pid
72929 ± 11% -20.4% 58086 ± 0% sched_debug.cpu#9.nr_load_updates
356400 ±171% -99.6% 1252 ± 11% sched_debug.cpu#9.ttwu_local
0.05 ±100% +430.4% 0.28 ± 98% sched_debug.rt_rq[1]:/.rt_time


lkp-a06: Atom
Memory: 8G

lkp-sb02: Sandy Bridge-EP
Memory: 4G

nhm4: Nehalem
Memory: 4G

lituya: Grantley Haswell
Memory: 16G

wsm: Westmere
Memory: 6G

lkp-t410: Westmere
Memory: 2G

nhm-white: Nehalem
Memory: 6G


To reproduce:

git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
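
A minimal sketch of one way to compare the two commits above by hand, assuming a
typical kernel checkout and build; the tree path, config handling, and boot step
are illustrative and not part of the lkp-tests workflow documented here:

# Sketch only: build and boot each kernel being compared, then rerun the job.
cd linux
git checkout cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50   # parent commit
make olddefconfig && make -j"$(nproc)"
# install and boot this kernel, then run the attached job:
(cd ../lkp-tests && bin/lkp run job.yaml)
# repeat with the commit under test:
# git checkout 62c79204783e188291d880f23d49c02d8c8f498b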


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fsmark
default-monitors:
wait: activate-monitor
kmsg:
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
interval: 10
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 60
cpufreq_governor: performance
default-watchdogs:
oom-killer:
watchdog:
commit: ba19772fefe56fca1548d91218c3aeca8f207919
model: Sandy Bridge-EP
memory: 4G
hdd_partitions: "/dev/disk/by-id/ata-HDT722516DLA380_VDSD1DTCEKYAEJ-part2"
swap_partitions:
rootfs_partition: "/dev/disk/by-id/ata-HDT722516DLA380_VDSD1DTCEKYAEJ-part1"
category: benchmark
iterations: 1x
nr_threads: 32t
disk: 1HDD
fs: btrfs
fs2:
fsmark:
filesize: 9B
test_size: 400M
sync_method: fsyncBeforeClose
nr_directories: 16d
nr_files_per_directory: 256fpd
queue: cyclic
testbox: lkp-sb02
tbox_group: lkp-sb02
kconfig: x86_64-rhel
enqueue_time: 2015-07-08 12:46:57.152422017 +08:00
user: wfg
compiler: gcc-4.9
head_commit: ba19772fefe56fca1548d91218c3aeca8f207919
base_commit: d770e558e21961ad6cfdf0ff7df0eb5d7d4f0754
branch: linux-devel/devel-hourly-2015071021
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/vmlinuz-4.2.0-rc1-wl-00882-gba19772"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/fsmark/performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-sb02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/0"
job_file: "/lkp/scheduled/lkp-sb02/cyclic_fsmark-performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd-x86_64-rhel-CYCLIC_HEAD-ba19772fefe56fca1548d91218c3aeca8f207919-20150708-88967-m5o3fs-0.yaml"
dequeue_time: 2015-07-11 12:17:26.250837417 +08:00
nr_cpu: "$(nproc)"
max_uptime: 1063.0800000000002
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=wfg
- job=/lkp/scheduled/lkp-sb02/cyclic_fsmark-performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd-x86_64-rhel-CYCLIC_HEAD-ba19772fefe56fca1548d91218c3aeca8f207919-20150708-88967-m5o3fs-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015071021
- commit=ba19772fefe56fca1548d91218c3aeca8f207919
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/vmlinuz-4.2.0-rc1-wl-00882-gba19772
- max_uptime=1063
- RESULT_ROOT=/result/fsmark/performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-sb02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/0
- LKP_SERVER=inn
- |2-


earlyprintk=ttyS0,115200 systemd.log_level=err
debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
console=ttyS0,115200 console=tty0 vga=normal

rw
lkp_initrd: "/lkp/wfg/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz,/lkp/benchmarks/fsmark.cgz"
job_state: finished
loadavg: 27.66 13.44 5.22 1/170 3028
start_time: '1436588285'
end_time: '1436588450'
version: "/lkp/wfg/.src-20150711-100505"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
mkfs -t btrfs /dev/sda2
mount -t btrfs /dev/sda2 /fs/sda2
./fs_mark -d /fs/sda2/1 -d /fs/sda2/2 -d /fs/sda2/3 -d /fs/sda2/4 -d /fs/sda2/5 -d /fs/sda2/6 -d /fs/sda2/7 -d /fs/sda2/8 -d /fs/sda2/9 -d /fs/sda2/10 -d /fs/sda2/11 -d /fs/sda2/12 -d /fs/sda2/13 -d /fs/sda2/14 -d /fs/sda2/15 -d /fs/sda2/16 -d /fs/sda2/17 -d /fs/sda2/18 -d /fs/sda2/19 -d /fs/sda2/20 -d /fs/sda2/21 -d /fs/sda2/22 -d /fs/sda2/23 -d /fs/sda2/24 -d /fs/sda2/25 -d /fs/sda2/26 -d /fs/sda2/27 -d /fs/sda2/28 -d /fs/sda2/29 -d /fs/sda2/30 -d /fs/sda2/31 -d /fs/sda2/32 -D 16 -N 256 -n 3200 -L 1 -S 1 -s 9
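
The four per-CPU governor writes above are equivalent to a single loop over the
cpufreq sysfs entries (a sketch, assuming every online CPU exposes scaling_governor):

for g in /sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor; do
        echo performance > "$g"   # same effect as the per-CPU echo lines above
done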