Subject: [LKP] [locking/mutex] 07d2413a61d: -3.6% unixbench.score +60.8% unixbench.time.system_time
From: Ying Huang
Date: 2015-03-17
FYI, we noticed the following changes on

git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit 07d2413a61db6500f58e614e873eed79d7f2ed72 ("locking/mutex: In mutex_spin_on_owner(), return true when owner changes")
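
For context, the sketch below is a minimal user-space model of the behaviour
the commit title describes; it is NOT the kernel source, and the mutex_model
type and function names are invented for illustration. Before the change,
mutex_spin_on_owner() reported failure once the lock owner changed, so the
waiter stopped spinning and went to sleep (a voluntary context switch).
Returning true on an owner change keeps the waiter spinning on the CPU, which
is consistent with the profile below: voluntary context switches drop 93.9%
while system_time and turbostat.%Busy rise sharply.

/*
 * Minimal user-space model -- illustrative only, not kernel code.
 * The real kernel loop also breaks out on need_resched().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct mutex_model {
        _Atomic long owner;             /* 0 == unlocked, else owner id */
};

/* Spin while the same owner still holds the lock. */
static bool spin_on_owner(struct mutex_model *lock, long owner)
{
        while (atomic_load(&lock->owner) == owner)
                sched_yield();          /* stand-in for cpu_relax() */

        /*
         * Old behaviour: keep spinning only if the lock was freed:
         *         return atomic_load(&lock->owner) == 0;
         * New behaviour per this commit: an owner change is treated
         * as forward progress, so keep spinning on the new owner.
         */
        return true;
}

/* Caller: spin while spin_on_owner() reports progress. */
static void optimistic_spin(struct mutex_model *lock)
{
        long owner;

        while ((owner = atomic_load(&lock->owner)) != 0) {
                if (!spin_on_owner(lock, owner))
                        return;         /* give up; sleep on wait list */
        }
        /* lock is free here; try to acquire it */
}

With the unconditional "return true", the only way optimistic_spin() stops
burning CPU is the lock actually becoming free, which would also explain the
collapse in cpuidle C1 usage and the jump in involuntary context switches
reported below.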


testbox/testcase/testparams: lituya/unixbench/performance-fsdisk

d6abfdb2022368d8  07d2413a61db6500f58e614e87
----------------  --------------------------
         %stddev     %change         %stddev
             \          |                \
10131 ± 1% -3.6% 9770 ± 0% unixbench.score
1049 ± 18% +60.8% 1687 ± 0% unixbench.time.system_time
7144 ± 30% +203.6% 21686 ± 3% unixbench.time.involuntary_context_switches
24573900 ± 19% -93.9% 1497516 ± 8% unixbench.time.voluntary_context_switches
369 ± 18% +59.5% 589 ± 0% unixbench.time.percent_of_cpu_this_job_got
3849 ± 5% -17.9% 3159 ± 0% uptime.idle
4060 ± 0% +25.3% 5087 ± 20% meminfo.AnonHugePages
3 ± 23% +66.7% 5 ± 0% vmstat.procs.r
174876 ± 18% -90.7% 16282 ± 5% vmstat.system.cs
87032 ± 9% -48.9% 44484 ± 2% softirqs.RCU
213221 ± 5% -79.7% 43210 ± 0% softirqs.SCHED
551372 ± 17% +60.8% 886553 ± 0% softirqs.TIMER
7144 ± 30% +203.6% 21686 ± 3% time.involuntary_context_switches
369 ± 18% +59.5% 589 ± 0% time.percent_of_cpu_this_job_got
1049 ± 18% +60.8% 1687 ± 0% time.system_time
24573900 ± 19% -93.9% 1497516 ± 8% time.voluntary_context_switches
23.55 ± 17% +57.3% 37.05 ± 0% turbostat.%Busy
779 ± 17% +57.1% 1223 ± 0% turbostat.Avg_MHz
22.32 ± 18% -61.6% 8.56 ± 4% turbostat.CPU%c1
42.53 ± 2% +5.6% 44.92 ± 0% turbostat.PkgWatt
0.57 ± 7% +7.5% 0.61 ± 1% turbostat.RAMWatt
21474227 ± 18% -93.7% 1361886 ± 8% cpuidle.C1-HSW.usage
6.755e+08 ± 27% -89.6% 70473442 ± 17% cpuidle.C1-HSW.time
26571 ± 25% -92.8% 1917 ± 4% cpuidle.C3-HSW.usage
117581 ± 21% -64.5% 41794 ± 1% cpuidle.C6-HSW.usage
1439 ± 28% -88.0% 173 ± 43% cpuidle.POLL.time
524 ± 17% -90.9% 47 ± 24% cpuidle.POLL.usage
4546 ± 15% +46.3% 6651 ± 2% sched_debug.cfs_rq[0]:/.tg->runnable_avg
246 ± 25% +55.4% 382 ± 2% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
157742 ± 23% +129.0% 361183 ± 6% sched_debug.cfs_rq[0]:/.min_vruntime
15438 ± 14% +63.1% 25172 ± 9% sched_debug.cfs_rq[0]:/.exec_clock
11297 ± 25% +54.9% 17499 ± 2% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
4559 ± 15% +46.3% 6669 ± 2% sched_debug.cfs_rq[10]:/.tg->runnable_avg
9 ± 35% +84.2% 17 ± 15% sched_debug.cfs_rq[10]:/.runnable_load_avg
11563 ± 18% +57.7% 18232 ± 10% sched_debug.cfs_rq[10]:/.avg->runnable_avg_sum
12727 ± 13% +73.8% 22116 ± 4% sched_debug.cfs_rq[10]:/.exec_clock
251 ± 18% +58.1% 397 ± 10% sched_debug.cfs_rq[10]:/.tg_runnable_contrib
152419 ± 21% +131.9% 353450 ± 4% sched_debug.cfs_rq[10]:/.min_vruntime
4559 ± 15% +46.3% 6669 ± 2% sched_debug.cfs_rq[11]:/.tg->runnable_avg
66 ± 19% -73.1% 17 ± 30% sched_debug.cfs_rq[12]:/.runnable_load_avg
4560 ± 15% +46.3% 6672 ± 2% sched_debug.cfs_rq[12]:/.tg->runnable_avg
78 ± 22% -75.1% 19 ± 31% sched_debug.cfs_rq[12]:/.load
151608 ± 21% +158.0% 391137 ± 20% sched_debug.cfs_rq[13]:/.min_vruntime
272 ± 23% +58.3% 431 ± 11% sched_debug.cfs_rq[13]:/.tg_runnable_contrib
12552 ± 13% +119.5% 27556 ± 37% sched_debug.cfs_rq[13]:/.exec_clock
4557 ± 15% +46.4% 6674 ± 2% sched_debug.cfs_rq[13]:/.tg->runnable_avg
12507 ± 23% +58.0% 19763 ± 11% sched_debug.cfs_rq[13]:/.avg->runnable_avg_sum
314 ± 8% +49.8% 471 ± 28% sched_debug.cfs_rq[14]:/.tg_runnable_contrib
4558 ± 15% +46.4% 6675 ± 2% sched_debug.cfs_rq[14]:/.tg->runnable_avg
152171 ± 21% +187.6% 437699 ± 32% sched_debug.cfs_rq[14]:/.min_vruntime
14442 ± 8% +49.7% 21618 ± 28% sched_debug.cfs_rq[14]:/.avg->runnable_avg_sum
11538 ± 16% +51.7% 17502 ± 1% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
252 ± 16% +51.6% 382 ± 0% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
152080 ± 21% +132.5% 353587 ± 4% sched_debug.cfs_rq[15]:/.min_vruntime
4560 ± 15% +46.4% 6675 ± 2% sched_debug.cfs_rq[15]:/.tg->runnable_avg
13692 ± 10% +71.8% 23520 ± 4% sched_debug.cfs_rq[15]:/.exec_clock
12631 ± 21% +59.3% 20127 ± 17% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
10 ± 27% +166.7% 28 ± 33% sched_debug.cfs_rq[1]:/.runnable_load_avg
275 ± 21% +59.2% 438 ± 17% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
4548 ± 15% +46.2% 6651 ± 2% sched_debug.cfs_rq[1]:/.tg->runnable_avg
166428 ± 20% +136.5% 393659 ± 14% sched_debug.cfs_rq[1]:/.min_vruntime
14633 ± 11% +100.7% 29361 ± 34% sched_debug.cfs_rq[1]:/.exec_clock
4547 ± 15% +46.3% 6653 ± 2% sched_debug.cfs_rq[2]:/.tg->runnable_avg
170439 ± 20% +114.8% 366033 ± 3% sched_debug.cfs_rq[2]:/.min_vruntime
11846 ± 18% +57.9% 18702 ± 6% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
15344 ± 14% +58.9% 24380 ± 2% sched_debug.cfs_rq[2]:/.exec_clock
258 ± 18% +57.9% 408 ± 6% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
168070 ± 16% +114.7% 360822 ± 4% sched_debug.cfs_rq[3]:/.min_vruntime
11531 ± 18% +51.7% 17496 ± 1% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
251 ± 18% +52.3% 383 ± 1% sched_debug.cfs_rq[3]:/.tg_runnable_contrib
388 ± 29% -68.0% 124 ± 25% sched_debug.cfs_rq[3]:/.tg_load_contrib
365 ± 30% -72.7% 99 ± 33% sched_debug.cfs_rq[3]:/.blocked_load_avg
4548 ± 15% +46.3% 6655 ± 2% sched_debug.cfs_rq[3]:/.tg->runnable_avg
14874 ± 7% +57.8% 23470 ± 4% sched_debug.cfs_rq[3]:/.exec_clock
247 ± 12% +55.3% 383 ± 3% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
11274 ± 12% +56.0% 17584 ± 3% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
4549 ± 15% +46.3% 6657 ± 2% sched_debug.cfs_rq[4]:/.tg->runnable_avg
14121 ± 9% +65.6% 23381 ± 4% sched_debug.cfs_rq[4]:/.exec_clock
160686 ± 17% +124.3% 360493 ± 4% sched_debug.cfs_rq[4]:/.min_vruntime
11730 ± 14% +73.2% 20318 ± 17% sched_debug.cfs_rq[5]:/.avg->runnable_avg_sum
4550 ± 15% +46.3% 6658 ± 2% sched_debug.cfs_rq[5]:/.tg->runnable_avg
161214 ± 21% +173.5% 440993 ± 37% sched_debug.cfs_rq[5]:/.min_vruntime
256 ± 14% +73.0% 443 ± 17% sched_debug.cfs_rq[5]:/.tg_runnable_contrib
4550 ± 15% +46.4% 6662 ± 2% sched_debug.cfs_rq[6]:/.tg->runnable_avg
14431 ± 13% +31.6% 18997 ± 9% sched_debug.cfs_rq[6]:/.avg->runnable_avg_sum
187106 ± 33% +97.8% 370045 ± 5% sched_debug.cfs_rq[6]:/.min_vruntime
314 ± 13% +32.0% 415 ± 9% sched_debug.cfs_rq[6]:/.tg_runnable_contrib
13 ± 35% +132.1% 30 ± 40% sched_debug.cfs_rq[7]:/.runnable_load_avg
161563 ± 22% +124.0% 361967 ± 3% sched_debug.cfs_rq[7]:/.min_vruntime
4552 ± 15% +46.4% 6663 ± 2% sched_debug.cfs_rq[7]:/.tg->runnable_avg
257 ± 16% +48.4% 381 ± 2% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
3 ± 13% -53.8% 1 ± 33% sched_debug.cfs_rq[7]:/.nr_spread_over
14448 ± 18% +63.8% 23661 ± 2% sched_debug.cfs_rq[7]:/.exec_clock
11755 ± 16% +48.6% 17474 ± 2% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
4554 ± 15% +46.3% 6665 ± 2% sched_debug.cfs_rq[8]:/.tg->runnable_avg
4557 ± 15% +46.3% 6667 ± 2% sched_debug.cfs_rq[9]:/.tg->runnable_avg
192062 ± 47% +83.6% 352649 ± 4% sched_debug.cfs_rq[9]:/.min_vruntime
14 ± 36% +58.9% 22 ± 10% sched_debug.cpu#0.cpu_load[3]
13 ± 37% +66.7% 22 ± 7% sched_debug.cpu#0.cpu_load[4]
13 ± 35% +61.1% 21 ± 11% sched_debug.cpu#0.cpu_load[2]
873975 ± 12% -75.4% 215052 ± 24% sched_debug.cpu#1.sched_count
16 ± 4% +84.4% 29 ± 39% sched_debug.cpu#1.cpu_load[1]
15 ± 17% +98.4% 30 ± 35% sched_debug.cpu#1.cpu_load[4]
873936 ± 12% -75.4% 214989 ± 24% sched_debug.cpu#1.nr_switches
436585 ± 12% -75.5% 106889 ± 24% sched_debug.cpu#1.sched_goidle
564 ± 33% +106.4% 1165 ± 7% sched_debug.cpu#1.curr->pid
15 ± 14% +95.2% 30 ± 35% sched_debug.cpu#1.cpu_load[3]
465895 ± 8% -77.2% 106210 ± 24% sched_debug.cpu#1.ttwu_count
15 ± 7% +91.9% 29 ± 37% sched_debug.cpu#1.cpu_load[2]
15 ± 29% +50.8% 23 ± 9% sched_debug.cpu#10.cpu_load[3]
315517 ± 11% -94.0% 18947 ± 11% sched_debug.cpu#10.sched_goidle
336250 ± 12% -94.0% 20238 ± 12% sched_debug.cpu#10.ttwu_count
631593 ± 11% -93.9% 38620 ± 11% sched_debug.cpu#10.sched_count
631561 ± 11% -93.9% 38572 ± 11% sched_debug.cpu#10.nr_switches
556 ± 20% +93.0% 1074 ± 17% sched_debug.cpu#10.curr->pid
14 ± 28% +57.6% 23 ± 8% sched_debug.cpu#10.cpu_load[4]
341057 ± 11% -94.1% 20223 ± 8% sched_debug.cpu#11.ttwu_count
623922 ± 9% -93.7% 39166 ± 9% sched_debug.cpu#11.sched_count
311611 ± 9% -93.9% 19080 ± 9% sched_debug.cpu#11.sched_goidle
623869 ± 9% -93.7% 39116 ± 9% sched_debug.cpu#11.nr_switches
338493 ± 10% -94.2% 19504 ± 7% sched_debug.cpu#12.sched_goidle
55 ± 30% -53.4% 26 ± 16% sched_debug.cpu#12.cpu_load[1]
71 ± 17% -59.4% 29 ± 28% sched_debug.cpu#12.cpu_load[0]
79 ± 23% -75.5% 19 ± 31% sched_debug.cpu#12.load
677539 ± 10% -94.1% 40155 ± 7% sched_debug.cpu#12.nr_switches
280203 ± 33% -92.6% 20836 ± 9% sched_debug.cpu#12.ttwu_count
677589 ± 10% -94.1% 40202 ± 7% sched_debug.cpu#12.sched_count
509 ± 22% +55.6% 793 ± 15% sched_debug.cpu#12.ttwu_local
19 ± 14% +163.6% 50 ± 32% sched_debug.cpu#13.cpu_load[1]
24 ± 16% +176.3% 67 ± 36% sched_debug.cpu#13.cpu_load[0]
17 ± 18% +138.2% 40 ± 24% sched_debug.cpu#13.cpu_load[2]
646767 ± 13% -94.2% 37618 ± 11% sched_debug.cpu#13.sched_count
709 ± 37% +125.3% 1598 ± 23% sched_debug.cpu#13.curr->pid
646735 ± 13% -94.2% 37563 ± 11% sched_debug.cpu#13.nr_switches
15 ± 19% +104.8% 31 ± 23% sched_debug.cpu#13.cpu_load[4]
16 ± 19% +117.2% 34 ± 21% sched_debug.cpu#13.cpu_load[3]
327419 ± 14% -93.9% 20012 ± 9% sched_debug.cpu#13.ttwu_count
323068 ± 13% -94.3% 18422 ± 11% sched_debug.cpu#13.sched_goidle
674567 ± 13% -93.6% 43222 ± 11% sched_debug.cpu#14.sched_count
750 ± 30% +90.8% 1432 ± 15% sched_debug.cpu#14.curr->pid
478 ± 22% +33.9% 640 ± 21% sched_debug.cpu#14.ttwu_local
339046 ± 13% -93.9% 20669 ± 7% sched_debug.cpu#14.ttwu_count
336995 ± 13% -93.7% 21107 ± 11% sched_debug.cpu#14.sched_goidle
674534 ± 13% -93.6% 43164 ± 11% sched_debug.cpu#14.nr_switches
19 ± 12% +45.6% 28 ± 18% sched_debug.cpu#15.cpu_load[1]
299069 ± 13% -93.4% 19783 ± 5% sched_debug.cpu#15.sched_goidle
813 ± 42% +65.5% 1346 ± 6% sched_debug.cpu#15.curr->pid
580735 ± 3% +11.3% 646611 ± 4% sched_debug.cpu#15.avg_idle
331682 ± 14% -93.7% 20957 ± 7% sched_debug.cpu#15.ttwu_count
599051 ± 13% -93.2% 41010 ± 5% sched_debug.cpu#15.sched_count
14 ± 15% +69.5% 25 ± 5% sched_debug.cpu#15.cpu_load[4]
15 ± 12% +71.7% 25 ± 7% sched_debug.cpu#15.cpu_load[3]
16 ± 10% +62.1% 26 ± 11% sched_debug.cpu#15.cpu_load[2]
817 ± 11% +61.3% 1318 ± 17% sched_debug.cpu#15.ttwu_local
599015 ± 13% -93.2% 40958 ± 5% sched_debug.cpu#15.nr_switches
1009949 ± 27% -63.0% 373861 ± 42% sched_debug.cpu#2.sched_count
15 ± 16% +54.0% 24 ± 14% sched_debug.cpu#2.cpu_load[1]
1009909 ± 27% -63.0% 373802 ± 42% sched_debug.cpu#2.nr_switches
14 ± 18% +61.0% 23 ± 10% sched_debug.cpu#2.cpu_load[2]
504627 ± 27% -63.1% 186448 ± 42% sched_debug.cpu#2.sched_goidle
14 ± 20% +63.8% 23 ± 8% sched_debug.cpu#2.cpu_load[3]
534870 ± 25% -64.9% 187727 ± 42% sched_debug.cpu#2.ttwu_count
14 ± 23% +66.7% 23 ± 6% sched_debug.cpu#2.cpu_load[4]
16 ± 24% +62.5% 26 ± 19% sched_debug.cpu#2.cpu_load[0]
462676 ± 24% -76.7% 107823 ± 47% sched_debug.cpu#3.sched_goidle
926031 ± 24% -76.6% 217084 ± 47% sched_debug.cpu#3.sched_count
494298 ± 23% -78.2% 107584 ± 46% sched_debug.cpu#3.ttwu_count
18 ± 6% +18.9% 22 ± 5% sched_debug.cpu#3.cpu_load[2]
925992 ± 24% -76.6% 217032 ± 47% sched_debug.cpu#3.nr_switches
16 ± 13% +37.9% 22 ± 3% sched_debug.cpu#3.cpu_load[3]
15 ± 21% +52.5% 23 ± 3% sched_debug.cpu#3.cpu_load[4]
33 ± 20% -36.8% 21 ± 17% sched_debug.cpu#3.cpu_load[0]
14 ± 18% +56.9% 22 ± 3% sched_debug.cpu#4.cpu_load[4]
15 ± 23% +41.3% 22 ± 4% sched_debug.cpu#4.cpu_load[3]
948 ± 27% +36.5% 1295 ± 11% sched_debug.cpu#4.curr->pid
855 ± 43% +67.1% 1429 ± 16% sched_debug.cpu#5.curr->pid
22 ± 9% +87.8% 42 ± 29% sched_debug.cpu#5.cpu_load[0]
15 ± 13% +114.5% 33 ± 43% sched_debug.cpu#5.cpu_load[3]
15 ± 15% +119.7% 33 ± 47% sched_debug.cpu#5.cpu_load[4]
18 ± 8% +100.0% 37 ± 32% sched_debug.cpu#5.cpu_load[1]
16 ± 10% +110.8% 34 ± 38% sched_debug.cpu#5.cpu_load[2]
415247 ± 12% -80.9% 79279 ± 20% sched_debug.cpu#6.sched_goidle
831074 ± 12% -80.8% 159814 ± 20% sched_debug.cpu#6.nr_switches
831113 ± 12% -80.8% 159869 ± 20% sched_debug.cpu#6.sched_count
811 ± 36% +50.0% 1216 ± 15% sched_debug.cpu#6.curr->pid
430138 ± 18% -82.1% 76868 ± 22% sched_debug.cpu#6.ttwu_count
860988 ± 14% -70.9% 250529 ± 33% sched_debug.cpu#7.nr_switches
430158 ± 14% -71.0% 124543 ± 34% sched_debug.cpu#7.sched_goidle
861025 ± 14% -70.9% 250578 ± 33% sched_debug.cpu#7.sched_count
658 ± 21% +123.8% 1473 ± 27% sched_debug.cpu#7.curr->pid
15 ± 19% +60.0% 24 ± 7% sched_debug.cpu#7.cpu_load[4]
15 ± 14% +62.9% 25 ± 13% sched_debug.cpu#7.cpu_load[2]
440642 ± 11% -71.9% 123684 ± 34% sched_debug.cpu#7.ttwu_count
16 ± 7% +58.2% 26 ± 27% sched_debug.cpu#7.cpu_load[1]
15 ± 16% +65.0% 24 ± 9% sched_debug.cpu#7.cpu_load[3]
823 ± 25% +61.0% 1325 ± 31% sched_debug.cpu#8.curr->pid
638635 ± 13% -93.8% 39544 ± 7% sched_debug.cpu#9.sched_count
638594 ± 13% -93.8% 39497 ± 7% sched_debug.cpu#9.nr_switches
319018 ± 13% -93.9% 19378 ± 7% sched_debug.cpu#9.sched_goidle
328979 ± 10% -93.9% 19948 ± 6% sched_debug.cpu#9.ttwu_count

lituya: Grantley Haswell
Memory: 16G




softirqs.SCHED

300000 ++-----------------------------------------------------------------+
| * |
250000 ++ : * |
| : :.* .*. .* : + .* |
*.*. : * + .* * : : *.*.* + |
200000 ++ * * : * * |
| :+ |
150000 ++ * |
| |
100000 ++ |
| |
| |
50000 O+O O OO O O O O O OO O O O O O O OO O O O O O OO O O O O O OO O O O
| |
0 ++-----------------------------------------------------------------+


cpuidle.C1-HSW.usage

3e+07 ++----------------------------------------------------------------+
| * |
2.5e+07 ++* * .* :+ |
| : :: * : : * |
|: : * : * + :: : |
2e+07 ++ : * : : : * * : |
|: :+ : : : : * |
1.5e+07 ++ * *.*.*.* :: |
* * |
1e+07 ++ |
| |
| |
5e+06 ++ |
O O O OO O O OO O O O O OO O O O O O O OO O O
0 ++-----------O-O----O---O---OO-O-O----------------OO-O-O----------+


unixbench.time.voluntary_context_switches

3.5e+07 ++----------------------------------------------------------------+
| |
3e+07 ++* * .* *. |
| : :* * : : * |
2.5e+07 ++ : * : : + : : : |
|: : * : : : * : : |
2e+07 ++ :+ : : : : * * |
| * *.*.*.* :: |
1.5e+07 *+ * |
| |
1e+07 ++ |
| |
5e+06 ++ |
O O O OO O O OO O O O OO O O O O O O OO O O
0 ++-----------O-O----O-O-O---OO-O-O----------------OO-O-O----------+


unixbench.time.involuntary_context_switches

30000 ++------------------------------------------------------------------+
| |
25000 ++O O |
| O O O O O O |
O O O O O O O O O O OO O O O O O O O O
20000 ++ O O O O O O O O O |
| |
15000 ++ |
| |
10000 *+ *.* *.*.*.* *. |
|: : + : : + *. * .* |
|: : * : .* * + + .* |
5000 ++* * * * |
| |
0 ++------------------------------------------------------------------+


time.voluntary_context_switches

3.5e+07 ++----------------------------------------------------------------+
| |
3e+07 ++* * .* *. |
| : :* * : : * |
2.5e+07 ++ : * : : + : : : |
|: : * : : : * : : |
2e+07 ++ :+ : : : : * * |
| * *.*.*.* :: |
1.5e+07 *+ * |
| |
1e+07 ++ |
| |
5e+06 ++ |
O O O OO O O OO O O O OO O O O O O O OO O O
0 ++-----------O-O----O-O-O---OO-O-O----------------OO-O-O----------+


time.involuntary_context_switches

30000 ++------------------------------------------------------------------+
| |
25000 ++O O |
| O O O O O O |
O O O O O O O O O O OO O O O O O O O O
20000 ++ O O O O O O O O O |
| |
15000 ++ |
| |
10000 *+ *.* *.*.*.* *. |
|: : + : : + *. * .* |
|: : * : .* * + + .* |
5000 ++* * * * |
| |
0 ++------------------------------------------------------------------+


vmstat.system.cs

250000 ++-----------------------------------------------------------------+
| |
| * *. .* *. |
200000 ++: : * * : : * |
|: : * : : + : : : |
|: : * : : : * :: : |
150000 ++ :+ : : :: * * |
| * *.*.*.* :: |
100000 *+ * |
| |
| |
50000 ++ |
| |
O O O OO O O O O O OO O O O O O O OO O O O O O OO O O O O O OO O O O
0 ++-----------------------------------------------------------------+

[*] bisect-good sample
[O] bisect-bad sample

To reproduce:

apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang

---
testcase: unixbench
default-monitors:
  wait: pre-test
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 10
default_watchdogs:
  watch-oom:
  watchdog:
cpufreq_governor: performance
commit: 159e7763d517804c61a673736660a5a35f2ea5f8
model: Grantley Haswell
nr_cpu: 16
memory: 16G
hdd_partitions:
swap_partitions:
rootfs_partition:
unixbench:
  test: fsdisk
testbox: lituya
tbox_group: lituya
kconfig: x86_64-rhel
enqueue_time: 2015-03-13 17:18:20.807960694 +08:00
head_commit: 159e7763d517804c61a673736660a5a35f2ea5f8
base_commit: 9eccca0843205f87c00404b663188b88eb248051
branch: next/master
kernel: "/kernel/x86_64-rhel/159e7763d517804c61a673736660a5a35f2ea5f8/vmlinuz-4.0.0-rc3-next-20150316"
user: lkp
queue: cyclic
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/lituya/unixbench/performance-fsdisk/debian-x86_64-2015-02-07.cgz/x86_64-rhel/159e7763d517804c61a673736660a5a35f2ea5f8/0"
job_file: "/lkp/scheduled/lituya/cyclic_unixbench-performance-fsdisk-x86_64-rhel-HEAD-159e7763d517804c61a673736660a5a35f2ea5f8-0-20150313-22163-1pkno0h.yaml"
dequeue_time: 2015-03-16 17:37:39.210757811 +08:00
max_uptime: 1211.5000000000002
modules_initrd: "/kernel/x86_64-rhel/159e7763d517804c61a673736660a5a35f2ea5f8/modules.cgz"
bm_initrd: "/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/unixbench-debian.cgz,/lkp/benchmarks/unixbench.cgz"
job_state: finished
loadavg: 8.28 4.83 1.96 1/211 5736
start_time: '1426498684'
end_time: '1426498982'
version: "/lkp/lkp/.src-20150316-152133"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
./Run fsdisk
_______________________________________________
LKP mailing list
LKP@linux.intel.com