Subject: [PATCH] sched/fair: make some functions static
Make some functions static as they are not used outside of fair.c.

This fixes the following warnings when building with 'W=1':

kernel/sched/fair.c:2439:6: warning: no previous prototype for ‘task_numa_work’ [-Wmissing-prototypes]
kernel/sched/fair.c:2584:6: warning: no previous prototype for ‘task_tick_numa’ [-Wmissing-prototypes]
kernel/sched/fair.c:3548:6: warning: no previous prototype for ‘sync_entity_load_avg’ [-Wmissing-prototypes]
kernel/sched/fair.c:3561:6: warning: no previous prototype for ‘remove_entity_load_avg’ [-Wmissing-prototypes]
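
For context (not part of the patch itself), here is a minimal standalone
sketch of what -Wmissing-prototypes checks and why marking a function
'static' silences it. The file name, function names and gcc invocation
below are illustrative only, not taken from the kernel build:

/* demo.c - build with: gcc -Wall -Wmissing-prototypes -c demo.c */

/*
 * Externally visible function with no prototype in any header:
 * the compiler warns, because callers in other translation units
 * would have no declaration to be checked against.
 */
void helper_without_prototype(void)	/* warning: no previous prototype */
{
}

/*
 * Internal linkage tells the compiler the function can only be called
 * from this file, so no external prototype is expected and the
 * warning goes away.
 */
static void helper_with_internal_linkage(void)
{
}

/* Exported entry point, declared first so it does not warn itself. */
void demo_entry(void);

void demo_entry(void)
{
	helper_with_internal_linkage();
}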

Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
---
kernel/sched/fair.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb..615e168 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2436,7 +2436,7 @@ static void reset_ptenuma_scan(struct task_struct *p)
* The expensive part of numa migration is done from task_work context.
* Triggered from task_tick_numa().
*/
-void task_numa_work(struct callback_head *work)
+static void task_numa_work(struct callback_head *work)
{
unsigned long migrate, next_scan, now = jiffies;
struct task_struct *p = current;
@@ -2581,7 +2581,7 @@ void task_numa_work(struct callback_head *work)
/*
* Drive the periodic memory faults..
*/
-void task_tick_numa(struct rq *rq, struct task_struct *curr)
+static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
struct callback_head *work = &curr->numa_work;
u64 period, now;
@@ -3545,7 +3545,7 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
* Synchronize entity load avg of dequeued entity without locking
* the previous rq.
*/
-void sync_entity_load_avg(struct sched_entity *se)
+static void sync_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;
@@ -3558,7 +3558,7 @@ void sync_entity_load_avg(struct sched_entity *se)
* Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now).
*/
-void remove_entity_load_avg(struct sched_entity *se)
+static void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
unsigned long flags;
--
1.8.3.1