sched: allow architectures to specify sched_clock_stable
Allow CONFIG_HAVE_UNSTABLE_SCHED_CLOCK architectures to still specify
that their sched_clock() implementation is reliable.

This will be used by x86 to switch on a faster sched_clock_cpu()
implementation on certain CPU types.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
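
As a hedged illustration of the intended usage (not part of this commit): an architecture that selects CONFIG_HAVE_UNSTABLE_SCHED_CLOCK could set the new flag from its boot code once it has verified its clock source. The helper name below is a hypothetical stand-in; the actual x86 wiring is introduced in a later change.

/* Hypothetical arch boot code -- illustration only, not from this commit */
static void __init arch_init_sched_clock(void)
{
	/* cpu_has_reliable_tsc() is an assumed stand-in for the arch's own check */
	if (cpu_has_reliable_tsc())
		sched_clock_stable = 1;	/* sched_clock() can be trusted directly */
}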
Ingo Molnar committed Feb 26, 2009
1 parent 694593e commit b342501
Showing 2 changed files with 30 additions and 25 deletions.
10 changes: 10 additions & 0 deletions include/linux/sched.h
@@ -1670,6 +1670,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
45 changes: 20 additions & 25 deletions kernel/sched_clock.c
@@ -24,11 +24,11 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
@@ -87,7 +91,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
@@ -116,10 +120,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
-	 * 		      max(scd->tick_gtod, scd->clock),
-	 * 		      scd->tick_gtod + TICK_NSEC);
+	 *		      max(scd->tick_gtod, scd->clock),
+	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
 	clock = scd->tick_gtod + delta;
@@ -148,12 +155,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
 
-	if (unlikely(!sched_clock_running))
-		return 0ull;
+	if (sched_clock_stable)
+		return sched_clock();
 
+	scd = cpu_sdc(cpu);
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();
 
@@ -193,6 +201,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -235,22 +245,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
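One consequence worth noting (an observation about the result of the diff above, not text from the commit): on architectures that do not select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, sched_clock_stable is now the compile-time constant 1, so the shared sched_clock_cpu() reduces to a plain sched_clock() call and the old #else stubs are no longer needed. A condensed, standalone model of that pattern (assumed names, compilable outside the kernel, purely illustrative):

/* Standalone model -- not literal kernel source */
typedef unsigned long long u64;

u64 sched_clock(void);				/* arch clock, assumed defined elsewhere */
u64 per_cpu_clamped_clock(int cpu);		/* stand-in for the unstable-clock path */

#ifdef HAVE_UNSTABLE_SCHED_CLOCK
int sched_clock_stable;				/* set by the arch at boot */
#else
static const int sched_clock_stable = 1;	/* constant: compiler drops the slow path */
#endif

u64 sched_clock_cpu(int cpu)
{
	if (sched_clock_stable)
		return sched_clock();		/* stable: one shared fast path */
	return per_cpu_clamped_clock(cpu);	/* unstable: per-cpu monotonic clamping */
}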
