Mirror of https://github.com/torvalds/linux
Merge branch 'suspend-to-idle'
* suspend-to-idle:
  cpuidle / sleep: Use broadcast timer for states that stop local timer
  cpuidle: Clean up fallback handling in cpuidle_idle_call()
  cpuidle / sleep: Do sanity checks in cpuidle_enter_freeze() too
  idle / sleep: Avoid excessive disabling and enabling interrupts
commit eef16e4362
3 changed files with 75 additions and 59 deletions
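The central behavioural change of this merge is the cpuidle_enter_freeze() contract: it now takes the driver and device, performs the same sanity checks as the rest of the framework, and reports through its return value whether an idle state was actually entered, so the idle loop can fall back cleanly. A minimal caller-side sketch of that contract in plain C (not kernel code; the mock_* names and stub types are invented for illustration):

#include <stdio.h>

/* Stand-ins for struct cpuidle_driver / struct cpuidle_device. */
struct mock_driver { int state_count; };
struct mock_device { int enabled; };

/*
 * Mimics the new contract: return the index of the state that was entered
 * (>= 0), or a negative value when suspend-to-idle could not be entered
 * and the caller has to fall back to the normal idle path.
 */
static int mock_enter_freeze(struct mock_driver *drv, struct mock_device *dev)
{
	if (!drv || !dev || !dev->enabled)
		return -1;		/* sanity checks live here now */
	return drv->state_count - 1;	/* pretend the deepest state was entered */
}

int main(void)
{
	struct mock_driver drv = { .state_count = 3 };
	struct mock_device dev = { .enabled = 0 };
	int index = mock_enter_freeze(&drv, &dev);

	if (index >= 0)
		printf("entered state %d\n", index);
	else
		printf("fall back to the regular idle path\n");
	return 0;
}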
drivers/cpuidle/cpuidle.c

@@ -44,6 +44,12 @@ void disable_cpuidle(void)
 	off = 1;
 }

+bool cpuidle_not_available(struct cpuidle_driver *drv,
+			   struct cpuidle_device *dev)
+{
+	return off || !initialized || !drv || !dev || !dev->enabled;
+}
+
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
 	return -ENODEV;
 }

-/**
- * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
- * @drv: cpuidle driver for the given CPU.
- * @dev: cpuidle device for the given CPU.
- * @freeze: Whether or not the state should be suitable for suspend-to-idle.
- */
-static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-				      struct cpuidle_device *dev, bool freeze)
+static int find_deepest_state(struct cpuidle_driver *drv,
+			      struct cpuidle_device *dev, bool freeze)
 {
 	unsigned int latency_req = 0;
 	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 	return ret;
 }

+/**
+ * cpuidle_find_deepest_state - Find the deepest available idle state.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ */
+int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+			       struct cpuidle_device *dev)
+{
+	return find_deepest_state(drv, dev, false);
+}
+
 static void enter_freeze_proper(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev, int index)
 {
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,

 /**
  * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
  *
  * If there are states with the ->enter_freeze callback, find the deepest of
- * them and enter it with frozen tick. Otherwise, find the deepest state
- * available and enter it normally.
+ * them and enter it with frozen tick.
  */
-void cpuidle_enter_freeze(void)
+int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
-	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int index;

 	/*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
 	 * that interrupts won't be enabled when it exits and allows the tick to
 	 * be frozen safely.
 	 */
-	index = cpuidle_find_deepest_state(drv, dev, true);
-	if (index >= 0) {
-		enter_freeze_proper(drv, dev, index);
-		return;
-	}
-
-	/*
-	 * It is not safe to freeze the tick, find the deepest state available
-	 * at all and try to enter it normally.
-	 */
-	index = cpuidle_find_deepest_state(drv, dev, false);
+	index = find_deepest_state(drv, dev, true);
 	if (index >= 0)
-		cpuidle_enter(drv, dev, index);
-	else
-		arch_cpu_idle();
+		enter_freeze_proper(drv, dev, index);

-	/* Interrupts are enabled again here. */
-	local_irq_disable();
+	return index;
 }

 /**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
  */
 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-	if (off || !initialized)
-		return -ENODEV;
-
-	if (!drv || !dev || !dev->enabled)
-		return -EBUSY;
-
 	return cpuidle_curr_governor->select(drv, dev);
 }

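The cpuidle.c hunks above split the old cpuidle_find_deepest_state(drv, dev, freeze) into a single static worker, find_deepest_state(), plus thin public entry points. A small standalone sketch of that worker-plus-wrappers pattern (plain C with an invented state table, not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

/* Invented state table; the real code walks drv->states[] instead. */
static const struct { bool usable; bool has_enter_freeze; } states[] = {
	{ true, false }, { true, true }, { true, false },
};

/* One worker, parameterized on whether an ->enter_freeze-style callback is required. */
static int pick_deepest(bool freeze)
{
	int i, ret = -1;

	for (i = 0; i < (int)(sizeof(states) / sizeof(states[0])); i++) {
		if (!states[i].usable)
			continue;
		if (freeze && !states[i].has_enter_freeze)
			continue;
		ret = i;	/* deeper states sit later in the table */
	}
	return ret;
}

/* Thin wrappers in the spirit of cpuidle_find_deepest_state() and the freeze path. */
static int find_deepest(void)        { return pick_deepest(false); }
static int find_deepest_freeze(void) { return pick_deepest(true); }

int main(void)
{
	printf("deepest: %d, deepest with enter_freeze: %d\n",
	       find_deepest(), find_deepest_freeze());
	return 0;
}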
include/linux/cpuidle.h

@@ -126,6 +126,8 @@ struct cpuidle_driver {

 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
+extern bool cpuidle_not_available(struct cpuidle_driver *drv,
+				  struct cpuidle_device *dev);

 extern int cpuidle_select(struct cpuidle_driver *drv,
 			  struct cpuidle_device *dev);
@@ -150,11 +152,17 @@ extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
-extern void cpuidle_enter_freeze(void);
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+				      struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev);

 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
+static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
+					 struct cpuidle_device *dev)
+{return true; }
 static inline int cpuidle_select(struct cpuidle_driver *drv,
 				 struct cpuidle_device *dev)
 {return -ENODEV; }
@@ -183,7 +191,12 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
-static inline void cpuidle_enter_freeze(void) { }
+static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+					     struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+				       struct cpuidle_device *dev)
+{return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 	struct cpuidle_device *dev) {return NULL; }
 #endif

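As before, the header pairs each new declaration with a static inline stub for kernels built without CONFIG_CPU_IDLE, so callers such as the idle loop need no #ifdefs and simply see an error return. A tiny standalone illustration of that compile-out idiom (the HAVE_CPUIDLE macro and fake_* names are invented for this sketch):

#include <stdio.h>

/* Flip to 1 to mimic a build with the subsystem enabled. */
#define HAVE_CPUIDLE 0

struct fake_driver { int unused; };
struct fake_device { int unused; };

#if HAVE_CPUIDLE
int fake_enter_freeze(struct fake_driver *drv, struct fake_device *dev);
#else
/* Compiled-out case: static inline stubs keep callers free of #ifdefs. */
static inline int fake_enter_freeze(struct fake_driver *drv,
				    struct fake_device *dev)
{
	(void)drv;
	(void)dev;
	return -19;	/* mirrors an -ENODEV style error return */
}
#endif

int main(void)
{
	struct fake_driver drv = { 0 };
	struct fake_device dev = { 0 };

	if (fake_enter_freeze(&drv, &dev) < 0)
		printf("no cpuidle support built in, caller just falls back\n");
	return 0;
}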
kernel/sched/idle.c

@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 	unsigned int broadcast;
+	bool reflect;

 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();

+	if (cpuidle_not_available(drv, dev))
+		goto use_default;
+
 	/*
 	 * Suspend-to-idle ("freeze") is a system state in which all user space
 	 * has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void)
 	 * until a proper wakeup interrupt happens.
 	 */
 	if (idle_should_freeze()) {
-		cpuidle_enter_freeze();
-		local_irq_enable();
-		goto exit_idle;
-	}
-
-	/*
-	 * Ask the cpuidle framework to choose a convenient idle state.
-	 * Fall back to the default arch idle method on errors.
-	 */
-	next_state = cpuidle_select(drv, dev);
-	if (next_state < 0) {
-use_default:
-		/*
-		 * We can't use the cpuidle framework, let's use the default
-		 * idle routine.
-		 */
-		if (current_clr_polling_and_test())
+		entered_state = cpuidle_enter_freeze(drv, dev);
+		if (entered_state >= 0) {
 			local_irq_enable();
-		else
-			arch_cpu_idle();
+			goto exit_idle;
+		}

-		goto exit_idle;
+		reflect = false;
+		next_state = cpuidle_find_deepest_state(drv, dev);
+	} else {
+		reflect = true;
+		/*
+		 * Ask the cpuidle framework to choose a convenient idle state.
+		 */
+		next_state = cpuidle_select(drv, dev);
 	}

+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0)
+		goto use_default;
+
 	/*
 	 * The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@ static void cpuidle_idle_call(void)
 	/*
 	 * Give the governor an opportunity to reflect on the outcome
 	 */
-	cpuidle_reflect(dev, entered_state);
+	if (reflect)
+		cpuidle_reflect(dev, entered_state);

 exit_idle:
 	__current_set_polling();
@@ -196,6 +195,19 @@ static void cpuidle_idle_call(void)

 	rcu_idle_exit();
 	start_critical_timings();
+	return;
+
+use_default:
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine.
+	 */
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+
+	goto exit_idle;
 }

 /*
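The kernel/sched/idle.c hunks above move the old mid-function use_default: label into a single fallback block at the end of the function and add a reflect flag so the governor is only asked to reflect on states it actually selected. A compact standalone sketch of the resulting control flow (plain C with invented stub functions, not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

static bool should_freeze = true;	/* pretend suspend-to-idle was requested */

static int enter_freeze(void)    { return -1; }	/* pretend the tick cannot be frozen */
static int deepest_state(void)   { return 2; }
static int governor_select(void) { return 1; }
static void enter_state(int s)   { printf("entering state %d\n", s); }
static void default_idle(void)   { printf("default arch idle\n"); }
static void reflect_outcome(int s) { printf("governor reflects on %d\n", s); }

static void idle_call(void)
{
	int next_state, entered_state;
	bool reflect;

	if (should_freeze) {
		entered_state = enter_freeze();
		if (entered_state >= 0)
			goto exit_idle;

		/* Freezing failed: take the deepest state and skip the governor. */
		reflect = false;
		next_state = deepest_state();
	} else {
		reflect = true;
		next_state = governor_select();
	}

	/* A single fallback point replaces the old mid-function label. */
	if (next_state < 0)
		goto use_default;

	enter_state(next_state);
	entered_state = next_state;

	if (reflect)
		reflect_outcome(entered_state);

exit_idle:
	return;

use_default:
	default_idle();
	goto exit_idle;
}

int main(void)
{
	idle_call();
	return 0;
}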