Fixed MTP to work with TWRP

commit f6dfaef42e by awab228, 2018-06-19 23:16:04 +02:00
50820 changed files with 20846062 additions and 0 deletions

kernel/power/Kconfig (new file, 327 lines)

@@ -0,0 +1,327 @@
config SUSPEND
bool "Suspend to RAM and standby"
depends on ARCH_SUSPEND_POSSIBLE
default y
---help---
Allow the system to enter sleep states in which main memory is
powered and thus its contents are preserved, such as the
suspend-to-RAM state (e.g. the ACPI S3 state).
config SUSPEND_FREEZER
bool "Enable freezer for suspend to RAM/standby" \
if ARCH_WANTS_FREEZER_CONTROL || BROKEN
depends on SUSPEND
default y
help
This allows you to turn off the freezer for suspend. If this is
done, no tasks are frozen for suspend to RAM/standby.
Turning OFF this setting is NOT recommended! If in doubt, say Y.
config WAKELOCK
bool "Android's method of preventing suspend"
default y
---help---
This allows applications to prevent the CPU from suspending while
they need it.
Say Y if you are running an android userspace.
config HIBERNATE_CALLBACKS
bool
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on SWAP && ARCH_HIBERNATION_POSSIBLE
select HIBERNATE_CALLBACKS
select LZO_COMPRESS
select LZO_DECOMPRESS
select CRC32
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
system and powers it off; and restores that checkpoint on reboot.
You can suspend your machine with 'echo disk > /sys/power/state'
after placing resume=/dev/swappartition on the kernel command line
in your bootloader's configuration file.
Alternatively, you can use the additional userland tools available
from <http://suspend.sf.net>.
In principle it does not require ACPI or APM, although for example
ACPI will be used for the final steps when it is available. One
of the reasons to use software suspend is that the firmware hooks
for suspend states like suspend-to-RAM (STR) often don't work very
well with Linux.
It creates an image which is saved in your active swap. Upon the next
boot, pass the 'resume=/dev/swappartition' argument to the kernel to
have it detect the saved image, restore memory state from it, and
continue to run as before. If you do not want the previous state to
be reloaded, then use the 'noresume' kernel command line argument.
Note, however, that fsck will be run on your filesystems and you will
need to run mkswap against the swap partition used for the suspend.
It also works with swap files to a limited extent (for details see
<file:Documentation/power/swsusp-and-swap-files.txt>).
Right now you may boot without resuming and resume later but in the
meantime you cannot use the swap partition(s)/file(s) involved in
suspending. Also in this case you must not use the filesystems
that were mounted before the suspend. In particular, you MUST NOT
MOUNT any journaled filesystems mounted before the suspend or they
will get corrupted in a nasty way.
For more information take a look at <file:Documentation/power/swsusp.txt>.
config ARCH_SAVE_PAGE_KEYS
bool
config PM_STD_PARTITION
string "Default resume partition"
depends on HIBERNATION
default ""
---help---
The default resume partition is the partition that the suspend-
to-disk implementation will look in for a suspended disk image.
The partition specified here will be different for almost every user.
It should be a valid swap partition (at least for now) that is turned
on before suspending.
The partition specified can be overridden by specifying:
resume=/dev/<other device>
which will set the resume partition to the device specified.
Note there is currently not a way to specify which device to save the
suspended image to. It will simply pick the first available swap
device.
config PM_SLEEP
def_bool y
depends on SUSPEND || HIBERNATE_CALLBACKS
config PM_SLEEP_SMP
def_bool y
depends on SMP
depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
depends on PM_SLEEP
select HOTPLUG_CPU
config PM_AUTOSLEEP
bool "Opportunistic sleep"
depends on PM_SLEEP
default n
---help---
Allow the kernel to trigger a system transition into a global sleep
state automatically whenever there are no active wakeup sources.
config PM_WAKELOCKS
bool "User space wakeup sources interface"
depends on PM_SLEEP
default n
---help---
Allow user space to create, activate and deactivate wakeup source
objects with the help of a sysfs-based interface.
config PM_WAKELOCKS_LIMIT
int "Maximum number of user space wakeup sources (0 = no limit)"
range 0 100000
default 100
depends on PM_WAKELOCKS
config PM_WAKELOCKS_GC
bool "Garbage collector for user space wakeup sources"
depends on PM_WAKELOCKS
default y
config PM_RUNTIME
bool "Run-time PM core functionality"
depends on !IA64_HP_SIM
---help---
Enable functionality allowing I/O devices to be put into energy-saving
(low power) states at run time (or autosuspended) after a specified
period of inactivity and woken up in response to a hardware-generated
wake-up event or a driver's request.
Hardware support is generally required for this functionality to work
and the bus type drivers of the buses the devices are on are
responsible for the actual handling of the autosuspend requests and
wake-up events.
config PM
def_bool y
depends on PM_SLEEP || PM_RUNTIME
config PM_DEBUG
bool "Power Management Debug Support"
depends on PM
---help---
This option enables various debugging support in the Power Management
code. This is helpful when debugging and reporting PM bugs, like
suspend support.
config PM_ADVANCED_DEBUG
bool "Extra PM attributes in sysfs for low-level debugging/testing"
depends on PM_DEBUG
---help---
Add extra sysfs attributes allowing one to access some Power Management
fields of device objects from user space. If you are not a kernel
developer interested in debugging/testing Power Management, say "no".
config PM_TEST_SUSPEND
bool "Test suspend/resume and wakealarm during bootup"
depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
---help---
This option will let you suspend your machine during bootup, and
make it wake up a few seconds later using an RTC wakeup alarm.
Enable this with a kernel parameter like "test_suspend=mem".
You probably want to have your system's RTC driver statically
linked, ensuring that it's available when this test runs.
config PM_SLEEP_DEBUG
def_bool y
depends on PM_DEBUG && PM_SLEEP
config DPM_WATCHDOG
bool "Device suspend/resume watchdog"
depends on PM_DEBUG && PSTORE
---help---
Sets up a watchdog timer to capture drivers that are
locked up attempting to suspend/resume a device.
A detected lockup causes system panic with message
captured in pstore device for inspection in subsequent
boot session.
config DPM_WATCHDOG_TIMEOUT
int "Watchdog timeout in seconds"
range 1 120
default 12
depends on DPM_WATCHDOG
config PM_TRACE
bool
help
This enables code to save the last PM event point across
reboot. The architecture needs to support this, x86 for
example does by saving things in the RTC, see below.
The architecture specific code must provide the extern
functions from <linux/resume-trace.h> as well as the
<asm/resume-trace.h> header with a TRACE_RESUME() macro.
The way the information is presented is architecture-
dependent, x86 will print the information during a
late_initcall.
config PM_TRACE_RTC
bool "Suspend/resume event tracing"
depends on PM_SLEEP_DEBUG
depends on X86
select PM_TRACE
---help---
This enables some cheesy code to save the last PM event point in the
RTC across reboots, so that you can debug a machine that just hangs
during suspend (or more commonly, during resume).
To use this debugging feature you should attempt to suspend the
machine, reboot it and then run
dmesg -s 1000000 | grep 'hash matches'
CAUTION: this option will cause your machine's real-time clock to be
set to an invalid time after a resume.
config APM_EMULATION
tristate "Advanced Power Management Emulation"
depends on PM && SYS_SUPPORTS_APM_EMULATION
help
APM is a BIOS specification for saving power using several different
techniques. This is mostly useful for battery powered laptops with
APM compliant BIOSes. If you say Y here, the system time will be
reset after a RESUME operation, the /proc/apm device will provide
battery status information, and user-space programs will receive
notification of APM "events" (e.g. battery status change).
In order to use APM, you will need supporting software. For location
and more information, read <file:Documentation/power/apm-acpi.txt>
and the Battery Powered Linux mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
This driver does not spin down disk drives (see the hdparm(8)
manpage ("man 8 hdparm") for that), and it doesn't turn off
VESA-compliant "green" monitors.
Generally, if you don't have a battery in your machine, there isn't
much point in using this driver and you should say N. If you get
random kernel OOPSes or reboots that don't seem to be related to
anything, try disabling/enabling this option (or disabling/enabling
APM in your BIOS).
config PM_OPP
bool
---help---
SOCs have a standard set of tuples consisting of frequency and
voltage pairs that the device will support per voltage domain. This
is called Operating Performance Point or OPP. The actual definitions
of OPP vary over silicon within the same family of devices.
OPP layer organizes the data internally using device pointers
representing individual voltage domains and provides SOC
implementations a ready to use framework to manage OPPs.
For more information, read <file:Documentation/power/opp.txt>
config PM_CLK
def_bool y
depends on PM && HAVE_CLK
config PM_GENERIC_DOMAINS
bool
depends on PM
config WQ_POWER_EFFICIENT_DEFAULT
bool "Enable workqueue power-efficient mode by default"
depends on PM
default n
help
Per-cpu workqueues are generally preferred because they show
better performance thanks to cache locality; unfortunately,
per-cpu workqueues tend to be more power hungry than unbound
workqueues.
Enabling workqueue.power_efficient kernel parameter makes the
per-cpu workqueues which were observed to contribute
significantly to power consumption unbound, leading to measurably
lower power usage at the cost of small performance overhead.
This config option determines whether workqueue.power_efficient
is enabled by default.
If in doubt, say N.
config PM_GENERIC_DOMAINS_SLEEP
def_bool y
depends on PM_SLEEP && PM_GENERIC_DOMAINS
config PM_GENERIC_DOMAINS_RUNTIME
def_bool y
depends on PM_RUNTIME && PM_GENERIC_DOMAINS
config PM_GENERIC_DOMAINS_OF
def_bool y
depends on PM_GENERIC_DOMAINS && OF
config CPU_PM
bool
depends on SUSPEND || CPU_IDLE
config SUSPEND_TIME
bool "Log time spent in suspend"
---help---
Prints the time spent in suspend in the kernel log, and
keeps statistics on the time spent in suspend in
/sys/kernel/debug/suspend_time
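
The sysfs files described in the help texts above ('state' from SUSPEND/HIBERNATION, 'wake_lock'/'wake_unlock' from PM_WAKELOCKS) are plain text nodes under /sys/power. A minimal user-space sketch, assuming the kernel was built with these options and the caller has root privileges:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Hold or release a named user-space wakeup source (PM_WAKELOCKS). */
static int set_wakelock(const char *name, int hold)
{
	const char *node = hold ? "/sys/power/wake_lock"
				: "/sys/power/wake_unlock";
	int fd = open(node, O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, name, strlen(name));
	close(fd);
	return 0;
}

/* Request a sleep state: "mem" for suspend-to-RAM, "disk" to hibernate. */
static int enter_state(const char *state)
{
	int fd = open("/sys/power/state", O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, state, strlen(state));
	close(fd);
	return 0;
}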

kernel/power/Makefile (new file, 18 lines)

@@ -0,0 +1,18 @@
ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG
obj-y += qos.o
obj-$(CONFIG_PM) += main.o
obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
block_io.o
obj-$(CONFIG_PM_AUTOSLEEP) += autosleep.o
obj-$(CONFIG_PM_WAKELOCKS) += wakelock.o
obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
obj-$(CONFIG_SUSPEND) += wakeup_reason.o

kernel/power/autosleep.c (new file, 128 lines)

@@ -0,0 +1,128 @@
/*
* kernel/power/autosleep.c
*
* Opportunistic sleep support.
*
* Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
*/
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm_wakeup.h>
#include "power.h"
static suspend_state_t autosleep_state;
static struct workqueue_struct *autosleep_wq;
/*
* Note: it is only safe to mutex_lock(&autosleep_lock) if a wakeup_source
* is active, otherwise a deadlock with try_to_suspend() is possible.
* Alternatively mutex_lock_interruptible() can be used. This will then fail
* if an auto_sleep cycle tries to freeze processes.
*/
static DEFINE_MUTEX(autosleep_lock);
static struct wakeup_source *autosleep_ws;
static void try_to_suspend(struct work_struct *work)
{
unsigned int initial_count, final_count;
if (!pm_get_wakeup_count(&initial_count, true))
goto out;
mutex_lock(&autosleep_lock);
if (!pm_save_wakeup_count(initial_count) ||
system_state != SYSTEM_RUNNING) {
mutex_unlock(&autosleep_lock);
goto out;
}
if (autosleep_state == PM_SUSPEND_ON) {
mutex_unlock(&autosleep_lock);
return;
}
if (autosleep_state >= PM_SUSPEND_MAX)
hibernate();
else
pm_suspend(autosleep_state);
mutex_unlock(&autosleep_lock);
if (!pm_get_wakeup_count(&final_count, false))
goto out;
/*
* If the wakeup occurred for an unknown reason, wait to prevent the
* system from trying to suspend and waking up in a tight loop.
*/
if (final_count == initial_count)
schedule_timeout_uninterruptible(HZ / 2);
out:
queue_up_suspend_work();
}
static DECLARE_WORK(suspend_work, try_to_suspend);
void queue_up_suspend_work(void)
{
if (autosleep_state > PM_SUSPEND_ON)
queue_work(autosleep_wq, &suspend_work);
}
suspend_state_t pm_autosleep_state(void)
{
return autosleep_state;
}
int pm_autosleep_lock(void)
{
return mutex_lock_interruptible(&autosleep_lock);
}
void pm_autosleep_unlock(void)
{
mutex_unlock(&autosleep_lock);
}
int pm_autosleep_set_state(suspend_state_t state)
{
#ifndef CONFIG_HIBERNATION
if (state >= PM_SUSPEND_MAX)
return -EINVAL;
#endif
__pm_stay_awake(autosleep_ws);
mutex_lock(&autosleep_lock);
autosleep_state = state;
__pm_relax(autosleep_ws);
if (state > PM_SUSPEND_ON) {
pm_wakep_autosleep_enabled(true);
queue_up_suspend_work();
} else {
pm_wakep_autosleep_enabled(false);
}
mutex_unlock(&autosleep_lock);
return 0;
}
int __init pm_autosleep_init(void)
{
autosleep_ws = wakeup_source_register("autosleep");
if (!autosleep_ws)
return -ENOMEM;
autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
if (autosleep_wq)
return 0;
wakeup_source_unregister(autosleep_ws);
return -ENOMEM;
}
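
User space drives this mechanism through the 'autosleep' file handled by autosleep_store() in main.c later in this commit. A short sketch (assumes root and CONFIG_PM_AUTOSLEEP; writing "off" disables it again):

#include <fcntl.h>
#include <unistd.h>

static int enable_autosleep(void)
{
	int fd = open("/sys/power/autosleep", O_WRONLY);

	if (fd < 0)
		return -1;
	/* "mem" = opportunistic suspend-to-RAM; "disk" needs CONFIG_HIBERNATION */
	write(fd, "mem", 3);
	close(fd);
	return 0;
}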

kernel/power/block_io.c (new file, 103 lines)

@@ -0,0 +1,103 @@
/*
* This file provides functions for block I/O operations on swap/file.
*
* Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*/
#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include "power.h"
/**
* submit - submit BIO request.
* @rw: READ or WRITE.
* @bdev: target block device.
* @sector: physical offset of the page, in 512-byte sectors.
* @page: page we're reading or writing.
* @bio_chain: list of pending bios (for async reading)
*
* Straight from the textbook - allocate and initialize the bio.
* If we're reading, make sure the page is marked as dirty.
* Then submit it and, if @bio_chain == NULL, wait.
*/
static int submit(int rw, struct block_device *bdev, sector_t sector,
struct page *page, struct bio **bio_chain)
{
const int bio_rw = rw | REQ_SYNC;
struct bio *bio;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = end_swap_bio_read;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
(unsigned long long)sector);
bio_put(bio);
return -EFAULT;
}
lock_page(page);
bio_get(bio);
if (bio_chain == NULL) {
submit_bio(bio_rw, bio);
wait_on_page_locked(page);
if (rw == READ)
bio_set_pages_dirty(bio);
bio_put(bio);
} else {
if (rw == READ)
get_page(page); /* These pages are freed later */
bio->bi_private = *bio_chain;
*bio_chain = bio;
submit_bio(bio_rw, bio);
}
return 0;
}
int hib_bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
return submit(READ, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
virt_to_page(addr), bio_chain);
}
int hib_bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
return submit(WRITE, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
virt_to_page(addr), bio_chain);
}
int hib_wait_on_bio_chain(struct bio **bio_chain)
{
struct bio *bio;
struct bio *next_bio;
int ret = 0;
if (bio_chain == NULL)
return 0;
bio = *bio_chain;
if (bio == NULL)
return 0;
while (bio) {
struct page *page;
next_bio = bio->bi_private;
page = bio->bi_io_vec[0].bv_page;
wait_on_page_locked(page);
if (!PageUptodate(page) || PageError(page))
ret = -EIO;
put_page(page);
bio_put(bio);
bio = next_bio;
}
*bio_chain = NULL;
return ret;
}
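
A usage sketch for the chained interface above, roughly how kernel/power/swap.c drives it ('off0'/'buf0' and friends are hypothetical page offsets and page-sized buffers): queue several asynchronous reads on one chain, then wait once for all of them.

struct bio *bio_chain = NULL;
int error;

/* queue two asynchronous page-sized reads from the resume device */
error = hib_bio_read_page(off0, buf0, &bio_chain);
if (!error)
	error = hib_bio_read_page(off1, buf1, &bio_chain);
/* wait for every bio on the chain; returns -EIO if any page failed */
if (!error)
	error = hib_wait_on_bio_chain(&bio_chain);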

kernel/power/console.c (new file, 151 lines)

@@ -0,0 +1,151 @@
/*
* Functions for saving/restoring console.
*
* Originally from swsusp.
*/
#include <linux/console.h>
#include <linux/vt_kern.h>
#include <linux/kbd_kern.h>
#include <linux/vt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "power.h"
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
static int orig_fgconsole, orig_kmsg;
static DEFINE_MUTEX(vt_switch_mutex);
struct pm_vt_switch {
struct list_head head;
struct device *dev;
bool required;
};
static LIST_HEAD(pm_vt_switch_list);
/**
* pm_vt_switch_required - indicate VT switch at suspend requirements
* @dev: device
* @required: if true, caller needs VT switch at suspend/resume time
*
* The different console drivers may or may not require VT switches across
* suspend/resume, depending on how they handle restoring video state and
* what may be running.
*
* Drivers can indicate support for switchless suspend/resume, which can
* save time and flicker, by using this routine and passing 'false' as
* the argument. If any loaded driver needs VT switching, or the
* no_console_suspend argument has been passed on the command line, VT
* switches will occur.
*/
void pm_vt_switch_required(struct device *dev, bool required)
{
struct pm_vt_switch *entry, *tmp;
mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
/* already registered, update requirement */
tmp->required = required;
goto out;
}
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->required = required;
entry->dev = dev;
list_add(&entry->head, &pm_vt_switch_list);
out:
mutex_unlock(&vt_switch_mutex);
}
EXPORT_SYMBOL(pm_vt_switch_required);
/**
* pm_vt_switch_unregister - stop tracking a device's VT switching needs
* @dev: device
*
* Remove @dev from the vt switch list.
*/
void pm_vt_switch_unregister(struct device *dev)
{
struct pm_vt_switch *tmp;
mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
list_del(&tmp->head);
kfree(tmp);
break;
}
}
mutex_unlock(&vt_switch_mutex);
}
EXPORT_SYMBOL(pm_vt_switch_unregister);
/*
* There are three cases when a VT switch on suspend/resume is required:
* 1) no driver has indicated a requirement one way or another, so preserve
* the old behavior
* 2) console suspend is disabled, we want to see debug messages across
* suspend/resume
* 3) any registered driver indicates it needs a VT switch
*
* If none of these conditions is present, meaning we have at least one driver
* that doesn't need the switch, and none that do, we can avoid it to make
* resume look a little prettier (and suspend too, but that's usually hidden,
* e.g. when closing the lid on a laptop).
*/
static bool pm_vt_switch(void)
{
struct pm_vt_switch *entry;
bool ret = true;
mutex_lock(&vt_switch_mutex);
if (list_empty(&pm_vt_switch_list))
goto out;
if (!console_suspend_enabled)
goto out;
list_for_each_entry(entry, &pm_vt_switch_list, head) {
if (entry->required)
goto out;
}
ret = false;
out:
mutex_unlock(&vt_switch_mutex);
return ret;
}
int pm_prepare_console(void)
{
if (!pm_vt_switch())
return 0;
orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1);
if (orig_fgconsole < 0)
return 1;
orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
return 0;
}
void pm_restore_console(void)
{
if (!pm_vt_switch())
return;
if (orig_fgconsole >= 0) {
vt_move_to_console(orig_fgconsole, 0);
vt_kmsg_redirect(orig_kmsg);
}
}
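
A caller-side sketch (editor's addition; the 'foo' driver is hypothetical): a video driver that restores its own hardware state would opt out of VT switching at probe time and unregister on removal, using the pair of calls exported above.

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	/* false: no VT switch needed across suspend/resume for this device */
	pm_vt_switch_required(&pdev->dev, false);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	pm_vt_switch_unregister(&pdev->dev);
	return 0;
}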

kernel/power/hibernate.c (new file, 1168 lines)

(File diff suppressed because it is too large.)

kernel/power/main.c (new file, 675 lines)

@@ -0,0 +1,675 @@
/*
* kernel/power/main.c - PM subsystem core functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
* This file is released under the GPLv2
*
*/
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "power.h"
DEFINE_MUTEX(pm_mutex);
#ifdef CONFIG_PM_SLEEP
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
int register_pm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);
int unregister_pm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int pm_notifier_call_chain(unsigned long val)
{
int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 0;
static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", pm_async_enabled);
}
static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
pm_async_enabled = val;
return n;
}
power_attr(pm_async);
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;
static const char * const pm_tests[__TEST_AFTER_LAST] = {
[TEST_NONE] = "none",
[TEST_CORE] = "core",
[TEST_CPUS] = "processors",
[TEST_PLATFORM] = "platform",
[TEST_DEVICES] = "devices",
[TEST_FREEZER] = "freezer",
};
static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *s = buf;
int level;
for (level = TEST_FIRST; level <= TEST_MAX; level++)
if (pm_tests[level]) {
if (level == pm_test_level)
s += sprintf(s, "[%s] ", pm_tests[level]);
else
s += sprintf(s, "%s ", pm_tests[level]);
}
if (s != buf)
/* convert the last space to a newline */
*(s-1) = '\n';
return (s - buf);
}
static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
const char * const *s;
int level;
char *p;
int len;
int error = -EINVAL;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
lock_system_sleep();
level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
pm_test_level = level;
error = 0;
break;
}
unlock_system_sleep();
return error ? error : n;
}
power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
{
switch (step) {
case SUSPEND_FREEZE:
return "freeze";
case SUSPEND_PREPARE:
return "prepare";
case SUSPEND_SUSPEND:
return "suspend";
case SUSPEND_SUSPEND_NOIRQ:
return "suspend_noirq";
case SUSPEND_RESUME_NOIRQ:
return "resume_noirq";
case SUSPEND_RESUME:
return "resume";
default:
return "";
}
}
static int suspend_stats_show(struct seq_file *s, void *unused)
{
int i, index, last_dev, last_errno, last_step;
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
last_errno %= REC_FAILED_NUM;
last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
last_step %= REC_FAILED_NUM;
seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
"success", suspend_stats.success,
"fail", suspend_stats.fail,
"failed_freeze", suspend_stats.failed_freeze,
"failed_prepare", suspend_stats.failed_prepare,
"failed_suspend", suspend_stats.failed_suspend,
"failed_suspend_late",
suspend_stats.failed_suspend_late,
"failed_suspend_noirq",
suspend_stats.failed_suspend_noirq,
"failed_resume", suspend_stats.failed_resume,
"failed_resume_early",
suspend_stats.failed_resume_early,
"failed_resume_noirq",
suspend_stats.failed_resume_noirq);
seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
suspend_stats.failed_devs[last_dev]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_dev + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_stats.failed_devs[index]);
}
seq_printf(s, " last_failed_errno:\t%-d\n",
suspend_stats.errno[last_errno]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_errno + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-d\n",
suspend_stats.errno[index]);
}
seq_printf(s, " last_failed_step:\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[last_step]));
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_step + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[index]));
}
return 0;
}
static int suspend_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, suspend_stats_show, NULL);
}
static const struct file_operations suspend_stats_operations = {
.open = suspend_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init pm_debugfs_init(void)
{
debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
NULL, NULL, &suspend_stats_operations);
return 0;
}
late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
/*
* pm_print_times: print time taken by devices to suspend and resume.
*
* show() returns whether printing of suspend and resume times is enabled.
* store() accepts 0 or 1. 0 disables printing and 1 enables it.
*/
bool pm_print_times_enabled;
static ssize_t pm_print_times_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", pm_print_times_enabled);
}
static ssize_t pm_print_times_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
pm_print_times_enabled = !!val;
return n;
}
power_attr(pm_print_times);
static inline void pm_print_times_init(void)
{
pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
struct kobject *power_kobj;
/**
* state - control system sleep states.
*
* show() returns available sleep state labels, which may be "mem", "standby",
* "freeze" and "disk" (hibernation). See Documentation/power/states.txt for a
* description of what they mean.
*
* store() accepts one of those strings, translates it into the proper
* enumerated value, and initiates a suspend transition.
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *s = buf;
#ifdef CONFIG_SUSPEND
suspend_state_t i;
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
if (pm_states[i])
s += sprintf(s,"%s ", pm_states[i]);
#endif
if (hibernation_available())
s += sprintf(s, "disk ");
if (s != buf)
/* convert the last space to a newline */
*(s-1) = '\n';
return (s - buf);
}
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
suspend_state_t state;
#endif
char *p;
int len;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
/* Check hibernation first. */
if (len == 4 && !strncmp(buf, "disk", len))
return PM_SUSPEND_MAX;
#ifdef CONFIG_SUSPEND
for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
const char *label = pm_states[state];
if (label && len == strlen(label) && !strncmp(buf, label, len))
return state;
}
#endif
return PM_SUSPEND_ON;
}
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
suspend_state_t state;
int error;
error = pm_autosleep_lock();
if (error)
return error;
if (pm_autosleep_state() > PM_SUSPEND_ON) {
error = -EBUSY;
goto out;
}
state = decode_state(buf, n);
if (state < PM_SUSPEND_MAX)
error = pm_suspend(state);
else if (state == PM_SUSPEND_MAX)
error = hibernate();
else
error = -EINVAL;
out:
pm_autosleep_unlock();
return error ? error : n;
}
power_attr(state);
#ifdef CONFIG_PM_SLEEP
/*
* The 'wakeup_count' attribute, along with the functions defined in
* drivers/base/power/wakeup.c, provides a means by which wakeup events can be
* handled in a non-racy way.
*
* If a wakeup event occurs when the system is in a sleep state, it simply is
* woken up. In turn, if an event that would wake the system up from a sleep
* state occurs when it is undergoing a transition to that sleep state, the
* transition should be aborted. Moreover, if such an event occurs when the
* system is in the working state, an attempt to start a transition to the
* given sleep state should fail during certain period after the detection of
* the event. Using the 'state' attribute alone is not sufficient to satisfy
* these requirements, because a wakeup event may occur exactly when 'state'
* is being written to and may be delivered to user space right before it is
* frozen, so the event will remain only partially processed until the system is
* woken up by another event. In particular, it won't cause the transition to
* a sleep state to be aborted.
*
* This difficulty may be overcome if user space uses 'wakeup_count' before
* writing to 'state'. It first should read from 'wakeup_count' and store
* the read value. Then, after carrying out its own preparations for the system
* transition to a sleep state, it should write the stored value to
* 'wakeup_count'. If that fails, at least one wakeup event has occurred since
* 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
* is allowed to write to 'state', but the transition will be aborted if there
* are any wakeup events detected after 'wakeup_count' was written to.
*/
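/*
 * Illustration (editor's sketch, not part of the original file) of the
 * user-space side of the protocol described above; error handling is
 * elided and prepare_for_sleep() is a hypothetical placeholder:
 *
 *	char count[16];
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *	ssize_t len = read(fd, count, sizeof(count));
 *
 *	prepare_for_sleep();
 *
 *	if (write(fd, count, len) >= 0) {
 *		int sfd = open("/sys/power/state", O_WRONLY);
 *		write(sfd, "mem", 3);
 *		close(sfd);
 *	}
 *	close(fd);
 *
 * If the write of the stored count fails, at least one wakeup event has
 * occurred since the read and the transition must not be started.
 */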
static ssize_t wakeup_count_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
unsigned int val;
return pm_get_wakeup_count(&val, true) ?
sprintf(buf, "%u\n", val) : -EINTR;
}
static ssize_t wakeup_count_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int val;
int error;
error = pm_autosleep_lock();
if (error)
return error;
if (pm_autosleep_state() > PM_SUSPEND_ON) {
error = -EBUSY;
goto out;
}
error = -EINVAL;
if (sscanf(buf, "%u", &val) == 1) {
if (pm_save_wakeup_count(val))
error = n;
else
pm_print_active_wakeup_sources();
}
out:
pm_autosleep_unlock();
return error;
}
power_attr(wakeup_count);
#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
suspend_state_t state = pm_autosleep_state();
if (state == PM_SUSPEND_ON)
return sprintf(buf, "off\n");
#ifdef CONFIG_SUSPEND
if (state < PM_SUSPEND_MAX)
return sprintf(buf, "%s\n", pm_states[state] ?
pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
return sprintf(buf, "disk\n");
#else
return sprintf(buf, "error");
#endif
}
static ssize_t autosleep_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
suspend_state_t state = decode_state(buf, n);
int error;
if (state == PM_SUSPEND_ON
&& strcmp(buf, "off") && strcmp(buf, "off\n"))
return -EINVAL;
error = pm_autosleep_set_state(state);
return error ? error : n;
}
power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */
#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return pm_show_wakelocks(buf, true);
}
static ssize_t wake_lock_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int error = pm_wake_lock(buf);
return error ? error : n;
}
power_attr(wake_lock);
static ssize_t wake_unlock_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return pm_show_wakelocks(buf, false);
}
static ssize_t wake_unlock_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int error = pm_wake_unlock(buf);
return error ? error : n;
}
power_attr(wake_unlock);
#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;
static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", pm_trace_enabled);
}
static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
int val;
if (sscanf(buf, "%d", &val) == 1) {
pm_trace_enabled = !!val;
if (pm_trace_enabled) {
pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
"PM: Correct system time has to be restored manually after resume.\n");
}
return n;
}
return -EINVAL;
}
power_attr(pm_trace);
static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return show_trace_dev_match(buf, PAGE_SIZE);
}
static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
return -EINVAL;
}
power_attr(pm_trace_dev_match);
#endif /* CONFIG_PM_TRACE */
#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", freeze_timeout_msecs);
}
static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
freeze_timeout_msecs = val;
return n;
}
power_attr(pm_freeze_timeout);
#endif /* CONFIG_FREEZER*/
#ifdef CONFIG_SW_SELF_DISCHARGING
static char selfdischg_usage_str[] =
"[START]\n"
"/sys/power/cpuhotplug/enable 0\n"
"/sys/power/cpufreq_self_discharging 1144000\n"
"[STOP]\n"
"/sys/power/cpufreq_self_discharging 0\n"
"/sys/power/cpuhotplug/enable 1\n"
"[END]\n";
static ssize_t selfdischg_usage_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%s", selfdischg_usage_str);
}
static struct kobj_attribute selfdischg_usage_attr = {
.attr = {
.name = __stringify(selfdischg_usage),
.mode = 0444,
},
.show = selfdischg_usage_show,
};
#endif
static struct attribute * g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
&pm_async_attr.attr,
&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
&wake_lock_attr.attr,
&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
&pm_print_times_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
&pm_freeze_timeout_attr.attr,
#endif
#ifdef CONFIG_SW_SELF_DISCHARGING
&selfdischg_usage_attr.attr,
#endif
NULL,
};
static struct attribute_group attr_group = {
.attrs = g,
};
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
static int __init pm_start_workqueue(void)
{
pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
return pm_wq ? 0 : -ENOMEM;
}
static int __init pm_init(void)
{
int error = pm_start_workqueue();
if (error)
return error;
hibernate_image_size_init();
hibernate_reserved_size_init();
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
error = sysfs_create_group(power_kobj, &attr_group);
if (error)
return error;
pm_print_times_init();
return pm_autosleep_init();
}
core_initcall(pm_init);

kernel/power/power.h (new file, 298 lines)

@@ -0,0 +1,298 @@
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
struct swsusp_info {
struct new_utsname uts;
u32 version_code;
unsigned long num_physpages;
int cpus;
unsigned long image_pages;
unsigned long pages;
unsigned long size;
} __aligned(PAGE_SIZE);
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);
#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)
extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);
static inline int init_header_complete(struct swsusp_info *info)
{
return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}
static inline char *check_image_kernel(struct swsusp_info *info)
{
return arch_hibernation_header_restore(info) ?
"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
/*
* Keep some memory free so that I/O operations can succeed without paging
* [Might this be more than 4 MB?]
*/
#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)
/*
* Keep 1 MB of memory free so that device drivers can allocate some pages in
* their .suspend() routines without breaking the suspend to disk.
*/
#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
asmlinkage int swsusp_save(void);
/* kernel/power/hibernate.c */
extern bool freezer_test_done;
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
#else /* !CONFIG_HIBERNATION */
static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */
extern int pfn_is_nosave(unsigned long);
#define power_attr(_name) \
static struct kobj_attribute _name##_attr = { \
.attr = { \
.name = __stringify(_name), \
.mode = 0644, \
}, \
.show = _name##_show, \
.store = _name##_store, \
}
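/*
 * Editor's note: power_attr(foo) expands to a kobj_attribute named
 * foo_attr wired to foo_show() and foo_store(); main.c in this commit
 * uses it as, for example, power_attr(pm_async) right after defining
 * pm_async_show() and pm_async_store().
 */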
/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);
extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);
/**
* Auxiliary structure used for reading the snapshot image data and
* metadata from and writing them to the list of page backup entries
* (PBEs) which is the main data structure of swsusp.
*
* Using struct snapshot_handle we can transfer the image, including its
* metadata, as a continuous sequence of bytes with the help of
* snapshot_read_next() and snapshot_write_next().
*
* The code that writes the image to a storage or transfers it to
* the user land is required to use snapshot_read_next() for this
* purpose and it should not make any assumptions regarding the internal
* structure of the image. Similarly, the code that reads the image from
* a storage or transfers it from the user land is required to use
* snapshot_write_next().
*
* This may allow us to change the internal structure of the image
* in the future with considerably less effort.
*/
struct snapshot_handle {
unsigned int cur; /* number of the block of PAGE_SIZE bytes the
* next operation will refer to (ie. current)
*/
void *buffer; /* address of the block to read from
* or write to
*/
int sync_read; /* Set to one to notify the caller of
* snapshot_write_next() that it may
* need to call wait_on_bio_chain()
*/
};
/* This macro returns the address from/to which the caller of
* snapshot_read_next()/snapshot_write_next() is allowed to
* read/write data after the function returns
*/
#define data_of(handle) ((handle).buffer)
extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;
extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);
/*
* Flags that can be passed from the hibernating kernel to the "boot" kernel in
* the image header.
*/
#define SF_PLATFORM_MODE 1
#define SF_NOCOMPRESS_MODE 2
#define SF_CRC32_MODE 4
/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif
/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;
extern int hib_bio_read_page(pgoff_t page_off, void *addr,
struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);
struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
unsigned int, char *);
#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char *pm_labels[];
extern const char *pm_states[];
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */
#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif
#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif
/*
* Suspend test levels
*/
enum {
/* keep first */
TEST_NONE,
TEST_CORE,
TEST_CPUS,
TEST_PLATFORM,
TEST_DEVICES,
TEST_FREEZER,
/* keep last */
__TEST_AFTER_LAST
};
#define TEST_FIRST TEST_NONE
#define TEST_MAX (__TEST_AFTER_LAST - 1)
extern int pm_test_level;
#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
int error;
error = freeze_processes();
/*
* freeze_processes() automatically thaws every task if freezing
* fails. So we need not do anything extra upon error.
*/
if (error)
return error;
error = freeze_kernel_threads();
/*
* freeze_kernel_threads() thaws only kernel threads upon freezing
* failure. So we have to thaw the userspace tasks ourselves.
*/
if (error)
thaw_processes();
return error;
}
static inline void suspend_thaw_processes(void)
{
thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
return 0;
}
static inline void suspend_thaw_processes(void)
{
}
#endif
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);
#else /* !CONFIG_PM_AUTOSLEEP */
static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }
#endif /* !CONFIG_PM_AUTOSLEEP */
#ifdef CONFIG_PM_WAKELOCKS
/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);
#endif /* !CONFIG_PM_WAKELOCKS */

kernel/power/poweroff.c (new file, 46 lines)

@@ -0,0 +1,46 @@
/*
* poweroff.c - sysrq handler to gracefully power down machine.
*
* This file is released under the GPL v2
*/
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include <linux/cpumask.h>
/*
* When the user hits Sys-Rq o to power down the machine this is the
* callback we use.
*/
static void do_poweroff(struct work_struct *dummy)
{
kernel_power_off();
}
static DECLARE_WORK(poweroff_work, do_poweroff);
static void handle_poweroff(int key)
{
/* run sysrq poweroff on boot cpu */
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
}
static struct sysrq_key_op sysrq_poweroff_op = {
.handler = handle_poweroff,
.help_msg = "poweroff(o)",
.action_msg = "Power Off",
.enable_mask = SYSRQ_ENABLE_BOOT,
};
static int __init pm_sysrq_init(void)
{
register_sysrq_key('o', &sysrq_poweroff_op);
return 0;
}
subsys_initcall(pm_sysrq_init);
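
For testing, the same path can be exercised without a keyboard through /proc/sysrq-trigger (requires CONFIG_MAGIC_SYSRQ and root; this sketch powers the machine off immediately):

#include <fcntl.h>
#include <unistd.h>

static void trigger_poweroff(void)
{
	int fd = open("/proc/sysrq-trigger", O_WRONLY);

	if (fd >= 0) {
		/* same handler as Sysrq-o: schedules kernel_power_off() */
		write(fd, "o", 1);
		close(fd);
	}
}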

kernel/power/process.c (new file, 278 lines)

@@ -0,0 +1,278 @@
/*
* kernel/power/process.c - Functions for starting/stopping processes on
* suspend transitions.
*
* Originally from swsusp.
*/
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/wakeup_reason.h>
/*
* Timeout for stopping processes
*/
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
static int try_to_freeze_tasks(bool user_only)
{
struct task_struct *g, *p;
unsigned long end_time;
unsigned int todo;
bool wq_busy = false;
struct timeval start, end;
u64 elapsed_msecs64;
unsigned int elapsed_msecs;
bool wakeup = false;
int sleep_usecs = USEC_PER_MSEC;
#ifdef CONFIG_PM_SLEEP
char suspend_abort[MAX_SUSPEND_ABORT_LEN];
#endif
do_gettimeofday(&start);
end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
if (!user_only)
freeze_workqueues_begin();
while (true) {
todo = 0;
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
if (p == current || !freeze_task(p))
continue;
if (!freezer_should_skip(p))
todo++;
}
read_unlock(&tasklist_lock);
if (!user_only) {
wq_busy = freeze_workqueues_busy();
todo += wq_busy;
}
if (!todo || time_after(jiffies, end_time))
break;
if (pm_wakeup_pending()) {
#ifdef CONFIG_PM_SLEEP
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
#endif
wakeup = true;
break;
}
/*
* We need to retry, but first give the freezing tasks some
* time to enter the refrigerator. Start with an initial
* 1 ms sleep followed by exponential backoff until 8 ms.
*/
usleep_range(sleep_usecs / 2, sleep_usecs);
if (sleep_usecs < 8 * USEC_PER_MSEC)
sleep_usecs *= 2;
}
do_gettimeofday(&end);
elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
do_div(elapsed_msecs64, NSEC_PER_MSEC);
elapsed_msecs = elapsed_msecs64;
if (wakeup) {
printk("\n");
printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds",
elapsed_msecs / 1000, elapsed_msecs % 1000);
} else if (todo) {
printk("\n");
printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
" (%d tasks refusing to freeze, wq_busy=%d):\n",
elapsed_msecs / 1000, elapsed_msecs % 1000,
todo - wq_busy, wq_busy);
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
if (p != current && !freezer_should_skip(p)
&& freezing(p) && !frozen(p))
sched_show_task(p);
}
read_unlock(&tasklist_lock);
} else {
printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
elapsed_msecs % 1000);
}
return todo ? -EBUSY : 0;
}
static bool __check_frozen_processes(void)
{
struct task_struct *g, *p;
for_each_process_thread(g, p)
if (p != current && !freezer_should_skip(p) && !frozen(p))
return false;
return true;
}
/*
* Returns true if all freezable tasks (except for current) are frozen already
*/
static bool check_frozen_processes(void)
{
bool ret;
read_lock(&tasklist_lock);
ret = __check_frozen_processes();
read_unlock(&tasklist_lock);
return ret;
}
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
* The current thread will not be frozen. The same process that calls
* freeze_processes must later call thaw_processes.
*
* On success, returns 0. On failure, -errno and system is fully thawed.
*/
int freeze_processes(void)
{
int error;
int oom_kills_saved;
error = __usermodehelper_disable(UMH_FREEZING);
if (error)
return error;
/* Make sure this task doesn't get frozen */
current->flags |= PF_SUSPEND_TASK;
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
pm_wakeup_clear();
printk("Freezing user space processes ... ");
pm_freezing = true;
oom_kills_saved = oom_kills_count();
error = try_to_freeze_tasks(true);
if (!error) {
__usermodehelper_set_disable_depth(UMH_DISABLED);
oom_killer_disable();
/*
* There might have been an OOM kill while we were
* freezing tasks and the killed task might be still
* on the way out so we have to double check for race.
*/
if (oom_kills_count() != oom_kills_saved &&
!check_frozen_processes()) {
__usermodehelper_set_disable_depth(UMH_ENABLED);
printk("OOM in progress.");
error = -EBUSY;
} else {
printk("done.");
}
}
printk("\n");
BUG_ON(in_atomic());
if (error)
thaw_processes();
return error;
}
/**
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
*
* On success, returns 0. On failure, -errno and only the kernel threads are
* thawed, so as to give a chance to the caller to do additional cleanups
* (if any) before thawing the userspace tasks. So, it is the responsibility
* of the caller to thaw the userspace tasks, when the time is right.
*/
int freeze_kernel_threads(void)
{
int error;
printk("Freezing remaining freezable tasks ... ");
pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
printk("done.");
printk("\n");
BUG_ON(in_atomic());
if (error)
thaw_kernel_threads();
return error;
}
void thaw_processes(void)
{
struct task_struct *g, *p;
struct task_struct *curr = current;
trace_suspend_resume(TPS("thaw_processes"), 0, true);
if (pm_freezing)
atomic_dec(&system_freezing_cnt);
pm_freezing = false;
pm_nosig_freezing = false;
oom_killer_enable();
printk("Restarting tasks ... ");
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/* No other threads should have PF_SUSPEND_TASK set */
WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
__thaw_task(p);
}
read_unlock(&tasklist_lock);
WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
curr->flags &= ~PF_SUSPEND_TASK;
usermodehelper_enable();
schedule();
printk("done.\n");
trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
void thaw_kernel_threads(void)
{
struct task_struct *g, *p;
pm_nosig_freezing = false;
printk("Restarting kernel threads ... ");
thaw_workqueues();
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
__thaw_task(p);
}
read_unlock(&tasklist_lock);
schedule();
printk("done.\n");
}
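
The pairing rule in the freeze_processes() kerneldoc above yields the usual suspend-path pattern; this sketch mirrors suspend_freeze_processes() from power.h in this same commit:

int error;

error = freeze_processes();	/* thaws everything by itself on failure */
if (error)
	return error;

error = freeze_kernel_threads();	/* thaws only kernel threads on failure */
if (error) {
	thaw_processes();	/* so the caller must thaw user space */
	return error;
}

/* ... suspend devices and enter the sleep state ... */

thaw_processes();	/* on resume; this also thaws kernel threads */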

kernel/power/qos.c (new file, 981 lines)

@@ -0,0 +1,981 @@
/*
* This module exposes the interface to kernel space for specifying
* QoS dependencies. It provides infrastructure for registration of:
*
* Dependents on a QoS value : register requests
* Watchers of QoS value : get notified when target QoS value changes
*
* This QoS design is best effort based. Dependents register their QoS needs.
* Watchers register to keep track of the current QoS needs of the system.
*
* There are 3 basic classes of QoS parameter: latency, timeout, throughput;
* each has defined units:
* latency: usec
* timeout: usec <-- currently not used.
* throughput: kbs (kilo byte / sec)
*
* There are lists of pm_qos_objects each one wrapping requests, notifiers
*
* User mode requests on a QOS parameter register themselves to the
* subsystem by opening the device node /dev/... and writing their request to
* the node. As long as the process holds a file handle open to the node the
* client continues to be accounted for. Upon file release the usermode
* request is removed and a new qos target is computed. This way, when the
* application closes the file descriptor or exits, the request it was
* holding is cleaned up and the pm_qos_object gets a chance to recompute
* the target.
*
* Mark Gross <mgross@linux.intel.com>
*/
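/*
 * Editor's sketch of a kernel-space client of the request API declared in
 * <linux/pm_qos.h> (the 'foo' names are hypothetical):
 *
 *	static struct pm_qos_request foo_req;
 *
 *	pm_qos_add_request(&foo_req, PM_QOS_CPU_DMA_LATENCY, 100);
 *	...
 *	pm_qos_update_request(&foo_req, 50);
 *	...
 *	pm_qos_remove_request(&foo_req);
 *
 * Watchers use pm_qos_add_notifier() on the same class to learn when the
 * aggregate target value changes.
 */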
/*#define DEBUG*/
#include <linux/pm_qos.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <trace/events/power.h>
/*
* locking rule: all changes to constraints or notifiers lists
* or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
* held, taken with _irqsave. One lock to rule them all
*/
struct pm_qos_object {
struct pm_qos_constraints *constraints;
struct miscdevice pm_qos_power_miscdev;
char *name;
};
static DEFINE_SPINLOCK(pm_qos_lock);
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &cpu_dma_lat_notifier,
};
static struct pm_qos_object cpu_dma_pm_qos = {
.constraints = &cpu_dma_constraints,
.name = "cpu_dma_latency",
};
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
.list = PLIST_HEAD_INIT(network_lat_constraints.list),
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &network_lat_notifier,
};
static struct pm_qos_object network_lat_pm_qos = {
.constraints = &network_lat_constraints,
.name = "network_latency",
};
static BLOCKING_NOTIFIER_HEAD(device_throughput_notifier);
static struct pm_qos_constraints device_tput_constraints = {
.list = PLIST_HEAD_INIT(device_tput_constraints.list),
.target_value = PM_QOS_DEVICE_THROUGHPUT_DEFAULT_VALUE,
.default_value = PM_QOS_DEVICE_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_FORCE_MAX,
.notifiers = &device_throughput_notifier,
};
static struct pm_qos_object device_throughput_pm_qos = {
.constraints = &device_tput_constraints,
.name = "device_throughput",
};
#ifdef CONFIG_ARM_EXYNOS_DEVFREQ_DEBUG
static BLOCKING_NOTIFIER_HEAD(device_throughput_max_notifier);
static struct pm_qos_constraints device_tput_max_constraints = {
.list = PLIST_HEAD_INIT(device_tput_max_constraints.list),
.target_value = PM_QOS_DEVICE_THROUGHPUT_MAX_DEFAULT_VALUE,
.default_value = PM_QOS_DEVICE_THROUGHPUT_MAX_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &device_throughput_max_notifier,
};
static struct pm_qos_object device_throughput_max_pm_qos = {
.constraints = &device_tput_max_constraints,
.name = "device_throughput_max",
};
#endif
static BLOCKING_NOTIFIER_HEAD(bus_throughput_notifier);
static struct pm_qos_constraints bus_tput_constraints = {
.list = PLIST_HEAD_INIT(bus_tput_constraints.list),
.target_value = PM_QOS_BUS_THROUGHPUT_DEFAULT_VALUE,
.default_value = PM_QOS_BUS_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &bus_throughput_notifier,
};
static struct pm_qos_object bus_throughput_pm_qos = {
.constraints = &bus_tput_constraints,
.name = "bus_throughput",
};
static BLOCKING_NOTIFIER_HEAD(bus_throughput_max_notifier);
static struct pm_qos_constraints bus_tput_max_constraints = {
.list = PLIST_HEAD_INIT(bus_tput_max_constraints.list),
.target_value = PM_QOS_BUS_THROUGHPUT_MAX_DEFAULT_VALUE,
.default_value = PM_QOS_BUS_THROUGHPUT_MAX_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &bus_throughput_max_notifier,
};
static struct pm_qos_object bus_throughput_max_pm_qos = {
.constraints = &bus_tput_max_constraints,
.name = "bus_throughput_max",
};
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
.list = PLIST_HEAD_INIT(network_tput_constraints.list),
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &network_throughput_notifier,
};
static struct pm_qos_object network_throughput_pm_qos = {
.constraints = &network_tput_constraints,
.name = "network_throughput",
};
static BLOCKING_NOTIFIER_HEAD(cluster1_freq_min_notifier);
static struct pm_qos_constraints cluster1_freq_min_constraints = {
.list = PLIST_HEAD_INIT(cluster1_freq_min_constraints.list),
.target_value = PM_QOS_CLUSTER1_FREQ_MIN_DEFAULT_VALUE,
.default_value = PM_QOS_CLUSTER1_FREQ_MIN_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &cluster1_freq_min_notifier,
};
static struct pm_qos_object cluster1_freq_min_pm_qos = {
.constraints = &cluster1_freq_min_constraints,
.name = "cluster1_freq_min",
};
static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
static struct pm_qos_constraints memory_bw_constraints = {
.list = PLIST_HEAD_INIT(memory_bw_constraints.list),
.target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
.default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
.type = PM_QOS_SUM,
.notifiers = &memory_bandwidth_notifier,
};
static struct pm_qos_object memory_bandwidth_pm_qos = {
.constraints = &memory_bw_constraints,
.name = "memory_bandwidth",
};
static BLOCKING_NOTIFIER_HEAD(cluster1_freq_max_notifier);
static struct pm_qos_constraints cluster1_freq_max_constraints = {
.list = PLIST_HEAD_INIT(cluster1_freq_max_constraints.list),
.target_value = PM_QOS_CLUSTER1_FREQ_MAX_DEFAULT_VALUE,
.default_value = PM_QOS_CLUSTER1_FREQ_MAX_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &cluster1_freq_max_notifier,
};
static struct pm_qos_object cluster1_freq_max_pm_qos = {
.constraints = &cluster1_freq_max_constraints,
.name = "cluster1_freq_max",
};
static BLOCKING_NOTIFIER_HEAD(cluster0_freq_min_notifier);
static struct pm_qos_constraints cluster0_freq_min_constraints = {
.list = PLIST_HEAD_INIT(cluster0_freq_min_constraints.list),
.target_value = PM_QOS_CLUSTER0_FREQ_MIN_DEFAULT_VALUE,
.default_value = PM_QOS_CLUSTER0_FREQ_MIN_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &cluster0_freq_min_notifier,
};
static struct pm_qos_object cluster0_freq_min_pm_qos = {
.constraints = &cluster0_freq_min_constraints,
.name = "cluster0_freq_min",
};
static BLOCKING_NOTIFIER_HEAD(cluster0_freq_max_notifier);
static struct pm_qos_constraints cluster0_freq_max_constraints = {
.list = PLIST_HEAD_INIT(cluster0_freq_max_constraints.list),
.target_value = PM_QOS_CLUSTER0_FREQ_MAX_DEFAULT_VALUE,
.default_value = PM_QOS_CLUSTER0_FREQ_MAX_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &cluster0_freq_max_notifier,
};
static struct pm_qos_object cluster0_freq_max_pm_qos = {
.constraints = &cluster0_freq_max_constraints,
.name = "cluster0_freq_max",
};
static BLOCKING_NOTIFIER_HEAD(display_throughput_notifier);
static struct pm_qos_constraints display_tput_constraints = {
.list = PLIST_HEAD_INIT(display_tput_constraints.list),
.target_value = PM_QOS_DISPLAY_THROUGHPUT_DEFAULT_VALUE,
.default_value = PM_QOS_DISPLAY_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &display_throughput_notifier,
};
static struct pm_qos_object display_throughput_pm_qos = {
.constraints = &display_tput_constraints,
.name = "display_throughput",
};
#ifdef CONFIG_ARM_EXYNOS_DEVFREQ_DEBUG
static BLOCKING_NOTIFIER_HEAD(display_throughput_max_notifier);
static struct pm_qos_constraints display_tput_max_constraints = {
.list = PLIST_HEAD_INIT(display_tput_max_constraints.list),
.target_value = PM_QOS_DISPLAY_THROUGHPUT_MAX_DEFAULT_VALUE,
.default_value = PM_QOS_DISPLAY_THROUGHPUT_MAX_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &display_throughput_max_notifier,
};
static struct pm_qos_object display_throughput_max_pm_qos = {
.constraints = &display_tput_max_constraints,
.name = "display_throughput_max",
};
#endif
static BLOCKING_NOTIFIER_HEAD(cam_throughput_notifier);
static struct pm_qos_constraints cam_tput_constraints = {
.list = PLIST_HEAD_INIT(cam_tput_constraints.list),
.target_value = PM_QOS_CAM_THROUGHPUT_DEFAULT_VALUE,
.default_value = PM_QOS_CAM_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &cam_throughput_notifier,
};
static struct pm_qos_object cam_throughput_pm_qos = {
.constraints = &cam_tput_constraints,
.name = "cam_throughput",
};
#ifdef CONFIG_ARM_EXYNOS_DEVFREQ_DEBUG
static BLOCKING_NOTIFIER_HEAD(cam_throughput_max_notifier);
static struct pm_qos_constraints cam_tput_max_constraints = {
.list = PLIST_HEAD_INIT(cam_tput_max_constraints.list),
.target_value = PM_QOS_CAM_THROUGHPUT_MAX_DEFAULT_VALUE,
.default_value = PM_QOS_CAM_THROUGHPUT_MAX_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &cam_throughput_max_notifier,
};
static struct pm_qos_object cam_throughput_max_pm_qos = {
.constraints = &cam_tput_max_constraints,
.name = "cam_throughput_max",
};
#endif
static BLOCKING_NOTIFIER_HEAD(cpu_online_min_notifier);
static struct pm_qos_constraints cpu_online_min_constraints = {
.list = PLIST_HEAD_INIT(cpu_online_min_constraints.list),
.target_value = PM_QOS_CPU_ONLINE_MIN_DEFAULT_VALUE,
.default_value = PM_QOS_CPU_ONLINE_MIN_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &cpu_online_min_notifier,
};
static struct pm_qos_object cpu_online_min_pm_qos = {
.constraints = &cpu_online_min_constraints,
.name = "cpu_online_min",
};
static BLOCKING_NOTIFIER_HEAD(cpu_online_max_notifier);
static struct pm_qos_constraints cpu_online_max_constraints = {
.list = PLIST_HEAD_INIT(cpu_online_max_constraints.list),
.target_value = PM_QOS_CPU_ONLINE_MAX_DEFAULT_VALUE,
.default_value = PM_QOS_CPU_ONLINE_MAX_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &cpu_online_max_notifier,
};
static struct pm_qos_object cpu_online_max_pm_qos = {
.constraints = &cpu_online_max_constraints,
.name = "cpu_online_max",
};
static struct pm_qos_object *pm_qos_array[] = {
&null_pm_qos,
&cpu_dma_pm_qos,
&network_lat_pm_qos,
&device_throughput_pm_qos,
#ifdef CONFIG_ARM_EXYNOS_DEVFREQ_DEBUG
&device_throughput_max_pm_qos,
#endif
&bus_throughput_pm_qos,
&bus_throughput_max_pm_qos,
&network_throughput_pm_qos,
&memory_bandwidth_pm_qos,
&cluster1_freq_min_pm_qos,
&cluster1_freq_max_pm_qos,
&cluster0_freq_min_pm_qos,
&cluster0_freq_max_pm_qos,
&display_throughput_pm_qos,
#ifdef CONFIG_ARM_EXYNOS_DEVFREQ_DEBUG
&display_throughput_max_pm_qos,
#endif
&cam_throughput_pm_qos,
#ifdef CONFIG_ARM_EXYNOS_DEVFREQ_DEBUG
&cam_throughput_max_pm_qos,
#endif
&cpu_online_min_pm_qos,
&cpu_online_max_pm_qos,
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos);
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);
static const struct file_operations pm_qos_power_fops = {
.write = pm_qos_power_write,
.read = pm_qos_power_read,
.open = pm_qos_power_open,
.release = pm_qos_power_release,
.llseek = noop_llseek,
};
/* unlocked internal variant */
static inline int pm_qos_get_value(struct pm_qos_constraints *c)
{
struct plist_node *node;
int total_value = 0;
if (plist_head_empty(&c->list))
return c->no_constraint_value;
switch (c->type) {
case PM_QOS_MIN:
return plist_first(&c->list)->prio;
case PM_QOS_MAX:
case PM_QOS_FORCE_MAX:
return plist_last(&c->list)->prio;
case PM_QOS_SUM:
plist_for_each(node, &c->list)
total_value += node->prio;
return total_value;
default:
/* runtime check for not using enum */
BUG();
return PM_QOS_DEFAULT_VALUE;
}
}
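/*
 * Worked example, derived from the switch above: with pending request
 * values {10, 50}, a PM_QOS_MIN class aggregates to 10, PM_QOS_MAX and
 * PM_QOS_FORCE_MAX aggregate to 50, and PM_QOS_SUM aggregates to 60.
 * With no requests pending, no_constraint_value is returned instead.
 */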
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
return c->target_value;
}
static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
c->target_value = value;
}
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
* @c: constraints data struct
* @node: request to add to the list, to update or to remove
* @action: action to take on the constraints list
* @value: value of the request to add or update
*
* This function returns 1 if the aggregated constraint value has changed, 0
* otherwise.
*/
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value)
{
unsigned long flags;
int prev_value, curr_value, new_value;
int ret;
#ifdef CONFIG_ARCH_EXYNOS
struct pm_qos_constraints *cluster1_max_const;
struct pm_qos_constraints *cluster0_max_const;
#endif
spin_lock_irqsave(&pm_qos_lock, flags);
#ifdef CONFIG_ARCH_EXYNOS
cluster1_max_const = cluster1_freq_max_pm_qos.constraints;
cluster0_max_const = cluster0_freq_max_pm_qos.constraints;
if ((c == cluster1_max_const || c == cluster0_max_const) &&
(value > c->default_value))
value = c->default_value;
#endif
prev_value = pm_qos_get_value(c);
if (value == PM_QOS_DEFAULT_VALUE)
new_value = c->default_value;
else
new_value = value;
switch (action) {
case PM_QOS_REMOVE_REQ:
plist_del(node, &c->list);
break;
case PM_QOS_UPDATE_REQ:
/*
* to change the list, we atomically remove, reinit
* with new value and add, then see if the extremal
* changed
*/
plist_del(node, &c->list);
case PM_QOS_ADD_REQ:
plist_node_init(node, new_value);
plist_add(node, &c->list);
break;
default:
/* no action */
;
}
curr_value = pm_qos_get_value(c);
pm_qos_set_value(c, curr_value);
spin_unlock_irqrestore(&pm_qos_lock, flags);
trace_pm_qos_update_target(action, prev_value, curr_value);
if (c->type == PM_QOS_FORCE_MAX) {
blocking_notifier_call_chain(c->notifiers,
(unsigned long)curr_value,
NULL);
return 1;
}
if (prev_value != curr_value) {
ret = 1;
if (c->notifiers)
blocking_notifier_call_chain(c->notifiers,
(unsigned long)curr_value,
NULL);
} else {
ret = 0;
}
return ret;
}
/**
* pm_qos_update_constraints - update the attributes of a constraints object
* @pm_qos_class: identification of which qos value is requested
* @constraints: constraints data struct holding the new attributes
*
* This function overrides the attributes of the selected class's constraints
* with every field of @constraints that is set (non-zero).
*/
int pm_qos_update_constraints(int pm_qos_class,
struct pm_qos_constraints *constraints)
{
struct pm_qos_constraints *r_constraints;
int ret = -EINVAL;
int i;
if (!constraints) {
printk(KERN_ERR "%s: invalid constraints\n",
__func__);
return ret;
}
for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {
if (i != pm_qos_class)
continue;
r_constraints = pm_qos_array[i]->constraints;
if (constraints->target_value)
r_constraints->target_value = constraints->target_value;
if (constraints->default_value)
r_constraints->default_value = constraints->default_value;
if (constraints->type)
r_constraints->type = constraints->type;
if (constraints->notifiers)
r_constraints->notifiers = constraints->notifiers;
return 0;
}
printk(KERN_ERR "%s: no search PM QoS CLASS(%d)\n",
__func__, pm_qos_class);
return ret;
}
EXPORT_SYMBOL_GPL(pm_qos_update_constraints);
/**
* pm_qos_flags_remove_req - Remove device PM QoS flags request.
* @pqf: Device PM QoS flags set to remove the request from.
* @req: Request to remove from the set.
*/
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req)
{
s32 val = 0;
list_del(&req->node);
list_for_each_entry(req, &pqf->list, node)
val |= req->flags;
pqf->effective_flags = val;
}
/**
* pm_qos_update_flags - Update a set of PM QoS flags.
* @pqf: Set of flags to update.
* @req: Request to add to the set, to modify, or to remove from the set.
* @action: Action to take on the set.
* @val: Value of the request to add or modify.
*
* Update the given set of PM QoS flags and call notifiers if the aggregate
* value has changed. Returns 1 if the aggregate constraint value has changed,
* 0 otherwise.
*/
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val)
{
unsigned long irqflags;
s32 prev_value, curr_value;
spin_lock_irqsave(&pm_qos_lock, irqflags);
prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
switch (action) {
case PM_QOS_REMOVE_REQ:
pm_qos_flags_remove_req(pqf, req);
break;
case PM_QOS_UPDATE_REQ:
pm_qos_flags_remove_req(pqf, req);
case PM_QOS_ADD_REQ:
req->flags = val;
INIT_LIST_HEAD(&req->node);
list_add_tail(&req->node, &pqf->list);
pqf->effective_flags |= val;
break;
default:
/* no action */
;
}
curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
spin_unlock_irqrestore(&pm_qos_lock, irqflags);
trace_pm_qos_update_flags(action, prev_value, curr_value);
return prev_value != curr_value;
}
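/*
 * Worked example, derived from the code above: flags aggregate by OR,
 * so requests of 0x1 and 0x4 yield effective_flags == 0x5. Removing
 * the 0x4 request drops the aggregate back to 0x1, and since the
 * aggregate changed, pm_qos_update_flags() returns true.
 */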
/**
* pm_qos_read_req_value - returns requested qos value
* @pm_qos_class: identification of which qos value is requested
* @req: request whose set value is to be looked up
*
* This function returns the value currently requested by @req, as stored on
* the constraints list; it backs the per-request sysfs interface.
*/
int pm_qos_read_req_value(int pm_qos_class, struct pm_qos_request *req)
{
struct plist_node *p;
unsigned long flags;
spin_lock_irqsave(&pm_qos_lock, flags);
plist_for_each(p, &pm_qos_array[pm_qos_class]->constraints->list) {
if (req == container_of(p, struct pm_qos_request, node)) {
spin_unlock_irqrestore(&pm_qos_lock, flags);
return p->prio;
}
}
spin_unlock_irqrestore(&pm_qos_lock, flags);
return -ENODATA;
}
EXPORT_SYMBOL_GPL(pm_qos_read_req_value);
/**
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
* This function returns the current target value.
*/
int pm_qos_request(int pm_qos_class)
{
return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
}
EXPORT_SYMBOL_GPL(pm_qos_request);
int pm_qos_request_active(struct pm_qos_request *req)
{
return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
static void __pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
trace_pm_qos_update_request(req->pm_qos_class, new_value);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
}
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
*
* Once the timeout expires, this handler cancels the temporary request by
* falling back to the default value.
*/
static void pm_qos_work_fn(struct work_struct *work)
{
struct pm_qos_request *req = container_of(to_delayed_work(work),
struct pm_qos_request,
work);
__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
* pm_qos_add_request_trace - inserts new qos request into the list
* @func: name of the caller, recorded in the request for debugging
* @line: line number of the caller, recorded in the request for debugging
* @req: pointer to a preallocated handle
* @pm_qos_class: identifies which list of qos request to use
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
* for the pm_qos_class of parameters and initializes the pm_qos_request
* handle. Caller needs to save this handle for later use in updates and
* removal. Most callers use the pm_qos_add_request() wrapper, which supplies
* @func and @line.
*/
void pm_qos_add_request_trace(char *func, unsigned int line,
struct pm_qos_request *req,
int pm_qos_class, s32 value)
{
if (!req) /* guard against callers passing in null */
return;
if (pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
return;
}
req->pm_qos_class = pm_qos_class;
req->func = func;
req->line = line;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
trace_pm_qos_add_request(pm_qos_class, value);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
&req->node, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(pm_qos_add_request_trace);
/**
* pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a pm_qos request to use
* @value: defines the qos request
*
* Updates an existing qos request for the pm_qos_class of parameters along
* with updating the target pm_qos_class value.
*
* Attempts are made to make this code callable on hot code paths.
*/
void pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
if (!req) /* guard against callers passing in null */
return;
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
return;
}
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
__pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
* pm_qos_update_request_timeout - modifies an existing qos request temporarily.
* @req : handle to list element holding a pm_qos request to use
* @new_value: defines the temporary qos request
* @timeout_us: the effective duration of this qos request in usecs.
*
* After timeout_us, this qos request is cancelled automatically.
*/
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
unsigned long timeout_us)
{
if (!req)
return;
if (WARN(!pm_qos_request_active(req),
"%s called for unknown object.", __func__))
return;
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
trace_pm_qos_update_request_timeout(req->pm_qos_class,
new_value, timeout_us);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
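/*
 * Illustrative sketch (hypothetical example_* names, never built): a
 * temporary boost that falls back to the default value by itself, with
 * no explicit undo needed from the caller.
 */
#ifdef PM_QOS_EXAMPLE_SKETCH	/* hypothetical guard, not a real Kconfig */
static void example_wakeup_boost(struct pm_qos_request *req)
{
/* hold cpu_dma_latency at 0 for 5 ms after a wakeup event */
pm_qos_update_request_timeout(req, 0, 5 * USEC_PER_MSEC);
}
#endif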
/**
* pm_qos_remove_request - removes an existing qos request
* @req: handle to request list element
*
* Will remove pm qos request from the list of constraints and
* recompute the current target value for the pm_qos_class. Call this
* on slow code paths.
*/
void pm_qos_remove_request(struct pm_qos_request *req)
{
if (!req) /* guard against callers passing in null */
return;
/* silent return to keep pcm code cleaner */
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
return;
}
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
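/*
 * Illustrative sketch (hypothetical example_* names, never built): the
 * usual request lifecycle from a driver, using the add/remove calls
 * above. pm_qos_add_request() is the caller-facing entry that ends up
 * in pm_qos_add_request_trace().
 */
#ifdef PM_QOS_EXAMPLE_SKETCH	/* hypothetical guard, not a real Kconfig */
static struct pm_qos_request example_req;

static void example_io_burst_begin(void)
{
/* cap cpu_dma_latency at 20 usecs while the burst runs */
pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void example_io_burst_end(void)
{
/* drop the constraint and let deeper idle states back in */
pm_qos_remove_request(&example_req);
}
#endif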
/**
* pm_qos_add_notifier - sets notification entry for changes to target value
* @pm_qos_class: identifies which qos target changes should be notified.
* @notifier: notifier block managed by caller.
*
* will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_register(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
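/*
 * Illustrative sketch (hypothetical example_* names, never built):
 * consumers observe target changes through the blocking notifier
 * chain, so callbacks run in process context and may sleep.
 */
#ifdef PM_QOS_EXAMPLE_SKETCH	/* hypothetical guard, not a real Kconfig */
static int example_lat_notify(struct notifier_block *nb,
unsigned long curr_value, void *unused)
{
pr_info("cpu_dma_latency target changed to %lu\n", curr_value);
return NOTIFY_OK;
}

static struct notifier_block example_lat_nb = {
.notifier_call = example_lat_notify,
};
/* registered via pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &example_lat_nb); */
#endif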
/**
* pm_qos_remove_notifier - deletes notification entry from chain.
* @pm_qos_class: identifies which qos target changes are notified.
* @notifier: notifier block to be removed.
*
* will remove the notifier from the notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_unregister(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
/* User space interface to PM QoS classes via misc devices */
static int register_pm_qos_misc(struct pm_qos_object *qos)
{
qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
qos->pm_qos_power_miscdev.name = qos->name;
qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
return misc_register(&qos->pm_qos_power_miscdev);
}
static int find_pm_qos_object_by_minor(int minor)
{
int pm_qos_class;
for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
if (minor ==
pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
return pm_qos_class;
}
return -1;
}
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
long pm_qos_class;
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
filp->private_data = req;
return 0;
}
return -EPERM;
}
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
struct pm_qos_request *req;
req = filp->private_data;
pm_qos_remove_request(req);
kfree(req);
return 0;
}
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
unsigned long flags;
struct pm_qos_request *req = filp->private_data;
if (!req)
return -EINVAL;
if (!pm_qos_request_active(req))
return -EINVAL;
spin_lock_irqsave(&pm_qos_lock, flags);
value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
spin_unlock_irqrestore(&pm_qos_lock, flags);
return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
struct pm_qos_request *req;
if (count == sizeof(s32)) {
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
} else {
int ret;
ret = kstrtos32_from_user(buf, count, 16, &value);
if (ret)
return ret;
}
req = filp->private_data;
pm_qos_update_request(req, value);
return count;
}
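/*
 * User-space sketch (hypothetical, compiled separately): each open fd
 * on one of these misc devices holds exactly one request. Writing a
 * raw binary s32 (or an ASCII hex string) updates it; closing the fd
 * removes it.
 *
 *	int fd = open("/dev/cpu_dma_latency", O_RDWR);
 *	int32_t lat = 20;
 *	write(fd, &lat, sizeof(lat));	-- constraint now held at 20 usecs
 *	... latency-sensitive work ...
 *	close(fd);			-- request removed automatically
 */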
static void pm_qos_debug_show_one(struct seq_file *s, struct pm_qos_object *qos)
{
struct plist_node *p;
unsigned long flags;
spin_lock_irqsave(&pm_qos_lock, flags);
seq_printf(s, "%s\n", qos->name);
seq_printf(s, " default value: %d\n", qos->constraints->default_value);
seq_printf(s, " target value: %d\n", qos->constraints->target_value);
seq_printf(s, " requests:\n");
plist_for_each(p, &qos->constraints->list)
seq_printf(s, " %pk(%s:%d): %d\n",
container_of(p, struct pm_qos_request, node),
(container_of(p, struct pm_qos_request, node))->func,
(container_of(p, struct pm_qos_request, node))->line,
p->prio);
spin_unlock_irqrestore(&pm_qos_lock, flags);
}
static int pm_qos_debug_show(struct seq_file *s, void *d)
{
int i;
for (i = 1; i < PM_QOS_NUM_CLASSES; i++)
pm_qos_debug_show_one(s, pm_qos_array[i]);
return 0;
}
static int pm_qos_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, pm_qos_debug_show, inode->i_private);
}
static const struct file_operations pm_qos_debug_fops = {
.open = pm_qos_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init pm_qos_power_init(void)
{
int ret = 0;
int i;
BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
ret = register_pm_qos_misc(pm_qos_array[i]);
if (ret < 0) {
printk(KERN_ERR "pm_qos_param: %s setup failed\n",
pm_qos_array[i]->name);
return ret;
}
}
debugfs_create_file("pm_qos", S_IRUGO, NULL, NULL, &pm_qos_debug_fops);
return ret;
}
late_initcall(pm_qos_power_init);

2605
kernel/power/snapshot.c Normal file

File diff suppressed because it is too large

531
kernel/power/suspend.c Normal file
View file

@ -0,0 +1,531 @@
/*
* kernel/power/suspend.c - Suspend to RAM and standby functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is released under the GPLv2.
*/
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <linux/rtc.h>
#include <trace/events/power.h>
#include <linux/compiler.h>
#include <linux/wakeup_reason.h>
#include "power.h"
const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
const char *pm_states[PM_SUSPEND_MAX];
static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static bool suspend_freeze_wake;
void freeze_set_ops(const struct platform_freeze_ops *ops)
{
lock_system_sleep();
freeze_ops = ops;
unlock_system_sleep();
}
static void freeze_begin(void)
{
suspend_freeze_wake = false;
}
static void freeze_enter(void)
{
cpuidle_use_deepest_state(true);
cpuidle_resume();
wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
cpuidle_pause();
cpuidle_use_deepest_state(false);
}
void freeze_wake(void)
{
suspend_freeze_wake = true;
wake_up(&suspend_freeze_wait_head);
}
EXPORT_SYMBOL_GPL(freeze_wake);
static bool valid_state(suspend_state_t state)
{
/*
* The PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states require low-level
* support and must be valid to the low-level implementation.
* A missing ->valid() callback implies that no state is valid.
*/
return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}
/*
* If this is set, the "mem" label always corresponds to the deepest sleep state
* available, the "standby" label corresponds to the second deepest sleep state
* available (if any), and the "freeze" label corresponds to the remaining
* available sleep state (if there is one).
*/
static bool relative_states;
static int __init sleep_states_setup(char *str)
{
relative_states = !strncmp(str, "1", 1);
pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
return 1;
}
__setup("relative_sleep_states=", sleep_states_setup);
/**
* suspend_set_ops - Set the global suspend method table.
* @ops: Suspend operations to use.
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
suspend_state_t i;
int j = 0;
lock_system_sleep();
suspend_ops = ops;
for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
if (valid_state(i)) {
pm_states[i] = pm_labels[j++];
} else if (!relative_states) {
pm_states[i] = NULL;
j++;
}
pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];
unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
/**
* suspend_valid_only_mem - Generic memory-only valid callback.
*
* Platform drivers that implement mem suspend only and only need to check for
* that in their .valid() callback can use this instead of rolling their own
* .valid() callback.
*/
int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
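/*
 * Illustrative sketch (hypothetical example_* names, never built): the
 * minimum a platform needs to wire up for "mem" to work through this
 * file. Only ->valid and ->enter are required for the table to be
 * useful.
 */
#ifdef PM_SUSPEND_EXAMPLE_SKETCH	/* hypothetical guard, not a real Kconfig */
static int example_suspend_enter(suspend_state_t state)
{
/* program the SoC-specific sleep registers here */
return 0;
}

static const struct platform_suspend_ops example_suspend_ops = {
.valid = suspend_valid_only_mem,
.enter = example_suspend_enter,
};

static int __init example_pm_init(void)
{
suspend_set_ops(&example_suspend_ops);
return 0;
}
#endif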
static bool sleep_state_supported(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
}
static int platform_suspend_prepare(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
suspend_ops->prepare() : 0;
}
static int platform_suspend_prepare_late(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
freeze_ops->prepare() : 0;
}
static int platform_suspend_prepare_noirq(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
suspend_ops->prepare_late() : 0;
}
static void platform_resume_noirq(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
suspend_ops->wake();
}
static void platform_resume_early(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
freeze_ops->restore();
}
static void platform_resume_finish(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
suspend_ops->finish();
}
static int platform_suspend_begin(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
return freeze_ops->begin();
else if (suspend_ops && suspend_ops->begin)
return suspend_ops->begin(state);
else
return 0;
}
static void platform_resume_end(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
freeze_ops->end();
else if (suspend_ops && suspend_ops->end)
suspend_ops->end();
}
static void platform_recover(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
suspend_ops->recover();
}
static bool platform_suspend_again(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
suspend_ops->suspend_again() : false;
}
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
if (pm_test_level == level) {
printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
mdelay(5000);
return 1;
}
#endif /* !CONFIG_PM_DEBUG */
return 0;
}
/**
* suspend_prepare - Prepare for entering system sleep state.
*
* Common code run for every system sleep state that can be entered (except for
* hibernation). Run suspend notifiers, allocate the "suspend" console and
* freeze processes.
*/
static int suspend_prepare(suspend_state_t state)
{
int error;
if (!sleep_state_supported(state))
return -EPERM;
pm_prepare_console();
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
goto Finish;
trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes();
trace_suspend_resume(TPS("freeze_processes"), 0, false);
if (!error)
return 0;
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
return error;
}
/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
local_irq_disable();
}
/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
local_irq_enable();
}
/**
* suspend_enter - Make the system enter the given sleep state.
* @state: System sleep state to enter.
* @wakeup: Set to %true if a pending wakeup event means the sleep state
* should not be re-entered.
*
* This function should be called after devices have been suspended.
*/
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
char suspend_abort[MAX_SUSPEND_ABORT_LEN];
int error, last_dev;
error = platform_suspend_prepare(state);
if (error)
goto Platform_finish;
error = dpm_suspend_late(PMSG_SUSPEND);
if (error) {
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
printk(KERN_ERR "PM: late suspend of devices failed\n");
log_suspend_abort_reason("%s device failed to power down",
suspend_stats.failed_devs[last_dev]);
goto Platform_finish;
}
error = platform_suspend_prepare_late(state);
if (error)
goto Devices_early_resume;
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
printk(KERN_ERR "PM: noirq suspend of devices failed\n");
log_suspend_abort_reason("noirq suspend of %s device failed",
suspend_stats.failed_devs[last_dev]);
goto Platform_early_resume;
}
error = platform_suspend_prepare_noirq(state);
if (error)
goto Platform_wake;
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
/*
* PM_SUSPEND_FREEZE equals
* frozen processes + suspended devices + idle processors.
* Thus we should invoke freeze_enter() soon after
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
trace_suspend_resume(TPS("machine_suspend"), state, true);
freeze_enter();
trace_suspend_resume(TPS("machine_suspend"), state, false);
goto Platform_wake;
}
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS)) {
log_suspend_abort_reason("Disabling non-boot cpus failed");
goto Enable_cpus;
}
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
if (!(suspend_test(TEST_CORE) || *wakeup)) {
trace_suspend_resume(TPS("machine_suspend"),
state, true);
error = suspend_ops->enter(state);
trace_suspend_resume(TPS("machine_suspend"),
state, false);
events_check_enabled = false;
} else if (*wakeup) {
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
error = -EBUSY;
}
syscore_resume();
}
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
Enable_cpus:
enable_nonboot_cpus();
Platform_wake:
platform_resume_noirq(state);
dpm_resume_noirq(PMSG_RESUME);
Platform_early_resume:
platform_resume_early(state);
Devices_early_resume:
dpm_resume_early(PMSG_RESUME);
Platform_finish:
platform_resume_finish(state);
return error;
}
/**
* suspend_devices_and_enter - Suspend devices and enter system sleep state.
* @state: System sleep state to enter.
*/
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
bool wakeup = false;
if (!sleep_state_supported(state))
return -ENOSYS;
error = platform_suspend_begin(state);
if (error)
goto Close;
suspend_console();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
log_suspend_abort_reason("Some devices failed to suspend, or early wake event detected");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
do {
error = suspend_enter(state, &wakeup);
} while (!error && !wakeup && platform_suspend_again(state));
Resume_devices:
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
trace_suspend_resume(TPS("resume_console"), state, true);
resume_console();
trace_suspend_resume(TPS("resume_console"), state, false);
Close:
platform_resume_end(state);
return error;
Recover_platform:
platform_recover(state);
goto Resume_devices;
}
/**
* suspend_finish - Clean up before finishing the suspend sequence.
*
* Call platform code to clean up, restart processes, and free the console that
* we've allocated. This routine is not called for hibernation.
*/
static void suspend_finish(void)
{
suspend_thaw_processes();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}
/**
* enter_state - Do common work needed to enter system sleep state.
* @state: System sleep state to enter.
*
* Make sure that no one else is trying to put the system into a sleep state.
* Fail if that's not the case. Otherwise, prepare for system suspend, make the
* system enter the given sleep state and clean up after wakeup.
*/
static int enter_state(suspend_state_t state)
{
int error;
trace_suspend_resume(TPS("suspend_enter"), state, true);
if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
pr_warning("PM: Unsupported test mode for freeze state,"
"please choose none/freezer/devices/platform.\n");
return -EAGAIN;
}
#endif
} else if (!valid_state(state)) {
return -EINVAL;
}
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
if (state == PM_SUSPEND_FREEZE)
freeze_begin();
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
error = suspend_prepare(state);
if (error)
goto Unlock;
if (suspend_test(TEST_FREEZER))
goto Finish;
trace_suspend_resume(TPS("suspend_enter"), state, false);
pr_debug("PM: Entering %s sleep\n", pm_states[state]);
pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
pm_restore_gfp_mask();
Finish:
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
Unlock:
mutex_unlock(&pm_mutex);
return error;
}
static void pm_suspend_marker(const char *annotation)
{
struct timespec ts;
struct rtc_time tm;
getnstimeofday(&ts);
rtc_time_to_tm(ts.tv_sec, &tm);
pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
}
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
*
* Check if the value of @state represents one of the supported states,
* execute enter_state() and update system suspend statistics.
*/
int pm_suspend(suspend_state_t state)
{
int error;
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
pm_suspend_marker("entry");
error = enter_state(state);
if (error) {
suspend_stats.fail++;
dpm_save_failed_errno(error);
} else {
suspend_stats.success++;
}
pm_suspend_marker("exit");
return error;
}
EXPORT_SYMBOL(pm_suspend);
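/*
 * Illustrative note: user space reaches pm_suspend() by writing one of
 * the pm_states[] labels to /sys/power/state ("echo mem >
 * /sys/power/state"); state_store() in kernel/power/main.c translates
 * the label back to a suspend_state_t before calling here.
 */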

218
kernel/power/suspend_test.c Normal file
View file

@ -0,0 +1,218 @@
/*
* kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
*
* Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
*
* This file is released under the GPLv2.
*/
#include <linux/init.h>
#include <linux/rtc.h>
#include "power.h"
/*
* We test the system suspend code by setting an RTC wakealarm a short
* time in the future, then suspending. Suspending the devices won't
* normally take long ... some systems only need a few milliseconds.
*
* The time it takes is system-specific though, so when we test this
* during system bootup we allow a LOT of time.
*/
#define TEST_SUSPEND_SECONDS 10
static unsigned long suspend_test_start_time;
static u32 test_repeat_count_max = 1;
static u32 test_repeat_count_current;
void suspend_test_start(void)
{
/* FIXME Use better timebase than "jiffies", ideally a clocksource.
* What we want is a hardware counter that will work correctly even
* during the irqs-are-off stages of the suspend/resume cycle...
*/
suspend_test_start_time = jiffies;
}
void suspend_test_finish(const char *label)
{
long nj = jiffies - suspend_test_start_time;
unsigned int msec;
msec = jiffies_to_msecs(abs(nj));
pr_info("PM: %s took %u.%03u seconds\n", label,
msec / 1000, msec % 1000);
/* Warning on suspend means the RTC alarm period needs to be
* larger -- the system was sooo slooowwww to suspend that the
* alarm (should have) fired before the system went to sleep!
*
* Warning on either suspend or resume also means the system
* has some performance issues. The stack dump of a WARN_ON
* is more likely to get the right attention than a printk...
*/
WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
"Component: %s, time: %u\n", label, msec);
}
/*
* To test system suspend, we need a hands-off mechanism to resume the
* system. RTCs wake alarms are a common self-contained mechanism.
*/
static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
{
static char err_readtime[] __initdata =
KERN_ERR "PM: can't read %s time, err %d\n";
static char err_wakealarm[] __initdata =
KERN_ERR "PM: can't set %s wakealarm, err %d\n";
static char err_suspend[] __initdata =
KERN_ERR "PM: suspend test failed, error %d\n";
static char info_test[] __initdata =
KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
unsigned long now;
struct rtc_wkalrm alm;
int status;
/* this may fail if the RTC hasn't been initialized */
repeat:
status = rtc_read_time(rtc, &alm.time);
if (status < 0) {
printk(err_readtime, dev_name(&rtc->dev), status);
return;
}
rtc_tm_to_time(&alm.time, &now);
memset(&alm, 0, sizeof alm);
rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
alm.enabled = true;
status = rtc_set_alarm(rtc, &alm);
if (status < 0) {
printk(err_wakealarm, dev_name(&rtc->dev), status);
return;
}
if (state == PM_SUSPEND_MEM) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
if (status == -ENODEV)
state = PM_SUSPEND_STANDBY;
}
if (state == PM_SUSPEND_STANDBY) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
if (status < 0)
state = PM_SUSPEND_FREEZE;
}
if (state == PM_SUSPEND_FREEZE) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
}
if (status < 0)
printk(err_suspend, status);
test_repeat_count_current++;
if (test_repeat_count_current < test_repeat_count_max)
goto repeat;
/* Some platforms can't detect that the alarm triggered the
* wakeup, or (accordingly) disable it afterwards. It's supposed
* to give oneshot behavior; cope.
*/
alm.enabled = false;
rtc_set_alarm(rtc, &alm);
}
static int __init has_wakealarm(struct device *dev, const void *data)
{
struct rtc_device *candidate = to_rtc_device(dev);
if (!candidate->ops->set_alarm)
return 0;
if (!device_may_wakeup(candidate->dev.parent))
return 0;
return 1;
}
/*
* Kernel options like "test_suspend=mem" force suspend/resume sanity tests
* at startup time. They're normally disabled, for faster boot and because
* we can't know which states really work on this particular system.
*/
static const char *test_state_label __initdata;
static char warn_bad_state[] __initdata =
KERN_WARNING "PM: can't test '%s' suspend state\n";
static int __init setup_test_suspend(char *value)
{
int i;
char *repeat;
char *suspend_type;
/* example : "=mem[,N]" ==> "mem[,N]" */
value++;
suspend_type = strsep(&value, ",");
if (!suspend_type)
return 0;
repeat = strsep(&value, ",");
if (repeat) {
if (kstrtou32(repeat, 0, &test_repeat_count_max))
return 0;
}
for (i = 0; pm_labels[i]; i++)
if (!strcmp(pm_labels[i], suspend_type)) {
test_state_label = pm_labels[i];
return 0;
}
printk(warn_bad_state, suspend_type);
return 0;
}
__setup("test_suspend", setup_test_suspend);
static int __init test_suspend(void)
{
static char warn_no_rtc[] __initdata =
KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
struct rtc_device *rtc = NULL;
struct device *dev;
suspend_state_t test_state;
/* PM is initialized by now; is that state testable? */
if (!test_state_label)
return 0;
for (test_state = PM_SUSPEND_MIN; test_state < PM_SUSPEND_MAX; test_state++) {
const char *state_label = pm_states[test_state];
if (state_label && !strcmp(test_state_label, state_label))
break;
}
if (test_state == PM_SUSPEND_MAX) {
printk(warn_bad_state, test_state_label);
return 0;
}
/* RTCs have initialized by now too ... can we use one? */
dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
if (dev)
rtc = rtc_class_open(dev_name(dev));
if (!rtc) {
printk(warn_no_rtc);
return 0;
}
/* go for it */
test_wakealarm(rtc, test_state);
rtc_class_close(rtc);
return 0;
}
late_initcall(test_suspend);

111
kernel/power/suspend_time.c Normal file
View file

@ -0,0 +1,111 @@
/*
* debugfs file to track time spent in suspend
*
* Copyright (c) 2011, Google, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
static struct timespec suspend_time_before;
static unsigned int time_in_suspend_bins[32];
#ifdef CONFIG_DEBUG_FS
static int suspend_time_debug_show(struct seq_file *s, void *data)
{
int bin;
seq_printf(s, "time (secs) count\n");
seq_printf(s, "------------------\n");
for (bin = 0; bin < 32; bin++) {
if (time_in_suspend_bins[bin] == 0)
continue;
seq_printf(s, "%4d - %4d %4u\n",
bin ? 1 << (bin - 1) : 0, 1 << bin,
time_in_suspend_bins[bin]);
}
return 0;
}
static int suspend_time_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, suspend_time_debug_show, NULL);
}
static const struct file_operations suspend_time_debug_fops = {
.open = suspend_time_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init suspend_time_debug_init(void)
{
struct dentry *d;
d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
&suspend_time_debug_fops);
if (!d) {
pr_err("Failed to create suspend_time debug file\n");
return -ENOMEM;
}
return 0;
}
late_initcall(suspend_time_debug_init);
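/*
 * Illustrative output of /sys/kernel/debug/suspend_time (power-of-two
 * histogram bins, in seconds):
 *
 *	time (secs)  count
 *	------------------
 *	   0 -    1     3
 *	   1 -    2     1
 */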
#endif
static int suspend_time_syscore_suspend(void)
{
read_persistent_clock(&suspend_time_before);
return 0;
}
static void suspend_time_syscore_resume(void)
{
struct timespec after;
read_persistent_clock(&after);
after = timespec_sub(after, suspend_time_before);
time_in_suspend_bins[fls(after.tv_sec)]++;
pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
after.tv_nsec / NSEC_PER_MSEC);
}
static struct syscore_ops suspend_time_syscore_ops = {
.suspend = suspend_time_syscore_suspend,
.resume = suspend_time_syscore_resume,
};
static int suspend_time_syscore_init(void)
{
register_syscore_ops(&suspend_time_syscore_ops);
return 0;
}
static void suspend_time_syscore_exit(void)
{
unregister_syscore_ops(&suspend_time_syscore_ops);
}
module_init(suspend_time_syscore_init);
module_exit(suspend_time_syscore_exit);

1511
kernel/power/swap.c Normal file

File diff suppressed because it is too large

478
kernel/power/user.c Normal file
View file

@ -0,0 +1,478 @@
/*
* linux/kernel/power/user.c
*
* This file provides the user space interface for software suspend/resume.
*
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*
*/
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include "power.h"
#define SNAPSHOT_MINOR 231
static struct snapshot_data {
struct snapshot_handle handle;
int swap;
int mode;
bool frozen;
bool ready;
bool platform_support;
bool free_bitmaps;
} snapshot_state;
atomic_t snapshot_device_available = ATOMIC_INIT(1);
static int snapshot_open(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
int error;
if (!hibernation_available())
return -EPERM;
lock_system_sleep();
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
goto Unlock;
}
if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
atomic_inc(&snapshot_device_available);
error = -ENOSYS;
goto Unlock;
}
nonseekable_open(inode, filp);
data = &snapshot_state;
filp->private_data = data;
memset(&data->handle, 0, sizeof(struct snapshot_handle));
if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
/* Hibernating. The image device should be accessible. */
data->swap = swsusp_resume_device ?
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
data->free_bitmaps = false;
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (error)
pm_notifier_call_chain(PM_POST_HIBERNATION);
} else {
/*
* Resuming. We may need to wait for the image device to
* appear.
*/
wait_for_device_probe();
data->swap = -1;
data->mode = O_WRONLY;
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
if (!error) {
error = create_basic_memory_bitmaps();
data->free_bitmaps = !error;
}
if (error)
pm_notifier_call_chain(PM_POST_RESTORE);
}
if (error)
atomic_inc(&snapshot_device_available);
data->frozen = false;
data->ready = false;
data->platform_support = false;
Unlock:
unlock_system_sleep();
return error;
}
static int snapshot_release(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
lock_system_sleep();
swsusp_free();
data = filp->private_data;
free_all_swap_pages(data->swap);
if (data->frozen) {
pm_restore_gfp_mask();
free_basic_memory_bitmaps();
thaw_processes();
} else if (data->free_bitmaps) {
free_basic_memory_bitmaps();
}
pm_notifier_call_chain(data->mode == O_RDONLY ?
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
unlock_system_sleep();
return 0;
}
static ssize_t snapshot_read(struct file *filp, char __user *buf,
size_t count, loff_t *offp)
{
struct snapshot_data *data;
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
lock_system_sleep();
data = filp->private_data;
if (!data->ready) {
res = -ENODATA;
goto Unlock;
}
if (!pg_offp) { /* on page boundary? */
res = snapshot_read_next(&data->handle);
if (res <= 0)
goto Unlock;
} else {
res = PAGE_SIZE - pg_offp;
}
res = simple_read_from_buffer(buf, count, &pg_offp,
data_of(data->handle), res);
if (res > 0)
*offp += res;
Unlock:
unlock_system_sleep();
return res;
}
static ssize_t snapshot_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offp)
{
struct snapshot_data *data;
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
lock_system_sleep();
data = filp->private_data;
if (!pg_offp) {
res = snapshot_write_next(&data->handle);
if (res <= 0)
goto unlock;
} else {
res = PAGE_SIZE - pg_offp;
}
res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
buf, count);
if (res > 0)
*offp += res;
unlock:
unlock_system_sleep();
return res;
}
static long snapshot_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int error = 0;
struct snapshot_data *data;
loff_t size;
sector_t offset;
if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
return -ENOTTY;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
lock_device_hotplug();
data = filp->private_data;
switch (cmd) {
case SNAPSHOT_FREEZE:
if (data->frozen)
break;
printk("Syncing filesystems ... ");
sys_sync();
printk("done.\n");
error = freeze_processes();
if (error)
break;
error = create_basic_memory_bitmaps();
if (error)
thaw_processes();
else
data->frozen = true;
break;
case SNAPSHOT_UNFREEZE:
if (!data->frozen || data->ready)
break;
pm_restore_gfp_mask();
free_basic_memory_bitmaps();
data->free_bitmaps = false;
thaw_processes();
data->frozen = false;
break;
case SNAPSHOT_CREATE_IMAGE:
if (data->mode != O_RDONLY || !data->frozen || data->ready) {
error = -EPERM;
break;
}
pm_restore_gfp_mask();
error = hibernation_snapshot(data->platform_support);
if (!error) {
error = put_user(in_suspend, (int __user *)arg);
data->ready = !freezer_test_done && !error;
freezer_test_done = false;
}
break;
case SNAPSHOT_ATOMIC_RESTORE:
snapshot_write_finalize(&data->handle);
if (data->mode != O_WRONLY || !data->frozen ||
!snapshot_image_loaded(&data->handle)) {
error = -EPERM;
break;
}
error = hibernation_restore(data->platform_support);
break;
case SNAPSHOT_FREE:
swsusp_free();
memset(&data->handle, 0, sizeof(struct snapshot_handle));
data->ready = false;
/*
* It is necessary to thaw kernel threads here, because
* SNAPSHOT_CREATE_IMAGE may be invoked directly after
* SNAPSHOT_FREE. In that case, if kernel threads were not
* thawed, the preallocation of memory carried out by
* hibernation_snapshot() might run into problems (i.e. it
* might fail or even deadlock).
*/
thaw_kernel_threads();
break;
case SNAPSHOT_PREF_IMAGE_SIZE:
image_size = arg;
break;
case SNAPSHOT_GET_IMAGE_SIZE:
if (!data->ready) {
error = -ENODATA;
break;
}
size = snapshot_get_image_size();
size <<= PAGE_SHIFT;
error = put_user(size, (loff_t __user *)arg);
break;
case SNAPSHOT_AVAIL_SWAP_SIZE:
size = count_swap_pages(data->swap, 1);
size <<= PAGE_SHIFT;
error = put_user(size, (loff_t __user *)arg);
break;
case SNAPSHOT_ALLOC_SWAP_PAGE:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
break;
}
offset = alloc_swapdev_block(data->swap);
if (offset) {
offset <<= PAGE_SHIFT;
error = put_user(offset, (loff_t __user *)arg);
} else {
error = -ENOSPC;
}
break;
case SNAPSHOT_FREE_SWAP_PAGES:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
break;
}
free_all_swap_pages(data->swap);
break;
case SNAPSHOT_S2RAM:
if (!data->frozen) {
error = -EPERM;
break;
}
/*
* Tasks are frozen and the notifiers have been called with
* PM_HIBERNATION_PREPARE
*/
error = suspend_devices_and_enter(PM_SUSPEND_MEM);
data->ready = false;
break;
case SNAPSHOT_PLATFORM_SUPPORT:
data->platform_support = !!arg;
break;
case SNAPSHOT_POWER_OFF:
if (data->platform_support)
error = hibernation_platform_enter();
break;
case SNAPSHOT_SET_SWAP_AREA:
if (swsusp_swap_in_use()) {
error = -EPERM;
} else {
struct resume_swap_area swap_area;
dev_t swdev;
error = copy_from_user(&swap_area, (void __user *)arg,
sizeof(struct resume_swap_area));
if (error) {
error = -EFAULT;
break;
}
/*
* User space encodes device types as two-byte values,
* so we need to recode them
*/
swdev = new_decode_dev(swap_area.dev);
if (swdev) {
offset = swap_area.offset;
data->swap = swap_type_of(swdev, offset, NULL);
if (data->swap < 0)
error = -ENODEV;
} else {
data->swap = -1;
error = -EINVAL;
}
}
break;
default:
error = -ENOTTY;
}
unlock_device_hotplug();
mutex_unlock(&pm_mutex);
return error;
}
#ifdef CONFIG_COMPAT
struct compat_resume_swap_area {
compat_loff_t offset;
u32 dev;
} __packed;
static long
snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
switch (cmd) {
case SNAPSHOT_GET_IMAGE_SIZE:
case SNAPSHOT_AVAIL_SWAP_SIZE:
case SNAPSHOT_ALLOC_SWAP_PAGE: {
compat_loff_t __user *uoffset = compat_ptr(arg);
loff_t offset;
mm_segment_t old_fs;
int err;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
set_fs(old_fs);
if (!err && put_user(offset, uoffset))
err = -EFAULT;
return err;
}
case SNAPSHOT_CREATE_IMAGE:
return snapshot_ioctl(file, cmd,
(unsigned long) compat_ptr(arg));
case SNAPSHOT_SET_SWAP_AREA: {
struct compat_resume_swap_area __user *u_swap_area =
compat_ptr(arg);
struct resume_swap_area swap_area;
mm_segment_t old_fs;
int err;
err = get_user(swap_area.offset, &u_swap_area->offset);
err |= get_user(swap_area.dev, &u_swap_area->dev);
if (err)
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA,
(unsigned long) &swap_area);
set_fs(old_fs);
return err;
}
default:
return snapshot_ioctl(file, cmd, arg);
}
}
#endif /* CONFIG_COMPAT */
static const struct file_operations snapshot_fops = {
.open = snapshot_open,
.release = snapshot_release,
.read = snapshot_read,
.write = snapshot_write,
.llseek = no_llseek,
.unlocked_ioctl = snapshot_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = snapshot_compat_ioctl,
#endif
};
static struct miscdevice snapshot_device = {
.minor = SNAPSHOT_MINOR,
.name = "snapshot",
.fops = &snapshot_fops,
};
static int __init snapshot_device_init(void)
{
return misc_register(&snapshot_device);
}
device_initcall(snapshot_device_init);
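/*
 * Illustrative sketch of the user-space flow this device serves (as
 * implemented by the uswsusp tools): to create an image, open
 * /dev/snapshot read-only, then SNAPSHOT_FREEZE -> SNAPSHOT_CREATE_IMAGE
 * -> read() the image out -> SNAPSHOT_UNFREEZE; to restore, open it
 * write-only, write() the image back and issue SNAPSHOT_ATOMIC_RESTORE.
 */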

262
kernel/power/wakelock.c Normal file
View file

@ -0,0 +1,262 @@
/*
* kernel/power/wakelock.c
*
* User space wakeup sources support.
*
* Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
*
* This code is based on the analogous interface allowing user space to
* manipulate wakelocks on Android.
*/
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "power.h"
static DEFINE_MUTEX(wakelocks_lock);
struct wakelock {
char *name;
struct rb_node node;
struct wakeup_source ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
struct list_head lru;
#endif
};
static struct rb_root wakelocks_tree = RB_ROOT;
ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
struct rb_node *node;
struct wakelock *wl;
char *str = buf;
char *end = buf + PAGE_SIZE;
mutex_lock(&wakelocks_lock);
for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
wl = rb_entry(node, struct wakelock, node);
if (wl->ws.active == show_active)
str += scnprintf(str, end - str, "%s ", wl->name);
}
if (str > buf)
str--;
str += scnprintf(str, end - str, "\n");
mutex_unlock(&wakelocks_lock);
return (str - buf);
}
#if CONFIG_PM_WAKELOCKS_LIMIT > 0
static unsigned int number_of_wakelocks;
static inline bool wakelocks_limit_exceeded(void)
{
return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}
static inline void increment_wakelocks_number(void)
{
number_of_wakelocks++;
}
static inline void decrement_wakelocks_number(void)
{
number_of_wakelocks--;
}
#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif /* CONFIG_PM_WAKELOCKS_LIMIT */
#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX 100
#define WL_GC_TIME_SEC 300
static LIST_HEAD(wakelocks_lru_list);
static unsigned int wakelocks_gc_count;
static inline void wakelocks_lru_add(struct wakelock *wl)
{
list_add(&wl->lru, &wakelocks_lru_list);
}
static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
list_move(&wl->lru, &wakelocks_lru_list);
}
static void wakelocks_gc(void)
{
struct wakelock *wl, *aux;
ktime_t now;
if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
return;
now = ktime_get();
list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
u64 idle_time_ns;
bool active;
spin_lock_irq(&wl->ws.lock);
idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
active = wl->ws.active;
spin_unlock_irq(&wl->ws.lock);
if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
break;
if (!active) {
wakeup_source_remove(&wl->ws);
rb_erase(&wl->node, &wakelocks_tree);
list_del(&wl->lru);
kfree(wl->name);
kfree(wl);
decrement_wakelocks_number();
}
}
wakelocks_gc_count = 0;
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */
static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
bool add_if_not_found)
{
struct rb_node **node = &wakelocks_tree.rb_node;
struct rb_node *parent = *node;
struct wakelock *wl;
while (*node) {
int diff;
parent = *node;
wl = rb_entry(*node, struct wakelock, node);
diff = strncmp(name, wl->name, len);
if (diff == 0) {
if (wl->name[len])
diff = -1;
else
return wl;
}
if (diff < 0)
node = &(*node)->rb_left;
else
node = &(*node)->rb_right;
}
if (!add_if_not_found)
return ERR_PTR(-EINVAL);
if (wakelocks_limit_exceeded())
return ERR_PTR(-ENOSPC);
/* Not found, we have to add a new one. */
wl = kzalloc(sizeof(*wl), GFP_KERNEL);
if (!wl)
return ERR_PTR(-ENOMEM);
wl->name = kstrndup(name, len, GFP_KERNEL);
if (!wl->name) {
kfree(wl);
return ERR_PTR(-ENOMEM);
}
wl->ws.name = wl->name;
wakeup_source_add(&wl->ws);
rb_link_node(&wl->node, parent, node);
rb_insert_color(&wl->node, &wakelocks_tree);
wakelocks_lru_add(wl);
increment_wakelocks_number();
return wl;
}
int pm_wake_lock(const char *buf)
{
const char *str = buf;
struct wakelock *wl;
u64 timeout_ns = 0;
size_t len;
int ret = 0;
while (*str && !isspace(*str))
str++;
len = str - buf;
if (!len)
return -EINVAL;
if (*str && *str != '\n') {
/* Find out if there's a valid timeout string appended. */
ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
if (ret)
return -EINVAL;
}
mutex_lock(&wakelocks_lock);
wl = wakelock_lookup_add(buf, len, true);
if (IS_ERR(wl)) {
ret = PTR_ERR(wl);
goto out;
}
if (timeout_ns) {
u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;
do_div(timeout_ms, NSEC_PER_MSEC);
__pm_wakeup_event(&wl->ws, timeout_ms);
} else {
__pm_stay_awake(&wl->ws);
}
wakelocks_lru_most_recent(wl);
out:
mutex_unlock(&wakelocks_lock);
return ret;
}
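/*
 * Usage sketch (assuming this handler is wired up to the Android-style
 * /sys/power/wake_lock node; the lock name is illustrative):
 *
 *	# acquire an indefinite wakelock named "mylock"
 *	echo mylock > /sys/power/wake_lock
 *
 *	# acquire "mylock" with a 500 ms (500000000 ns) timeout
 *	echo "mylock 500000000" > /sys/power/wake_lock
 */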
int pm_wake_unlock(const char *buf)
{
struct wakelock *wl;
size_t len;
int ret = 0;
len = strlen(buf);
if (!len)
return -EINVAL;
if (buf[len-1] == '\n')
len--;
if (!len)
return -EINVAL;
mutex_lock(&wakelocks_lock);
wl = wakelock_lookup_add(buf, len, false);
if (IS_ERR(wl)) {
ret = PTR_ERR(wl);
goto out;
}
__pm_relax(&wl->ws);
wakelocks_lru_most_recent(wl);
wakelocks_gc();
out:
mutex_unlock(&wakelocks_lock);
return ret;
}
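/*
 * Usage sketch (assuming the corresponding /sys/power/wake_unlock node):
 *
 *	# release the wakelock named "mylock"
 *	echo mylock > /sys/power/wake_unlock
 */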

291
kernel/power/wakeup_reason.c Normal file
View file
@ -0,0 +1,291 @@
/*
* kernel/power/wakeup_reason.c
*
* Logs the reasons that caused the kernel to resume from
* suspend.
*
* Copyright (C) 2014 Google, Inc.
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/wakeup_reason.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#define MAX_WAKEUP_REASON_IRQS 32
static int irq_list[MAX_WAKEUP_REASON_IRQS];
static int irqcount;
static bool suspend_abort;
static char abort_reason[MAX_SUSPEND_ABORT_LEN];
static struct kobject *wakeup_reason;
static DEFINE_SPINLOCK(resume_reason_lock);
static ktime_t last_monotime; /* monotonic time before last suspend */
static ktime_t curr_monotime; /* monotonic time after last suspend */
static ktime_t last_stime; /* monotonic boottime offset before last suspend */
static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int irq_no, buf_offset = 0;
struct irq_desc *desc;
spin_lock(&resume_reason_lock);
if (suspend_abort) {
buf_offset = sprintf(buf, "Abort: %s", abort_reason);
} else {
for (irq_no = 0; irq_no < irqcount; irq_no++) {
desc = irq_to_desc(irq_list[irq_no]);
if (desc && desc->action && desc->action->name)
buf_offset += sprintf(buf + buf_offset, "%d %s\n",
irq_list[irq_no], desc->action->name);
else
buf_offset += sprintf(buf + buf_offset, "%d\n",
irq_list[irq_no]);
}
}
spin_unlock(&resume_reason_lock);
return buf_offset;
}
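/*
 * Example output of /sys/kernel/wakeup_reasons/last_resume_reason (the
 * IRQ numbers and action names below are illustrative only):
 *
 *	144 msm_gpio
 *	200 alarmtimer
 *
 * or, after an aborted suspend:
 *
 *	Abort: Pending IRQ 144
 */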
static ssize_t last_suspend_time_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct timespec sleep_time;
struct timespec total_time;
struct timespec suspend_resume_time;
/*
 * total_time is calculated from the monotonic boot time offsets
 * because, unlike CLOCK_MONOTONIC, they include the time spent in
 * the suspend state.
 */
total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));
/*
 * suspend_resume_time is the CLOCK_MONOTONIC interval between the
 * point just before entering suspend and the point just after resume;
 * CLOCK_MONOTONIC does not advance while the system is suspended, so
 * this excludes the time actually spent asleep.
 */
suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));
/* sleep_time = total_time - suspend_resume_time */
sleep_time = timespec_sub(total_time, suspend_resume_time);
/* Export suspend_resume_time and sleep_time in pair here. */
return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
sleep_time.tv_sec, sleep_time.tv_nsec);
}
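/*
 * Example output of /sys/kernel/wakeup_reasons/last_suspend_time
 * (values are illustrative): "0.123456789 42.000000000" would mean the
 * suspend/resume code paths took about 0.12 s while the system actually
 * slept for 42 s.
 */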
static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
static struct attribute *attrs[] = {
&resume_reason.attr,
&suspend_time.attr,
NULL,
};
static struct attribute_group attr_group = {
.attrs = attrs,
};
/*
 * Logs the wakeup reason to the kernel log and stores the IRQ number
 * to expose it to user space via sysfs.
 */
void log_wakeup_reason(int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
if (desc && desc->action && desc->action->name)
printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
desc->action->name);
else
printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
spin_lock(&resume_reason_lock);
if (irqcount == MAX_WAKEUP_REASON_IRQS) {
spin_unlock(&resume_reason_lock);
printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
MAX_WAKEUP_REASON_IRQS);
return;
}
irq_list[irqcount++] = irq;
spin_unlock(&resume_reason_lock);
}
int check_wakeup_reason(int irq)
{
int irq_no;
int ret = false;
spin_lock(&resume_reason_lock);
for (irq_no = 0; irq_no < irqcount; irq_no++)
if (irq_list[irq_no] == irq) {
ret = true;
break;
}
spin_unlock(&resume_reason_lock);
return ret;
}
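/*
 * Sketch of a hypothetical driver-side use of check_wakeup_reason()
 * after resume (struct my_data and its irq field are assumptions, not
 * part of this file):
 *
 *	static int my_driver_resume(struct device *dev)
 *	{
 *		struct my_data *data = dev_get_drvdata(dev);
 *
 *		if (check_wakeup_reason(data->irq))
 *			dev_info(dev, "device interrupt woke the system\n");
 *		return 0;
 *	}
 */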
void log_suspend_abort_reason(const char *fmt, ...)
{
va_list args;
spin_lock(&resume_reason_lock);
/* The suspend abort reason has already been logged. */
if (suspend_abort) {
spin_unlock(&resume_reason_lock);
return;
}
suspend_abort = true;
va_start(args, fmt);
vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
va_end(args);
spin_unlock(&resume_reason_lock);
}
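/*
 * Hypothetical caller sketch: platform suspend code that detects a
 * pending wakeup interrupt while entering suspend might record it as:
 *
 *	log_suspend_abort_reason("Pending IRQ %d: %s", irq, name);
 *
 * Only the first abort reason per suspend cycle is kept; later calls
 * in the same cycle are ignored.
 */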
/* Detects a suspend event and clears all previously logged wakeup reasons. */
static int wakeup_reason_pm_event(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
switch (pm_event) {
case PM_SUSPEND_PREPARE:
spin_lock(&resume_reason_lock);
irqcount = 0;
suspend_abort = false;
spin_unlock(&resume_reason_lock);
/* monotonic time since boot */
last_monotime = ktime_get();
/* monotonic time since boot including the time spent in suspend */
last_stime = ktime_get_boottime();
break;
case PM_POST_SUSPEND:
/* monotonic time since boot */
curr_monotime = ktime_get();
/* monotonic time since boot including the time spent in suspend */
curr_stime = ktime_get_boottime();
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block wakeup_reason_pm_notifier_block = {
.notifier_call = wakeup_reason_pm_event,
};
/*
 * Initializes the sysfs interface and registers the pm_event
 * notifier.
 */
int __init wakeup_reason_init(void)
{
int retval;
retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
if (retval)
printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
__func__, retval);
wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
if (!wakeup_reason) {
printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
__func__);
return 1;
}
retval = sysfs_create_group(wakeup_reason, &attr_group);
if (retval) {
kobject_put(wakeup_reason);
printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
__func__, retval);
}
return 0;
}
late_initcall(wakeup_reason_init);
#ifdef CONFIG_ARCH_EXYNOS
#define NR_EINT 32
struct wakeup_reason_stats {
int irq;
unsigned int wakeup_count;
};
static struct wakeup_reason_stats wakeup_reason_stats[NR_EINT] = {{0,},};
void update_wakeup_reason_stats(int irq, int eint)
{
if (eint >= NR_EINT) {
pr_info("%s: can't update wakeup reason stats information\n", __func__);
return;
}
wakeup_reason_stats[eint].irq = irq;
wakeup_reason_stats[eint].wakeup_count++;
}
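/*
 * Hypothetical caller sketch: Exynos platform wakeup handling would map
 * an external interrupt (EINT) line back to its Linux IRQ and call:
 *
 *	update_wakeup_reason_stats(irq, eint_num);
 *
 * where eint_num must be below NR_EINT (32) or the update is dropped.
 */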
#ifdef CONFIG_DEBUG_FS
static int wakeup_reason_stats_show(struct seq_file *s, void *unused)
{
int i;
seq_puts(s, "eint_no\tirq\twakeup_count\tname\n");
for (i = 0; i < NR_EINT; i++) {
struct irq_desc *desc = irq_to_desc(wakeup_reason_stats[i].irq);
const char *irq_name = NULL;
if (!wakeup_reason_stats[i].irq)
continue;
if (desc && desc->action && desc->action->name)
irq_name = desc->action->name;
seq_printf(s, "%d\t%d\t%u\t\t%s\n", i,
wakeup_reason_stats[i].irq,
wakeup_reason_stats[i].wakeup_count, irq_name);
}
return 0;
}
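/*
 * Example debugfs output of wakeup_reason_stats (values illustrative):
 *
 *	eint_no	irq	wakeup_count	name
 *	8	40	3		rtc-alarm
 *	12	44	17		gpio-keys
 */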
static int wakeup_reason_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, wakeup_reason_stats_show, NULL);
}
static const struct file_operations wakeup_reason_stats_ops = {
.open = wakeup_reason_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init wakeup_reason_debugfs_init(void)
{
debugfs_create_file("wakeup_reason_stats", S_IFREG | S_IRUGO,
NULL, NULL, &wakeup_reason_stats_ops);
return 0;
}
late_initcall(wakeup_reason_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_ARCH_EXYNOS */