Hello Markus,
My patch file seems to be in the order of 200-300 kByte. :-)
According to my research, the (changed) files below seem to be
sufficient to run Android on real HW. Could you look through your
patch file and tell me if there are any I'm missing:
arch/arm/Kconfig
arch/arm/kernel/process.c
arch/arm/kernel/signal.c
drivers/android/alarm.c
drivers/android/android_gadget.c
drivers/android/android_kernel_debug.c
drivers/android/android_kernel_debug.h
drivers/android/binder.c
drivers/android/Kconfig
drivers/android/logger.c
drivers/android/Makefile
drivers/android/power.c
drivers/android/ram_console.c
drivers/android/timed_gpio.c
drivers/input/evdev.c
drivers/Kconfig
drivers/misc/Kconfig
drivers/misc/lowmemorykiller/lowmemorykiller.c
drivers/misc/lowmemorykiller/Makefile
drivers/misc/Makefile
fs/inotify_user.c
include/linux/android_alarm.h
include/linux/android_gadget.h
include/linux/android_power.h
include/linux/android_timed_gpio.h
include/linux/binder_module.h
include/linux/binder_type_constants.h
include/linux/logger.h
kernel/power/process.c
drivers/Makefile
Does this match up to your patch file?
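(If it helps with the comparison: assuming you have patchutils installed,
lsdiff will print the list of files a patch touches, and a plain grep over
the "diff" header lines does the same without extra tools -- the patch file
name here is just a placeholder for whatever yours is called:

    lsdiff linux-2.6.25-android-0.9_r1.patch | sort
    grep '^diff ' linux-2.6.25-android-0.9_r1.patch | awk '{print $NF}' | sort

Either list should be easy to compare against the files above.)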
My patch file is as follows:
======================================================
diff -Naur linux-2.6.25/arch/arm/Kconfig linux-2.6.25-android-0.9_r1/arch/arm/Kconfig
--- linux-2.6.25/arch/arm/Kconfig 2008-04-17 03:49:44.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/arch/arm/Kconfig 2008-07-24 02:04:04.000000000 +0100
@@ -83,6 +83,10 @@
bool
default y
+config HAVE_LATENCYTOP_SUPPORT
+ bool
+ default y if !SMP
+
config LOCKDEP_SUPPORT
bool
default y
@@ -185,6 +189,13 @@
help
This enables support for systems based on the Agilent AAEC-2000
+config ARCH_GOLDFISH
+ bool "Goldfish"
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+ help
+ Support for Goldfish Virtual Platform.
+
config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
@@ -486,6 +497,8 @@
endchoice
+source "arch/arm/mach-goldfish/Kconfig"
+
source "arch/arm/mach-clps711x/Kconfig"
source "arch/arm/mach-ep93xx/Kconfig"
@@ -1168,10 +1181,14 @@
source "drivers/leds/Kconfig"
+source "drivers/switch/Kconfig"
+
source "drivers/rtc/Kconfig"
source "drivers/dma/Kconfig"
+source "drivers/android/Kconfig"
+
source "drivers/dca/Kconfig"
endmenu
diff -Naur linux-2.6.25/arch/arm/kernel/process.c linux-2.6.25-android-0.9_r1/arch/arm/kernel/process.c
--- linux-2.6.25/arch/arm/kernel/process.c 2008-04-17 03:49:44.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/arch/arm/kernel/process.c 2008-07-24 02:04:04.000000000 +0100
@@ -367,6 +367,16 @@
EXPORT_SYMBOL(dump_fpu);
/*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+ struct pt_regs ptregs = *task_pt_regs(tsk);
+ elf_core_copy_regs(regs, &ptregs);
+ return 1;
+}
+
+/*
* Shuffle the argument into the correct register before calling the
* thread function. r1 is the thread argument, r2 is the pointer to
* the thread function, and r3 points to the exit function.
diff -Naur linux-2.6.25/arch/arm/kernel/signal.c linux-2.6.25-android-0.9_r1/arch/arm/kernel/signal.c
--- linux-2.6.25/arch/arm/kernel/signal.c 2008-04-17 03:49:44.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/arch/arm/kernel/signal.c 2008-07-24 02:04:04.000000000 +0100
@@ -534,6 +534,14 @@
static inline void restart_syscall(struct pt_regs *regs)
{
+ if (regs->ARM_ORIG_r0 == -ERESTARTNOHAND ||
+ regs->ARM_ORIG_r0 == -ERESTARTSYS ||
+ regs->ARM_ORIG_r0 == -ERESTARTNOINTR ||
+ regs->ARM_ORIG_r0 == -ERESTART_RESTARTBLOCK) {
+ /* the syscall cannot be safely restarted, return -EINTR instead */
+ regs->ARM_r0 = -EINTR;
+ return;
+ }
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
}
@@ -650,6 +658,7 @@
*/
if (syscall) {
if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
+ regs->ARM_r0 = -EAGAIN; /* prevent multiple restarts */
if (thumb_mode(regs)) {
regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
regs->ARM_pc -= 2;
diff -Naur linux-2.6.25/drivers/Kconfig linux-2.6.25-android-0.9_r1/drivers/Kconfig
--- linux-2.6.25/drivers/Kconfig 2008-04-17 03:49:44.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/Kconfig 2008-07-24 02:04:04.000000000 +0100
@@ -84,6 +84,8 @@
source "drivers/leds/Kconfig"
+source "drivers/switch/Kconfig"
+
source "drivers/infiniband/Kconfig"
source "drivers/edac/Kconfig"
@@ -97,4 +99,7 @@
source "drivers/auxdisplay/Kconfig"
source "drivers/uio/Kconfig"
+
+source "drivers/android/Kconfig"
+
endmenu
diff -Naur linux-2.6.25/drivers/Makefile linux-2.6.25-android-0.9_r1/drivers/Makefile
--- linux-2.6.25/drivers/Makefile 2008-04-17 03:49:44.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/Makefile 2008-07-24 02:04:04.000000000 +0100
@@ -80,6 +80,7 @@
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
+obj-$(CONFIG_SWITCH) += switch/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
@@ -91,5 +92,6 @@
obj-$(CONFIG_HID) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_OF) += of/
+obj-y += android/
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_VIRTIO) += virtio/
diff -Naur linux-2.6.25/drivers/android/Kconfig linux-2.6.25-android-0.9_r1/drivers/android/Kconfig
--- linux-2.6.25/drivers/android/Kconfig 1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/Kconfig 2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,95 @@
+menu "Android"
+
+config ANDROID_RAM_CONSOLE
+ bool "RAM buffer console"
+ default n
+
+config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ bool "Enable verbose console messages"
+ default y
+ depends on ANDROID_RAM_CONSOLE
+
+menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ bool "Enable error correction"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC8
+ select REED_SOLOMON_DEC8
+
+if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+ int "Data data size"
+ default 128
+ help
+ Must be a power of 2.
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+ int "ECC size"
+ default 16
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+ int "Symbol size"
+ default 8
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+ hex "Polynomial"
+ default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
+ default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
+ default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
+ default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
+ default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
+
+endif #ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_EARLY_INIT
+ bool "Start ram console early"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+
+config ANDROID_RAM_CONSOLE_EARLY_ADDR
+ hex "RAM console virtual address"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_RAM_CONSOLE_EARLY_SIZE
+ hex "RAM console buffer size"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_POWER
+ bool "Android power driver"
+ depends on PM && RTC_CLASS
+ default n
+
+config ANDROID_POWER_ALARM
+ bool "Android alarm driver"
+ depends on ANDROID_POWER
+ default y
+
+config ANDROID_POWER_STAT
+ bool "Android power driver lock stats"
+ depends on ANDROID_POWER
+ default y
+
+config ANDROID_LOGGER
+ bool "Android log driver"
+ default y
+
+config ANDROID_TIMED_GPIO
+ bool "Android timed gpio driver"
+ depends on GENERIC_GPIO
+ default y
+
+config ANDROID_BINDER_IPC
+ tristate "Binder IPC Driver"
+ default y
+
+config ANDROID_PARANOID_NETWORK
+ bool "Only allow certain groups to create sockets"
+ default y
+ help
+ none
+
+endmenu
diff -Naur linux-2.6.25/drivers/android/Makefile linux-2.6.25-android-0.9_r1/drivers/android/Makefile
--- linux-2.6.25/drivers/android/Makefile 1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/Makefile 2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,6 @@
+obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
+obj-$(CONFIG_ANDROID_POWER) += power.o
+obj-$(CONFIG_ANDROID_POWER_ALARM) += alarm.o
+obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
diff -Naur linux-2.6.25/drivers/android/alarm.c linux-2.6.25-android-0.9_r1/drivers/android/alarm.c
--- linux-2.6.25/drivers/android/alarm.c 1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/alarm.c 2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,533 @@
+/* drivers/android/alarm.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach/time.h>
+#include <linux/android_alarm.h>
+#include <linux/android_power.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+
+#define ANDROID_ALARM_PRINT_ERRORS (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_INFO (1U << 2)
+#define ANDROID_ALARM_PRINT_IO (1U << 3)
+#define ANDROID_ALARM_PRINT_INT (1U << 4)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 5)
+
+#if 0
+#define ANDROID_ALARM_DPRINTF_MASK (~0)
+#define ANDROID_ALARM_DPRINTF(debug_level_mask, args...) \
+ do { \
+ if(ANDROID_ALARM_DPRINTF_MASK & debug_level_mask) { \
+ printk(args); \
+ } \
+ } while(0)
+#else
+#define ANDROID_ALARM_DPRINTF(args...)
+#endif
+
+// support old userspace code
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) // set alarm
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+static struct rtc_device *alarm_rtc_dev;
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static android_suspend_lock_t alarm_suspend_lock = {
+ .name = "android_alarm"
+};
+static android_suspend_lock_t alarm_rtc_suspend_lock = {
+ .name = "android_alarm_rtc"
+};
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+static struct platform_device *alarm_platform_dev;
+static struct hrtimer alarm_timer[ANDROID_ALARM_TYPE_COUNT];
+static struct timespec alarm_time[ANDROID_ALARM_TYPE_COUNT];
+static struct timespec elapsed_rtc_delta;
+
+static void alarm_start_hrtimer(android_alarm_type_t alarm_type)
+{
+ struct timespec hr_alarm_time;
+ if(!(alarm_enabled & (1U << alarm_type)))
+ return;
+ hr_alarm_time = alarm_time[alarm_type];
+ if(alarm_type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP || alarm_type == ANDROID_ALARM_ELAPSED_REALTIME)
+ set_normalized_timespec(&hr_alarm_time, hr_alarm_time.tv_sec + elapsed_rtc_delta.tv_sec,
+ hr_alarm_time.tv_nsec + elapsed_rtc_delta.tv_nsec);
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_FLOW, "alarm start hrtimer %d at %ld.%09ld\n", alarm_type, hr_alarm_time.tv_sec, hr_alarm_time.tv_nsec);
+ hrtimer_start(&alarm_timer[alarm_type], timespec_to_ktime(hr_alarm_time), HRTIMER_MODE_ABS);
+}
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ unsigned long flags;
+ struct timespec new_alarm_time;
+ struct timespec new_rtc_time;
+ struct timespec tmp_time;
+ struct rtc_time rtc_new_rtc_time;
+ android_alarm_type_t alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+ uint32_t alarm_type_mask = 1U << alarm_type;
+
+ if(alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+ return -EINVAL;
+
+ if(ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+ if(file->private_data == NULL && cmd != ANDROID_ALARM_SET_RTC) {
+ spin_lock_irqsave(&alarm_slock, flags);
+ if(alarm_opened) {
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return -EBUSY;
+ }
+ alarm_opened = 1;
+ file->private_data = (void *)1;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+
+ switch(ANDROID_ALARM_BASE_CMD(cmd)) {
+ //case ANDROID_ALARM_CLEAR_OLD: // same as ANDROID_ALARM_CLEAR(0)
+ case ANDROID_ALARM_CLEAR(0):
+ spin_lock_irqsave(&alarm_slock, flags);
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO, "alarm %d clear\n", alarm_type);
+ hrtimer_cancel(&alarm_timer[alarm_type]);
+ if(alarm_pending) {
+ alarm_pending &= ~alarm_type_mask;
+ if(!alarm_pending && !wait_pending) {
+ android_unlock_suspend(&alarm_suspend_lock);
+ }
+ }
+ alarm_enabled &= ~alarm_type_mask;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+
+ case ANDROID_ALARM_SET_OLD:
+ case ANDROID_ALARM_SET_AND_WAIT_OLD:
+ if(get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ new_alarm_time.tv_nsec = 0;
+ goto from_old_alarm_set;
+
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ case ANDROID_ALARM_SET(0):
+ if(copy_from_user(&new_alarm_time, (void __user *)arg, sizeof(new_alarm_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+from_old_alarm_set:
+ spin_lock_irqsave(&alarm_slock, flags);
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO, "alarm %d set %ld.%09ld\n", alarm_type, new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+ alarm_time[alarm_type] = new_alarm_time;
+ alarm_enabled |= alarm_type_mask;
+ alarm_start_hrtimer(alarm_type);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if(ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0) && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+ break;
+ // fall though
+ case ANDROID_ALARM_WAIT:
+ spin_lock_irqsave(&alarm_slock, flags);
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO, "alarm wait\n");
+ if(!alarm_pending && wait_pending) {
+ android_unlock_suspend(&alarm_suspend_lock);
+ wait_pending = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+ if(rv)
+ goto err1;
+ spin_lock_irqsave(&alarm_slock, flags);
+ rv = alarm_pending;
+ wait_pending = 1;
+ alarm_pending = 0;
+ if(rv & (ANDROID_ALARM_RTC_WAKEUP_MASK | ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)) {
+ android_unlock_suspend(&alarm_rtc_suspend_lock);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+ case ANDROID_ALARM_SET_RTC:
+ if(copy_from_user(&new_rtc_time, (void __user *)arg, sizeof(new_rtc_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ rtc_time_to_tm(new_rtc_time.tv_sec, &rtc_new_rtc_time);
+
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO,
+ "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+ new_rtc_time.tv_sec, new_rtc_time.tv_nsec,
+ rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+ rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+ rtc_new_rtc_time.tm_mday, rtc_new_rtc_time.tm_year + 1900);
+
+ mutex_lock(&alarm_setrtc_mutex);
+ spin_lock_irqsave(&alarm_slock, flags);
+ getnstimeofday(&tmp_time);
+ elapsed_rtc_delta = timespec_sub(elapsed_rtc_delta, timespec_sub(tmp_time, new_rtc_time));
+ rv = do_settimeofday(&new_rtc_time);
+ if(rv >= 0) {
+ alarm_start_hrtimer(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP); // restart with new offset
+ alarm_start_hrtimer(ANDROID_ALARM_ELAPSED_REALTIME);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if(rv < 0) {
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_ERRORS, "Failed to set time\n");
+ mutex_unlock(&alarm_setrtc_mutex);
+ goto err1;
+ }
+ rv = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ mutex_unlock(&alarm_setrtc_mutex);
+ if(rv < 0) {
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_ERRORS, "Failed to set RTC, time will be lost on reboot\n");
+ goto err1;
+ }
+ break;
+ case ANDROID_ALARM_GET_TIME(0):
+ spin_lock_irqsave(&alarm_slock, flags);
+ if(alarm_type != ANDROID_ALARM_SYSTEMTIME) {
+ getnstimeofday(&tmp_time);
+ if(alarm_type >= ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP) {
+ tmp_time = timespec_sub(tmp_time, elapsed_rtc_delta);
+ }
+ }
+ else
+ ktime_get_ts(&tmp_time);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if(copy_to_user((void __user *)arg, &tmp_time, sizeof(tmp_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto err1;
+ }
+err1:
+ return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if(file->private_data != 0) {
+ for(i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+ uint32_t alarm_type_mask = 1U << i;
+ if(alarm_enabled & alarm_type_mask) {
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, "alarm_release: clear alarm, pending %d\n", !!(alarm_pending & alarm_type_mask));
+ hrtimer_cancel(&alarm_timer[i]);
+ alarm_enabled &= ~alarm_type_mask;
+ }
+ }
+ if(alarm_pending | wait_pending) {
+ if(alarm_pending)
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, "alarm_release: clear pending alarms %x\n", alarm_pending);
+ android_unlock_suspend(&alarm_suspend_lock);
+ wait_pending = 0;
+ alarm_pending = 0;
+ }
+ alarm_opened = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+ unsigned long flags;
+ android_alarm_type_t alarm_type = (timer - alarm_timer);
+ uint32_t alarm_type_mask = 1U << alarm_type;
+
+
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INT, "alarm_timer_triggered type %d\n", alarm_type);
+ spin_lock_irqsave(&alarm_slock, flags);
+ android_lock_suspend_auto_expire(&alarm_suspend_lock, 5 * HZ);
+ alarm_enabled &= ~alarm_type_mask;
+ alarm_pending |= alarm_type_mask;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return HRTIMER_NORESTART;
+}
+
+static void alarm_triggered_func(void *p)
+{
+// unsigned long flags;
+
+ struct rtc_device *rtc = alarm_rtc_dev;
+ if(rtc->irq_data & RTC_AF) {
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INT, "alarm_triggered_func:\n");
+ android_lock_suspend_auto_expire(&alarm_rtc_suspend_lock, 1 * HZ);
+ }
+}
+
+int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rtc_wkalrm rtc_alarm;
+ struct rtc_time rtc_current_rtc_time;
+ unsigned long rtc_current_time;
+ unsigned long rtc_alarm_time;
+ struct timespec rtc_current_timespec;
+ struct timespec rtc_delta;
+ struct timespec elapsed_realtime_alarm_time;
+
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_FLOW, "alarm_suspend(%p, %d)\n", pdev, state.event);
+ spin_lock_irqsave(&alarm_slock, flags);
+ if(alarm_pending && (alarm_suspend_lock.flags & ANDROID_SUSPEND_LOCK_AUTO_EXPIRE)) {
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, "alarm pending\n");
+ err = -EBUSY;
+ goto err1;
+ }
+ if(alarm_enabled & (ANDROID_ALARM_RTC_WAKEUP_MASK | ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)) {
+ if(alarm_enabled & ANDROID_ALARM_RTC_WAKEUP_MASK)
+ hrtimer_cancel(&alarm_timer[ANDROID_ALARM_RTC_WAKEUP]);
+ if(alarm_enabled & ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+ hrtimer_cancel(&alarm_timer[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP]);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ rtc_current_timespec.tv_nsec = 0;
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_timespec.tv_sec);
+ save_time_delta(&rtc_delta, &rtc_current_timespec);
+ set_normalized_timespec(&elapsed_realtime_alarm_time,
+ alarm_time[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].tv_sec + elapsed_rtc_delta.tv_sec,
+ alarm_time[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].tv_nsec + elapsed_rtc_delta.tv_nsec);
+ if((alarm_enabled & ANDROID_ALARM_RTC_WAKEUP_MASK) &&
+ (!(alarm_enabled & ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+ || timespec_compare(&alarm_time[ANDROID_ALARM_RTC_WAKEUP], &elapsed_realtime_alarm_time) < 0))
+ rtc_alarm_time = timespec_sub(alarm_time[ANDROID_ALARM_RTC_WAKEUP], rtc_delta).tv_sec;
+ else {
+ rtc_alarm_time = timespec_sub(elapsed_realtime_alarm_time, rtc_delta).tv_sec;
+ }
+ rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+ rtc_alarm.enabled = 1;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO,
+ "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+ rtc_alarm_time, rtc_current_time,
+ rtc_delta.tv_sec, rtc_delta.tv_nsec);
+ if(rtc_current_time + 1 >= rtc_alarm_time) {
+ //spin_lock_irqsave(&alarm_slock, flags);
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, "alarm about to go off\n");
+ memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+ rtc_alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ android_lock_suspend_auto_expire(&alarm_rtc_suspend_lock, 2 * HZ); // trigger a wakeup
+ alarm_start_hrtimer(ANDROID_ALARM_RTC_WAKEUP);
+ alarm_start_hrtimer(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP);
+ err = -EBUSY;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+ else {
+err1:
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ return err;
+}
+
+int alarm_resume(struct platform_device *pdev)
+{
+ struct rtc_wkalrm alarm;
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_FLOW, "alarm_resume(%p)\n", pdev);
+ if(alarm_enabled & (ANDROID_ALARM_RTC_WAKEUP_MASK | ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)) {
+ memset(&alarm, 0, sizeof(alarm));
+ alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &alarm);
+ alarm_start_hrtimer(ANDROID_ALARM_RTC_WAKEUP);
+ alarm_start_hrtimer(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP);
+ }
+ return 0;
+}
+
+static struct rtc_task alarm_rtc_task = {
+ .func = alarm_triggered_func
+};
+
+static struct file_operations alarm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = alarm_ioctl,
+ .open = alarm_open,
+ .release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "alarm",
+ .fops = &alarm_fops,
+};
+
+static int rtc_alarm_add_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int err;
+ struct rtc_device *rtc = to_rtc_device(dev);
+
+ mutex_lock(&alarm_setrtc_mutex);
+
+ if(alarm_rtc_dev) {
+ err = -EBUSY;
+ goto err1;
+ }
+
+ err = misc_register(&alarm_device);
+ if(err)
+ goto err1;
+ alarm_platform_dev = platform_device_register_simple("alarm", -1, NULL, 0);
+ if(IS_ERR(alarm_platform_dev)) {
+ err = PTR_ERR(alarm_platform_dev);
+ goto err2;
+ }
+ err = rtc_irq_register(rtc, &alarm_rtc_task);
+ if(err)
+ goto err3;
+ alarm_rtc_dev = rtc;
+ mutex_unlock(&alarm_setrtc_mutex);
+
+ //device_pm_set_parent(&alarm_platform_dev->dev, dev); // currently useless, drivers are suspended in reverse creation order
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, "alarm: parent %p\n", alarm_platform_dev->dev.power.pm_parent);
+ return 0;
+
+err3:
+ platform_device_unregister(alarm_platform_dev);
+err2:
+ misc_deregister(&alarm_device);
+err1:
+ mutex_unlock(&alarm_setrtc_mutex);
+ return err;
+}
+
+static void rtc_alarm_remove_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ if(dev == &alarm_rtc_dev->dev) {
+ rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+ platform_device_unregister(alarm_platform_dev);
+ misc_deregister(&alarm_device);
+ alarm_rtc_dev = NULL;
+ }
+}
+
+static struct class_interface rtc_alarm_interface = {
+ .add_dev = &rtc_alarm_add_device,
+ .remove_dev = &rtc_alarm_remove_device,
+};
+
+static struct platform_driver alarm_driver = {
+ .suspend = alarm_suspend,
+ .resume = alarm_resume,
+ .driver = {
+ .name = "alarm"
+ }
+};
+
+static int __init alarm_late_init(void)
+{
+ unsigned long flags;
+ struct timespec system_time;
+
+ // this needs to run after the rtc is read at boot
+ spin_lock_irqsave(&alarm_slock, flags);
+ // We read the current rtc and system time so we can later calculate
+ // elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
+ // (rtc - (boot_rtc - boot_systemtime))
+ getnstimeofday(&elapsed_rtc_delta);
+ ktime_get_ts(&system_time);
+ elapsed_rtc_delta = timespec_sub(elapsed_rtc_delta, system_time);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO,
+ "alarm_late_init: rtc to elapsed realtime delta %ld.%09ld\n",
+ elapsed_rtc_delta.tv_sec, elapsed_rtc_delta.tv_nsec);
+ return 0;
+}
+
+static int __init alarm_init(void)
+{
+ int err;
+ int i;
+
+ for(i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_init(&alarm_timer[i], CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ alarm_timer[i].function = alarm_timer_triggered;
+ }
+ hrtimer_init(&alarm_timer[ANDROID_ALARM_SYSTEMTIME], CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ alarm_timer[ANDROID_ALARM_SYSTEMTIME].function = alarm_timer_triggered;
+ err = platform_driver_register(&alarm_driver);
+ if(err < 0)
+ goto err1;
+ err = android_init_suspend_lock(&alarm_suspend_lock);
+ if(err < 0)
+ goto err2;
+ err = android_init_suspend_lock(&alarm_rtc_suspend_lock);
+ if(err < 0)
+ goto err3;
+ rtc_alarm_interface.class = rtc_class;
+ err = class_interface_register(&rtc_alarm_interface);
+ if(err < 0)
+ goto err4;
+
+ return 0;
+
+err4:
+ android_uninit_suspend_lock(&alarm_rtc_suspend_lock);
+err3:
+ android_uninit_suspend_lock(&alarm_suspend_lock);
+err2:
+ platform_driver_unregister(&alarm_driver);
+err1:
+ return err;
+}
+
+static void __exit alarm_exit(void)
+{
+ class_interface_unregister(&rtc_alarm_interface);
+ android_uninit_suspend_lock(&alarm_rtc_suspend_lock);
+ android_uninit_suspend_lock(&alarm_suspend_lock);
+ platform_driver_unregister(&alarm_driver);
+}
+
+late_initcall(alarm_late_init);
+module_init(alarm_init);
+module_exit(alarm_exit);
+
diff -Naur linux-2.6.25/drivers/android/binder.c linux-2.6.25-android-0.9_r1/drivers/android/binder.c
--- linux-2.6.25/drivers/android/binder.c 1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/binder.c 2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,3439 @@
+/* drivers/android/binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/binder.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+static DEFINE_MUTEX(binder_lock);
+static HLIST_HEAD(binder_procs);
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct proc_dir_entry *binder_proc_dir_entry_root;
+static struct proc_dir_entry *binder_proc_dir_entry_proc;
+static struct hlist_head binder_dead_nodes;
+
+static int binder_read_proc_proc(
+ char *page, char **start, off_t off, int count, int *eof, void *data);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K 0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M 0x400000
+#endif
+
+#ifndef __i386__
+#define FORBIDDEN_MMAP_FLAGS (VM_WRITE | VM_EXEC)
+#else
+#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+#endif
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
+ BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
+ BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
+ BINDER_DEBUG_DEAD_BINDER = 1U << 4,
+ BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
+ BINDER_DEBUG_READ_WRITE = 1U << 6,
+ BINDER_DEBUG_USER_REFS = 1U << 7,
+ BINDER_DEBUG_THREADS = 1U << 8,
+ BINDER_DEBUG_TRANSACTION = 1U << 9,
+ BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
+ BINDER_DEBUG_FREE_BUFFER = 1U << 11,
+ BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+ BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+static int binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+static int binder_set_stop_on_user_error(
+ const char *val, struct kernel_param *kp)
+{
+ int ret;
+ ret = param_set_int(val, kp);
+ if (binder_stop_on_user_error < 2)
+ wake_up(&binder_user_error_wait);
+ return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+ param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_user_error(x...) \
+ do { \
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+ printk(KERN_INFO x); \
+ if (binder_stop_on_user_error) \
+ binder_stop_on_user_error = 2; \
+ } while (0)
+
+enum {
+ BINDER_STAT_PROC,
+ BINDER_STAT_THREAD,
+ BINDER_STAT_NODE,
+ BINDER_STAT_REF,
+ BINDER_STAT_DEATH,
+ BINDER_STAT_TRANSACTION,
+ BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+ int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int obj_created[BINDER_STAT_COUNT];
+ int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+};
+struct binder_transaction_log {
+ int next;
+ int full;
+ struct binder_transaction_log_entry entry[32];
+};
+struct binder_transaction_log binder_transaction_log;
+struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_transaction_log *log)
+{
+ struct binder_transaction_log_entry *e;
+ e = &log->entry[log->next];
+ memset(e, 0, sizeof(*e));
+ log->next++;
+ if (log->next == ARRAY_SIZE(log->entry)) {
+ log->next = 0;
+ log->full = 1;
+ }
+ return e;
+}
+
+struct binder_work {
+ struct list_head entry;
+ enum {
+ BINDER_WORK_TRANSACTION = 1,
+ BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_NODE,
+ BINDER_WORK_DEAD_BINDER,
+ BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+ BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ } type;
+};
+
+struct binder_node {
+ int debug_id;
+ struct binder_work work;
+ union {
+ struct rb_node rb_node;
+ struct hlist_node dead_node;
+ };
+ struct binder_proc *proc;
+ struct hlist_head refs;
+ int internal_strong_refs;
+ int local_weak_refs;
+ int local_strong_refs;
+ void __user *ptr;
+ void __user *cookie;
+ unsigned has_strong_ref : 1;
+ unsigned has_weak_ref : 1;
+ unsigned has_async_transaction : 1;
+ unsigned accept_fds : 1;
+ int min_priority : 8;
+ struct list_head async_todo;
+};
+
+struct binder_ref_death {
+ struct binder_work work;
+ void __user *cookie;
+};
+
+struct binder_ref {
+ /* Lookups needed: */
+ /* node + proc => ref (transaction) */
+ /* desc + proc => ref (transaction, inc/dec ref) */
+ /* node => refs + procs (proc exit) */
+ int debug_id;
+ struct rb_node rb_node_desc;
+ struct rb_node rb_node_node;
+ struct hlist_node node_entry;
+ struct binder_proc *proc;
+ struct binder_node *node;
+ uint32_t desc;
+ int strong;
+ int weak;
+ struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free : 1;
+ unsigned allow_user_free : 1;
+ unsigned async_transaction : 1;
+ unsigned debug_id : 29;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ uint8_t data[0];
+};
+
+struct binder_proc {
+ struct hlist_node proc_node;
+ struct rb_root threads;
+ struct rb_root nodes;
+ struct rb_root refs_by_desc;
+ struct rb_root refs_by_node;
+ int pid;
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ void *buffer;
+ size_t user_buffer_offset;
+
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ struct list_head todo;
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+ struct list_head delivered_death;
+ int max_threads;
+ int requested_threads;
+ int requested_threads_started;
+ int ready_threads;
+ long default_priority;
+};
+
+enum {
+ BINDER_LOOPER_STATE_REGISTERED = 0x01,
+ BINDER_LOOPER_STATE_ENTERED = 0x02,
+ BINDER_LOOPER_STATE_EXITED = 0x04,
+ BINDER_LOOPER_STATE_INVALID = 0x08,
+ BINDER_LOOPER_STATE_WAITING = 0x10,
+ BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+ struct binder_proc *proc;
+ struct rb_node rb_node;
+ int pid;
+ int looper;
+ struct binder_transaction *transaction_stack;
+ struct list_head todo;
+ uint32_t return_error; /* Write failed, return error code in read buf */
+ uint32_t return_error2; /* Write failed, return error code in read */
+ /* buffer. Used when sending a reply to a dead process that */
+ /* we are also waiting on */
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+};
+
+struct binder_transaction {
+ int debug_id;
+ struct binder_work work;
+ struct binder_thread *from;
+ struct binder_transaction *from_parent;
+ struct binder_proc *to_proc;
+ struct binder_thread *to_thread;
+ struct binder_transaction *to_parent;
+ unsigned need_reply : 1;
+ /*unsigned is_dead : 1;*/ /* not used at the moment */
+
+ struct binder_buffer *buffer;
+ unsigned int code;
+ unsigned int flags;
+ long priority;
+ long saved_priority;
+ uid_t sender_euid;
+};
+
+/*
+ * copied from get_unused_fd_flags
+ */
+int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
+{
+ struct files_struct *files = get_files_struct(tsk);
+ int fd, error;
+ struct fdtable *fdt;
+ unsigned long rlim_cur;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ error = -EMFILE;
+ spin_lock(&files->file_lock);
+
+repeat:
+ fdt = files_fdtable(files);
+ fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
+ files->next_fd);
+
+ /*
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
+ rcu_read_lock();
+ if (tsk->signal)
+ rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+ else
+ rlim_cur = 0;
+ rcu_read_unlock();
+ if (fd >= rlim_cur)
+ goto out;
+
+ /* Do we need to expand the fd array or fd set? */
+ error = expand_files(files, fd);
+ if (error < 0)
+ goto out;
+
+ if (error) {
+ /*
+ * If we needed to expand the fs array we
+ * might have blocked - try again.
+ */
+ error = -EMFILE;
+ goto repeat;
+ }
+
+ FD_SET(fd, fdt->open_fds);
+ if (flags & O_CLOEXEC)
+ FD_SET(fd, fdt->close_on_exec);
+ else
+ FD_CLR(fd, fdt->close_on_exec);
+ files->next_fd = fd + 1;
+#if 1
+ /* Sanity check */
+ if (fdt->fd[fd] != NULL) {
+ printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+ fdt->fd[fd] = NULL;
+ }
+#endif
+ error = fd;
+
+out:
+ spin_unlock(&files->file_lock);
+ put_files_struct(files);
+ return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+ struct task_struct *tsk, unsigned int fd, struct file *file)
+{
+ struct files_struct *files = get_files_struct(tsk);
+ struct fdtable *fdt;
+
+ if (files == NULL)
+ return;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ BUG_ON(fdt->fd[fd] != NULL);
+ rcu_assign_pointer(fdt->fd[fd], file);
+ spin_unlock(&files->file_lock);
+ put_files_struct(files);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+ struct fdtable *fdt = files_fdtable(files);
+ __FD_CLR(fd, fdt->open_fds);
+ if (fd < files->next_fd)
+ files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct task_struct *tsk, unsigned int fd)
+{
+ struct file *filp;
+ struct files_struct *files = get_files_struct(tsk);
+ struct fdtable *fdt;
+ int retval;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (fd >= fdt->max_fds)
+ goto out_unlock;
+ filp = fdt->fd[fd];
+ if (!filp)
+ goto out_unlock;
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ FD_CLR(fd, fdt->close_on_exec);
+ __put_unused_fd(files, fd);
+ spin_unlock(&files->file_lock);
+ retval = filp_close(filp, files);
+
+ /* can't restart close syscall because file table entry was cleared */
+ if (unlikely(retval == -ERESTARTSYS ||
+ retval == -ERESTARTNOINTR ||
+ retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK))
+ retval = -EINTR;
+
+ put_files_struct(files);
+ return retval;
+
+out_unlock:
+ spin_unlock(&files->file_lock);
+ put_files_struct(files);
+ return -EBADF;
+}
+
+static void binder_set_nice(long nice)
+{
+ long min_nice;
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
+ }
+ min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+ if (binder_debug_mask & BINDER_DEBUG_PRIORITY_CAP)
+ printk(KERN_INFO "binder: %d: nice value %ld not allowed use "
+ "%ld instead\n", current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice < 20)
+ return;
+ binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
+static size_t binder_buffer_size(
+ struct binder_proc *proc, struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &proc->buffers))
+ return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ else
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(
+ struct binder_proc *proc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: add free buffer, size %d, "
+ "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(
+ struct binder_proc *proc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(
+ struct binder_proc *proc, void __user *user_ptr)
+{
+ struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else
+ return buffer;
+ }
+ return NULL;
+}
+
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+ void *start, void *end, struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct vm_struct tmp_area;
+ struct page **page;
+ struct mm_struct *mm;
+
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: %s pages %p-%p\n",
+ proc->pid, allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(proc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = proc->vma;
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+ "map pages in userspace, no vma\n", proc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ struct page **page_array_ptr;
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (*page == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "for page at %p\n", proc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ tmp_area.addr = page_addr;
+ tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+ page_array_ptr = page;
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %p in kernel\n",
+ proc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr = (size_t)page_addr + proc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %lx in userspace\n",
+ proc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (size_t)page_addr +
+ proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return -ENOMEM;
+}
+
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+ size_t data_size, size_t offsets_size, int is_async)
+{
+ struct rb_node *n = proc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size;
+
+ if (proc->vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+ proc->pid);
+ return NULL;
+ }
+
+ size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (size < data_size || size < offsets_size) {
+ binder_user_error("binder: %d: got transaction with invalid "
+ "size %d-%d\n", proc->pid, data_size, offsets_size);
+ return NULL;
+ }
+
+ if (is_async &&
+ proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_ERR "binder: %d: binder_alloc_buf size %d f"
+ "ailed, no async space left\n", proc->pid, size);
+ return NULL;
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf size %d failed, "
+ "no address space\n", proc->pid, size);
+ return NULL;
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_buffer_size(proc, buffer);
+ }
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got buff"
+ "er %p size %d\n", proc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ if (binder_update_page_range(proc, 1,
+ (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL))
+ return NULL;
+
+ rb_erase(best_fit, &proc->free_buffers);
+ buffer->free = 0;
+ binder_insert_allocated_buffer(proc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(proc, new_buffer);
+ }
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got "
+ "%p\n", proc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ if (is_async) {
+ proc->free_async_space -= size + sizeof(struct binder_buffer);
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
+ printk(KERN_INFO "binder: %d: binder_alloc_buf size %d "
+ "async free %d\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((size_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(
+ struct binder_proc *proc, struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(proc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: merge free, buffer %p "
+ "share page with %p\n", proc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: merge free, "
+ "buffer %p share page with %p\n",
+ proc->pid, buffer, prev);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: merge free, buffer %p do "
+ "not share page%s%s with with %p or %p\n",
+ proc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(proc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
+static void binder_free_buf(
+ struct binder_proc *proc, struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *));
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder: %d: binder_free_buf %p size %d buffer"
+ "_size %d\n", proc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < proc->buffer);
+ BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+ if (buffer->async_transaction) {
+ proc->free_async_space += size + sizeof(struct binder_buffer);
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
+ printk(KERN_INFO "binder: %d: binder_free_buf size %d "
+ "async free %d\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ binder_update_page_range(proc, 0,
+ (void *)PAGE_ALIGN((size_t)buffer->data),
+ (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+ rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (next->free) {
+ rb_erase(&next->rb_node, &proc->free_buffers);
+ binder_delete_free_buffer(proc, next);
+ }
+ }
+ if (proc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+ if (prev->free) {
+ binder_delete_free_buffer(proc, buffer);
+ rb_erase(&prev->rb_node, &proc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *
+binder_get_node(struct binder_proc *proc, void __user *ptr)
+{
+ struct rb_node *n = proc->nodes.rb_node;
+ struct binder_node *node;
+
+ while (n) {
+ node = rb_entry(n, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ n = n->rb_left;
+ else if (ptr > node->ptr)
+ n = n->rb_right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
+static struct binder_node *
+binder_new_node(struct binder_proc *proc, void __user *ptr)
+{
+ struct rb_node **p = &proc->nodes.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_node *node;
+
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ p = &(*p)->rb_left;
+ else if (ptr > node->ptr)
+ p = &(*p)->rb_right;
+ else
+ return NULL;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL)
+ return NULL;
+ binder_stats.obj_created[BINDER_STAT_NODE]++;
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &proc->nodes);
+ node->debug_id = ++binder_last_id;
+ node->proc = proc;
+ node->ptr = ptr;
+ node->work.type = BINDER_WORK_NODE;
+ INIT_LIST_HEAD(&node->work.entry);
+ INIT_LIST_HEAD(&node->async_todo);
+ return node;
+}
+
+static int
+binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ if (strong) {
+ if (internal) {
+ if (target_list == NULL &&
+ node->internal_strong_refs == 0 &&
+ !(node == binder_context_mgr_node &&
+ node->has_strong_ref)) {
+ printk(KERN_ERR "binder: invalid inc strong "
+ "node for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ node->internal_strong_refs++;
+ } else
+ node->local_strong_refs++;
+ if (!node->has_strong_ref && list_empty(&node->work.entry))
+ list_add_tail(&node->work.entry, target_list);
+ } else {
+ if (!internal)
+ node->local_weak_refs++;
+ if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+ if (target_list == NULL) {
+ printk(KERN_ERR "binder: invalid inc weak node "
+ "for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ list_add_tail(&node->work.entry, target_list);
+ }
+ }
+ return 0;
+}
+
+static int
+binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ if (strong) {
+ if (internal)
+ node->internal_strong_refs--;
+ else
+ node->local_strong_refs--;
+ if (node->local_strong_refs || node->internal_strong_refs)
+ return 0;
+ } else {
+ if (!internal)
+ node->local_weak_refs--;
+ if (node->local_weak_refs || !hlist_empty(&node->refs))
+ return 0;
+ }
+ if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+ if (list_empty(&node->work.entry)) {
+ list_add_tail(&node->work.entry, &node->proc->todo);
+ wake_up_interruptible(&node->proc->wait);
+ }
+ } else {
+ list_del_init(&node->work.entry);
+ if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+ !node->local_weak_refs) {
+ if (node->proc) {
+ rb_erase(&node->rb_node, &node->proc->nodes);
+ if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+ printk(KERN_INFO "binder: refless node %d deleted\n", node->debug_id);
+ } else {
+ hlist_del(&node->dead_node);
+ if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+ printk(KERN_INFO "binder: dead node %d deleted\n", node->debug_id);
+ }
+ kfree(node);
+ binder_stats.obj_deleted[BINDER_STAT_NODE]++;
+ }
+ }
+
+ return 0;
+}
+
+
+static struct binder_ref *
+binder_get_ref(struct binder_proc *proc, uint32_t desc)
+{
+ struct rb_node *n = proc->refs_by_desc.rb_node;
+ struct binder_ref *ref;
+
+ while (n) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+ if (desc < ref->desc)
+ n = n->rb_left;
+ else if (desc > ref->desc)
+ n = n->rb_right;
+ else
+ return ref;
+ }
+ return NULL;
+}
+
+static struct binder_ref *
+binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node)
+{
+ struct rb_node *n;
+ struct rb_node **p = &proc->refs_by_node.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_ref *ref, *new_ref;
+
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+ if (node < ref->node)
+ p = &(*p)->rb_left;
+ else if (node > ref->node)
+ p = &(*p)->rb_right;
+ else
+ return ref;
+ }
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (new_ref == NULL)
+ return NULL;
+ binder_stats.obj_created[BINDER_STAT_REF]++;
+ new_ref->debug_id = ++binder_last_id;
+ new_ref->proc = proc;
+ new_ref->node = node;
+ rb_link_node(&new_ref->rb_node_node, parent, p);
+ rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+ new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->desc > new_ref->desc)
+ break;
+ new_ref->desc = ref->desc + 1;
+ }
+
+ p = &proc->refs_by_desc.rb_node;
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+ if (new_ref->desc < ref->desc)
+ p = &(*p)->rb_left;
+ else if (new_ref->desc > ref->desc)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_ref->rb_node_desc, parent, p);
+ rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+ if (node) {
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+ if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+ printk(KERN_INFO "binder: %d new ref %d desc %d for "
+ "node %d\n", proc->pid, new_ref->debug_id,
+ new_ref->desc, node->debug_id);
+ } else {
+ if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+ printk(KERN_INFO "binder: %d new ref %d desc %d for "
+ "dead node\n", proc->pid, new_ref->debug_id,
+ new_ref->desc);
+ }
+ return new_ref;
+}
+
+static void
+binder_delete_ref(struct binder_ref *ref)
+{
+ if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+ printk(KERN_INFO "binder: %d delete ref %d desc %d for "
+ "node %d\n", ref->proc->pid, ref->debug_id,
+ ref->desc, ref->node->debug_id);
+ rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+ rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+ if (ref->strong)
+ binder_dec_node(ref->node, 1, 1);
+ hlist_del(&ref->node_entry);
+ binder_dec_node(ref->node, 0, 1);
+ if (ref->death) {
+ if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+ printk(KERN_INFO "binder: %d delete ref %d desc %d "
+ "has death notification\n", ref->proc->pid,
+ ref->debug_id, ref->desc);
+ list_del(&ref->death->work.entry);
+ kfree(ref->death);
+ binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
+ }
+ kfree(ref);
+ binder_stats.obj_deleted[BINDER_STAT_REF]++;
+}
+
+static int
+binder_inc_ref(
+ struct binder_ref *ref, int strong, struct list_head *target_list)
+{
+ int ret;
+ if (strong) {
+ if (ref->strong == 0) {
+ ret = binder_inc_node(ref->node, 1, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->strong++;
+ } else {
+ if (ref->weak == 0) {
+ ret = binder_inc_node(ref->node, 0, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->weak++;
+ }
+ return 0;
+}
+
+
+static int
+binder_dec_ref(struct binder_ref *ref, int strong)
+{
+ if (strong) {
+ if (ref->strong == 0) {
+ binder_user_error("binder: %d invalid dec strong, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->strong--;
+ if (ref->strong == 0) {
+ int ret;
+ ret = binder_dec_node(ref->node, strong, 1);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (ref->weak == 0) {
+ binder_user_error("binder: %d invalid dec weak, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->weak--;
+ }
+ if (ref->strong == 0 && ref->weak == 0)
+ binder_delete_ref(ref);
+ return 0;
+}
+
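+/*
+ * Unwind one entry from the target thread's transaction stack and free
+ * the transaction structure.
+ */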
+static void
+binder_pop_transaction(
+ struct binder_thread *target_thread, struct binder_transaction *t)
+{
+ if (target_thread) {
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+ }
+ t->need_reply = 0;
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
+}
+
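+/*
+ * Walk back up the transaction stack and deliver @error_code to the
+ * first sender that is still alive.
+ */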
+static void
+binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code)
+{
+ struct binder_thread *target_thread;
+ BUG_ON(t->flags & TF_ONE_WAY);
+ while (1) {
+ target_thread = t->from;
+ if (target_thread) {
+ if (target_thread->return_error != BR_OK &&
+ target_thread->return_error2 == BR_OK) {
+ target_thread->return_error2 =
+ target_thread->return_error;
+ target_thread->return_error = BR_OK;
+ }
+ if (target_thread->return_error == BR_OK) {
+ if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+ printk(KERN_INFO "binder: send failed reply for transaction %d
to %d:%d\n",
+ t->debug_id, target_thread->proc->pid, target_thread-
>pid);
+
+ binder_pop_transaction(target_thread, t);
+ target_thread->return_error = error_code;
+ wake_up_interruptible(&target_thread->wait);
+ } else {
+ printk(KERN_ERR "binder: reply failed, target "
+ "thread, %d:%d, has error code %d "
+ "already\n", target_thread->proc->pid,
+ target_thread->pid,
+ target_thread->return_error);
+ }
+ return;
+ } else {
+ struct binder_transaction *next = t->from_parent;
+
+ if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+ printk(KERN_INFO "binder: send failed reply "
+ "for transaction %d, target dead\n",
+ t->debug_id);
+
+ binder_pop_transaction(target_thread, t);
+ if (next == NULL) {
+ if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+ printk(KERN_INFO "binder: reply failed,"
+ " no target thread at root\n");
+ return;
+ }
+ t = next;
+ if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+ printk(KERN_INFO "binder: reply failed, no targ"
+ "et thread -- retry %d\n", t->debug_id);
+ }
+ }
+}
+
+static void
+binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_buffer *buffer, size_t *failed_at);
+
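+/*
+ * Core of BC_TRANSACTION/BC_REPLY: copy the payload into the target
+ * process's buffer, translate binder objects and fds found in the
+ * offsets array, and queue the work on the target's todo list.
+ */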
+static void
+binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
+ struct binder_transaction_data *tr, int reply)
+{
+ struct binder_transaction *t;
+ struct binder_work *tcomplete;
+ size_t *offp, *off_end;
+ struct binder_proc *target_proc;
+ struct binder_thread *target_thread = NULL;
+ struct binder_node *target_node = NULL;
+ struct list_head *target_list;
+ wait_queue_head_t *target_wait;
+ struct binder_transaction *in_reply_to = NULL;
+ struct binder_transaction_log_entry *e;
+ uint32_t return_error;
+
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+ e->from_proc = proc->pid;
+ e->from_thread = thread->pid;
+ e->target_handle = tr->target.handle;
+ e->data_size = tr->data_size;
+ e->offsets_size = tr->offsets_size;
+
+ if (reply) {
+ in_reply_to = thread->transaction_stack;
+ if (in_reply_to == NULL) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with no transaction stack\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_empty_call_stack;
+ }
+ binder_set_nice(in_reply_to->saved_priority);
+ if (in_reply_to->to_thread != thread) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad transaction stack,"
+ " transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, in_reply_to->debug_id,
+ in_reply_to->to_proc ?
+ in_reply_to->to_proc->pid : 0,
+ in_reply_to->to_thread ?
+ in_reply_to->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ goto err_bad_call_stack;
+ }
+ thread->transaction_stack = in_reply_to->to_parent;
+ target_thread = in_reply_to->from;
+ if (target_thread == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (target_thread->transaction_stack != in_reply_to) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad target transaction stack %d, "
+ "expected %d\n",
+ proc->pid, thread->pid,
+ target_thread->transaction_stack ?
+ target_thread->transaction_stack->debug_id : 0,
+ in_reply_to->debug_id);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ target_thread = NULL;
+ goto err_dead_binder;
+ }
+ target_proc = target_thread->proc;
+ } else {
+ if (tr->target.handle) {
+ struct binder_ref *ref;
+ ref = binder_get_ref(proc, tr->target.handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction to invalid handle\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
+ target_node = ref->node;
+ } else {
+ target_node = binder_context_mgr_node;
+ if (target_node == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_no_context_mgr_node;
+ }
+ }
+ e->to_node = target_node->debug_id;
+ target_proc = target_node->proc;
+ if (target_proc == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+ struct binder_transaction *tmp;
+ tmp = thread->transaction_stack;
+ while (tmp) {
+ if (tmp->from && tmp->from->proc == target_proc)
+ target_thread = tmp->from;
+ tmp = tmp->from_parent;
+ }
+ }
+ }
+ if (target_thread) {
+ e->to_thread = target_thread->pid;
+ target_list = &target_thread->todo;
+ target_wait = &target_thread->wait;
+ } else {
+ target_list = &target_proc->todo;
+ target_wait = &target_proc->wait;
+ }
+ e->to_proc = target_proc->pid;
+
+ /* TODO: reuse incoming transaction for reply */
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_t_failed;
+ }
+ binder_stats.obj_created[BINDER_STAT_TRANSACTION]++;
+
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ if (tcomplete == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_tcomplete_failed;
+ }
+ binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++;
+
+ t->debug_id = ++binder_last_id;
+ e->debug_id = t->debug_id;
+
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) {
+ if (reply)
+ printk(KERN_INFO "binder: %d:%d BC_REPLY %d -> %d:%d, "
+ "data %p-%p size %d-%d\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_thread->pid,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+ else
+ printk(KERN_INFO "binder: %d:%d BC_TRANSACTION %d -> "
+ "%d - node %d, data %p-%p size %d-%d\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_node->debug_id,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+ }
+
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+ else
+ t->from = NULL;
+ t->sender_euid = proc->tsk->euid;
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ if (t->buffer == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_alloc_buf_failed;
+ }
+ t->buffer->allow_user_free = 0;
+ t->buffer->debug_id = t->debug_id;
+ t->buffer->transaction = t;
+ t->buffer->target_node = target_node;
+ if (target_node)
+ binder_inc_node(target_node, 1, 0, NULL);
+
+ offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+ if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "data ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "offsets ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ off_end = (void *)offp + tr->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > t->buffer->data_size - sizeof(*fp)) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offset, %d\n",
+ proc->pid, thread->pid, *offp);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_ref *ref;
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ node = binder_new_node(proc, fp->binder);
+ if (node == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_new_node_failed;
+ }
+ node->cookie = fp->cookie;
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ if (fp->type == BINDER_TYPE_BINDER)
+ fp->type = BINDER_TYPE_HANDLE;
+ else
+ fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->handle = ref->desc;
+ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO " node %d u%p -> ref %d desc %d\n",
+ node->debug_id, node->ptr, ref->debug_id, ref->desc);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction with invalid "
+ "handle, %ld\n", proc->pid,
+ thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
+ if (ref->node->proc == target_proc) {
+ if (fp->type == BINDER_TYPE_HANDLE)
+ fp->type = BINDER_TYPE_BINDER;
+ else
+ fp->type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO " ref %d desc %d -> node %d u%p\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (new_ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ fp->handle = new_ref->desc;
+ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ } break;
+
+ case BINDER_TYPE_FD: {
+ int target_fd;
+ struct file *file;
+
+ if (reply) {
+ if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+ binder_user_error("binder: %d:%d got reply with fd, %ld, but
target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+ } else if (!target_node->accept_fds) {
+ binder_user_error("binder: %d:%d got transaction with fd, %ld,
but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+
+ file = fget(fp->handle);
+ if (file == NULL) {
+ binder_user_error("binder: %d:%d got transaction with invalid fd,
%ld\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fget_failed;
+ }
+ target_fd = task_get_unused_fd_flags(target_proc->tsk, O_CLOEXEC);
+ if (target_fd < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
+ task_fd_install(target_proc->tsk, target_fd, file);
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd);
+ /* TODO: fput? */
+ fp->handle = target_fd;
+ } break;
+
+ default:
+ binder_user_error("binder: %d:%d got transactio"
+ "n with invalid object type, %lx\n",
+ proc->pid, thread->pid, fp->type);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_object_type;
+ }
+ }
+ if (reply) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ binder_pop_transaction(target_thread, in_reply_to);
+ } else if (!(t->flags & TF_ONE_WAY)) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ t->need_reply = 1;
+ t->from_parent = thread->transaction_stack;
+ thread->transaction_stack = t;
+ } else {
+ BUG_ON(target_node == NULL);
+ BUG_ON(t->buffer->async_transaction != 1);
+ if (target_node->has_async_transaction) {
+ target_list = &target_node->async_todo;
+ target_wait = NULL;
+ } else
+ target_node->has_async_transaction = 1;
+ }
+ t->work.type = BINDER_WORK_TRANSACTION;
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+ if (target_wait)
+ wake_up_interruptible(target_wait);
+ return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+ binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ t->buffer->transaction = NULL;
+ binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+ kfree(tcomplete);
+ binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
+err_alloc_tcomplete_failed:
+ kfree(t);
+ binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+ if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+ printk(KERN_INFO "binder: %d:%d transaction failed %d, size %d-%d
\n",
+ proc->pid, thread->pid, return_error,
+ tr->data_size, tr->offsets_size);
+
+ {
+ struct binder_transaction_log_entry *fe;
+ fe = binder_transaction_log_add(&binder_transaction_log_failed);
+ *fe = *e;
+ }
+
+ BUG_ON(thread->return_error != BR_OK);
+ if (in_reply_to) {
+ thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_send_failed_reply(in_reply_to, return_error);
+ } else
+ thread->return_error = return_error;
+}
+
+static void
+binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_buffer *buffer, size_t *failed_at)
+{
+ size_t *offp, *off_end;
+ int debug_id = buffer->debug_id;
+
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO "binder: %d buffer release %d, size %d-%d, failed
at %p\n",
+ proc->pid, buffer->debug_id,
+ buffer->data_size, buffer->offsets_size, failed_at);
+
+ if (buffer->target_node)
+ binder_dec_node(buffer->target_node, 1, 0);
+
+ offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ if (failed_at)
+ off_end = failed_at;
+ else
+ off_end = (void *)offp + buffer->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > buffer->data_size - sizeof(*fp)) {
+ printk(KERN_ERR "binder: transaction release %d bad offset %d,
size %d\n", debug_id, *offp, buffer->data_size);
+ continue;
+ }
+ fp = (struct flat_binder_object *)(buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ printk(KERN_ERR "binder: transaction release %d bad node %p\n",
debug_id, fp->binder);
+ break;
+ }
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO " node %d u%p\n",
+ node->debug_id, node->ptr);
+ binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ printk(KERN_ERR "binder: transaction release %d bad handle %ld
\n", debug_id, fp->handle);
+ break;
+ }
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO " ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, ref->node->debug_id);
+ binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ } break;
+
+ case BINDER_TYPE_FD:
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO " fd %ld\n", fp->handle);
+ if (failed_at)
+ task_close_fd(proc->tsk, fp->handle);
+ break;
+
+ default:
+ printk(KERN_ERR "binder: transaction release %d bad object type %lx
\n", debug_id, fp->type);
+ break;
+ }
+ }
+}
+
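+/*
+ * Consume BC_* commands from the user-space write buffer until it is
+ * exhausted or an error return is pending for this thread.
+ */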
+int
+binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+ void __user *buffer, int size, signed long *consumed)
+{
+ uint32_t cmd;
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ while (ptr < end && thread->return_error == BR_OK) {
+ if (get_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+ binder_stats.bc[_IOC_NR(cmd)]++;
+ proc->stats.bc[_IOC_NR(cmd)]++;
+ thread->stats.bc[_IOC_NR(cmd)]++;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ case BC_ACQUIRE:
+ case BC_RELEASE:
+ case BC_DECREFS: {
+ uint32_t target;
+ struct binder_ref *ref;
+ const char *debug_string;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (target == 0 && binder_context_mgr_node &&
+ (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+ ref = binder_get_ref_for_node(proc,
+ binder_context_mgr_node);
+ if (ref->desc != target) {
+ binder_user_error("binder: %d:"
+ "%d tried to acquire "
+ "reference to desc 0, "
+ "got %d instead\n",
+ proc->pid, thread->pid,
+ ref->desc);
+ }
+ } else
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d refcou"
+ "nt change on invalid ref %d\n",
+ proc->pid, thread->pid, target);
+ break;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ debug_string = "IncRefs";
+ binder_inc_ref(ref, 0, NULL);
+ break;
+ case BC_ACQUIRE:
+ debug_string = "Acquire";
+ binder_inc_ref(ref, 1, NULL);
+ break;
+ case BC_RELEASE:
+ debug_string = "Release";
+ binder_dec_ref(ref, 1);
+ break;
+ case BC_DECREFS:
+ default:
+ debug_string = "DecRefs";
+ binder_dec_ref(ref, 0);
+ break;
+ }
+ if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
+ printk(KERN_INFO "binder: %d:%d %s ref %d desc %d s %d w %d for
node %d\n",
+ proc->pid, thread->pid, debug_string, ref->debug_id, ref-
>desc, ref->strong, ref->weak, ref->node->debug_id);
+ break;
+ }
+ case BC_INCREFS_DONE:
+ case BC_ACQUIRE_DONE: {
+ void __user *node_ptr;
+ void *cookie;
+ struct binder_node *node;
+
+ if (get_user(node_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (get_user(cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ node = binder_get_node(proc, node_ptr);
+ if (node == NULL) {
+ binder_user_error("binder: %d:%d "
+ "%s u%p no match\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" :
+ "BC_ACQUIRE_DONE",
+ node_ptr);
+ break;
+ }
+ binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
+ printk(KERN_INFO "binder: %d:%d %s node %d ls %d lw %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ break;
+ }
+ case BC_ATTEMPT_ACQUIRE:
+ printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+ return -EINVAL;
+ case BC_ACQUIRE_RESULT:
+ printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+ return -EINVAL;
+
+ case BC_FREE_BUFFER: {
+ void __user *data_ptr;
+ struct binder_buffer *buffer;
+
+ if (get_user(data_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ buffer = binder_buffer_lookup(proc, data_ptr);
+ if (buffer == NULL) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p no match\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ if (!buffer->allow_user_free) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p matched "
+ "unreturned buffer\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER)
+ printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer
%d for %s transaction\n",
+ proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ buffer->transaction ? "active" : "finished");
+
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ if (buffer->async_transaction && buffer->target_node) {
+ BUG_ON(!buffer->target_node->has_async_transaction);
+ if (list_empty(&buffer->target_node->async_todo))
+ buffer->target_node->has_async_transaction = 0;
+ else
+ list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ }
+ binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_free_buf(proc, buffer);
+ break;
+ }
+
+ case BC_TRANSACTION:
+ case BC_REPLY: {
+ struct binder_transaction_data tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ break;
+ }
+
+ case BC_REGISTER_LOOPER:
+ if (binder_debug_mask & BINDER_DEBUG_THREADS)
+ printk(KERN_INFO "binder: %d:%d BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "after BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ } else if (proc->requested_threads == 0) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "without request\n",
+ proc->pid, thread->pid);
+ } else {
+ proc->requested_threads--;
+ proc->requested_threads_started++;
+ }
+ thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ break;
+ case BC_ENTER_LOOPER:
+ if (binder_debug_mask & BINDER_DEBUG_THREADS)
+ printk(KERN_INFO "binder: %d:%d BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_ENTER_LOOPER called after "
+ "BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ }
+ thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+ break;
+ case BC_EXIT_LOOPER:
+ if (binder_debug_mask & BINDER_DEBUG_THREADS)
+ printk(KERN_INFO "binder: %d:%d BC_EXIT_LOOPER\n",
+ proc->pid, thread->pid);
+ thread->looper |= BINDER_LOOPER_STATE_EXITED;
+ break;
+
+ case BC_REQUEST_DEATH_NOTIFICATION:
+ case BC_CLEAR_DEATH_NOTIFICATION: {
+ uint32_t target;
+ void __user *cookie;
+ struct binder_ref *ref;
+ struct binder_ref_death *death;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d %s "
+ "invalid ref %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ target);
+ break;
+ }
+
+ if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
+ printk(KERN_INFO "binder: %d:%d %s %p ref %d desc %d s %d w %d
for node %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ cookie, ref->debug_id, ref->desc,
+ ref->strong, ref->weak, ref->node->debug_id);
+
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ if (ref->death) {
+ binder_user_error("binder: %d:%"
+ "d BC_REQUEST_DEATH_NOTI"
+ "FICATION death notific"
+ "ation already set\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ thread->return_error = BR_ERROR;
+ if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+ printk(KERN_INFO "binder: %d:%d "
+ "BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ binder_stats.obj_created[BINDER_STAT_DEATH]++;
+ INIT_LIST_HEAD(&death->work.entry);
+ death->cookie = cookie;
+ ref->death = death;
+ if (ref->node->proc == NULL) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&ref->death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&ref->death->work.entry, &proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ }
+ }
+ } else {
+ if (ref->death == NULL) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion not active\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = ref->death;
+ if (death->cookie != cookie) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion cookie mismatch "
+ "%p != %p\n",
+ proc->pid, thread->pid,
+ death->cookie, cookie);
+ break;
+ }
+ ref->death = NULL;
+ if (list_empty(&death->work.entry)) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+ death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+ }
+ }
+ } break;
+ case BC_DEAD_BINDER_DONE: {
+ struct binder_work *w;
+ void __user *cookie;
+ struct binder_ref_death *death = NULL;
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(void *);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w, struct binder_ref_death, work);
+ if (tmp_death->cookie == cookie) {
+ death = tmp_death;
+ break;
+ }
+ }
+ if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+ printk(KERN_INFO "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p
\n",
+ proc->pid, thread->pid, cookie, death);
+ if (death == NULL) {
+ binder_user_error("binder: %d:%d BC_DEAD"
+ "_BINDER_DONE %p not found\n",
+ proc->pid, thread->pid, cookie);
+ break;
+ }
+
+ list_del_init(&death->work.entry);
+ if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ list_add_tail(&death->work.entry, &thread->todo);
+ }
+ } break;
+
+ default:
+ printk(KERN_ERR "binder: %d:%d unknown command %d\n", proc->pid,
thread->pid, cmd);
+ return -EINVAL;
+ }
+ *consumed = ptr - buffer;
+ }
+ return 0;
+}
+
+void
+binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
+ uint32_t cmd)
+{
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+ binder_stats.br[_IOC_NR(cmd)]++;
+ proc->stats.br[_IOC_NR(cmd)]++;
+ thread->stats.br[_IOC_NR(cmd)]++;
+ }
+}
+
+static int
+binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread)
+{
+ return !list_empty(&proc->todo) ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int
+binder_has_thread_work(struct binder_thread *thread)
+{
+ return !list_empty(&thread->todo) || thread->return_error != BR_OK
||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
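+/*
+ * Fill the user-space read buffer with BR_* commands, blocking for
+ * thread or process work unless O_NONBLOCK was requested.
+ */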
+static int
+binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
+ void __user *buffer, int size, signed long *consumed, int non_block)
+{
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ int ret = 0;
+ int wait_for_proc_work;
+
+ if (*consumed == 0) {
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ }
+
+retry:
+ wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
+
+ if (thread->return_error != BR_OK && ptr < end) {
+ if (thread->return_error2 != BR_OK) {
+ if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (ptr == end)
+ goto done;
+ thread->return_error2 = BR_OK;
+ }
+ if (put_user(thread->return_error, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ thread->return_error = BR_OK;
+ goto done;
+ }
+
+
+ thread->looper |= BINDER_LOOPER_STATE_WAITING;
+ if (wait_for_proc_work)
+ proc->ready_threads++;
+ mutex_unlock(&binder_lock);
+ if (wait_for_proc_work) {
+ if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))) {
+ binder_user_error("binder: %d:%d ERROR: Thread waiting "
+ "for process work before calling BC_REGISTER_"
+ "LOOPER or BC_ENTER_LOOPER (state %x)\n",
+ proc->pid, thread->pid, thread->looper);
+ wait_event_interruptible(binder_user_error_wait,
+ binder_stop_on_user_error < 2);
+ }
+ binder_set_nice(proc->default_priority);
+ if (non_block) {
+ if (!binder_has_proc_work(proc, thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible_exclusive(proc->wait,
+ binder_has_proc_work(proc, thread));
+ } else {
+ if (non_block) {
+ if (!binder_has_thread_work(thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible(thread->wait,
+ binder_has_thread_work(thread));
+ }
+ mutex_lock(&binder_lock);
+ if (wait_for_proc_work)
+ proc->ready_threads--;
+ thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+ if (ret)
+ return ret;
+
+ while (1) {
+ uint32_t cmd;
+ struct binder_transaction_data tr;
+ struct binder_work *w;
+ struct binder_transaction *t = NULL;
+
+ if (!list_empty(&thread->todo))
+ w = list_first_entry(&thread->todo, struct binder_work, entry);
+ else if (!list_empty(&proc->todo) && wait_for_proc_work)
+ w = list_first_entry(&proc->todo, struct binder_work, entry);
+ else {
+ if (ptr - buffer == 4 && !(thread->looper &
BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+ goto retry;
+ break;
+ }
+
+ if (end - ptr < sizeof(tr) + 4)
+ break;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ t = container_of(w, struct binder_transaction, work);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ cmd = BR_TRANSACTION_COMPLETE;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
+ printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+ proc->pid, thread->pid);
+
+ list_del(&w->entry);
+ kfree(w);
+ binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
+ } break;
+ case BINDER_WORK_NODE: {
+ struct binder_node *node = container_of(w, struct binder_node, work);
+ uint32_t cmd = BR_NOOP;
+ const char *cmd_name;
+ int strong = node->internal_strong_refs || node->local_strong_refs;
+ int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ if (weak && !node->has_weak_ref) {
+ cmd = BR_INCREFS;
+ cmd_name = "BR_INCREFS";
+ node->has_weak_ref = 1;
+ node->local_weak_refs++;
+ } else if (strong && !node->has_strong_ref) {
+ cmd = BR_ACQUIRE;
+ cmd_name = "BR_ACQUIRE";
+ node->has_strong_ref = 1;
+ node->local_strong_refs++;
+ } else if (!strong && node->has_strong_ref) {
+ cmd = BR_RELEASE;
+ cmd_name = "BR_RELEASE";
+ node->has_strong_ref = 0;
+ } else if (!weak && node->has_weak_ref) {
+ cmd = BR_DECREFS;
+ cmd_name = "BR_DECREFS";
+ node->has_weak_ref = 0;
+ }
+ if (cmd != BR_NOOP) {
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(node->ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (put_user(node->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ binder_stat_br(proc, thread, cmd);
+ if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
+ printk(KERN_INFO "binder: %d:%d %s %d u%p c%p\n",
+ proc->pid, thread->pid, cmd_name, node->debug_id,
+ node->ptr, node->cookie);
+ } else {
+ list_del_init(&w->entry);
+ if (!weak && !strong) {
+ if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+ printk(KERN_INFO "binder: %d:%d node %d u%p c%p deleted\n",
+ proc->pid, thread->pid, node->debug_id,
+ node->ptr, node->cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ kfree(node);
+ binder_stats.obj_deleted[BINDER_STAT_NODE]++;
+ } else {
+ if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+ printk(KERN_INFO "binder: %d:%d node %d u%p c%p state unchanged
\n",
+ proc->pid, thread->pid, node->debug_id, node->ptr, node-
>cookie);
+ }
+ }
+ } break;
+ case BINDER_WORK_DEAD_BINDER:
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+ struct binder_ref_death *death =
+ container_of(w, struct binder_ref_death, work);
+ uint32_t cmd;
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+ cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+ else
+ cmd = BR_DEAD_BINDER;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(death->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
+ printk(KERN_INFO "binder: %d:%d %s %p\n",
+ proc->pid, thread->pid,
+ cmd == BR_DEAD_BINDER ?
+ "BR_DEAD_BINDER" :
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ death->cookie);
+
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+ list_del(&w->entry);
+ kfree(death);
+ binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
+ } else
+ list_move(&w->entry, &proc->delivered_death);
+ if (cmd == BR_DEAD_BINDER)
+ goto done; /* DEAD_BINDER notifications can cause transactions */
+ } break;
+ }
+
+ if (!t)
+ continue;
+
+ BUG_ON(t->buffer == NULL);
+ if (t->buffer->target_node) {
+ struct binder_node *target_node = t->buffer->target_node;
+ tr.target.ptr = target_node->ptr;
+ tr.cookie = target_node->cookie;
+ t->saved_priority = task_nice(current);
+ if (t->priority < target_node->min_priority &&
+ !(t->flags & TF_ONE_WAY))
+ binder_set_nice(t->priority);
+ else if (!(t->flags & TF_ONE_WAY) ||
+ t->saved_priority > target_node->min_priority)
+ binder_set_nice(target_node->min_priority);
+ cmd = BR_TRANSACTION;
+ } else {
+ tr.target.ptr = NULL;
+ tr.cookie = NULL;
+ cmd = BR_REPLY;
+ }
+ tr.code = t->code;
+ tr.flags = t->flags;
+ tr.sender_euid = t->sender_euid;
+
+ if (t->from) {
+ struct task_struct *sender = t->from->proc->tsk;
+ tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
+ } else {
+ tr.sender_pid = 0;
+ }
+
+ tr.data_size = t->buffer->data_size;
+ tr.offsets_size = t->buffer->offsets_size;
+ tr.data.ptr.buffer = (void *)((void *)t->buffer->data +
+ proc->user_buffer_offset);
+ tr.data.ptr.offsets = tr.data.ptr.buffer +
+ ALIGN(t->buffer->data_size, sizeof(void *));
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &tr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+
+ binder_stat_br(proc, thread, cmd);
+ if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+ printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d size %d-%d ptr
%p-%p\n",
+ proc->pid, thread->pid,
+ (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
+ t->debug_id, t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0, cmd,
+ t->buffer->data_size, t->buffer->offsets_size,
+ tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+ list_del(&t->work.entry);
+ t->buffer->allow_user_free = 1;
+ if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ t->to_parent = thread->transaction_stack;
+ t->to_thread = thread;
+ thread->transaction_stack = t;
+ } else {
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
+ }
+ break;
+ }
+
+done:
+
+ *consumed = ptr - buffer;
+ if (proc->requested_threads + proc->ready_threads == 0 &&
+ proc->requested_threads_started < proc->max_threads &&
+ (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
*/
+ /*spawn a new thread if we leave this out */) {
+ proc->requested_threads++;
+ if (binder_debug_mask & BINDER_DEBUG_THREADS)
+ printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
+ proc->pid, thread->pid);
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void binder_release_work(struct list_head *list)
+{
+ struct binder_work *w;
+ while (!list_empty(list)) {
+ w = list_first_entry(list, struct binder_work, entry);
+ list_del_init(&w->entry);
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ struct binder_transaction *t =
+ container_of(w, struct binder_transaction, work);
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+ binder_send_failed_reply(t, BR_DEAD_REPLY);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ kfree(w);
+ binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
+ } break;
+ default:
+ break;
+ }
+ }
+
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread = NULL;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &proc->threads.rb_node;
+
+ while (*p) {
+ parent = *p;
+ thread = rb_entry(parent, struct binder_thread, rb_node);
+
+ if (current->pid < thread->pid)
+ p = &(*p)->rb_left;
+ else if (current->pid > thread->pid)
+ p = &(*p)->rb_right;
+ else
+ break;
+ }
+ if (*p == NULL) {
+ thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (thread == NULL)
+ return NULL;
+ binder_stats.obj_created[BINDER_STAT_THREAD]++;
+ thread->proc = proc;
+ thread->pid = current->pid;
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->return_error = BR_OK;
+ thread->return_error2 = BR_OK;
+ }
+ return thread;
+}
+
+static int binder_free_thread(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct binder_transaction *t;
+ struct binder_transaction *send_reply = NULL;
+ int active_transactions = 0;
+
+ rb_erase(&thread->rb_node, &proc->threads);
+ t = thread->transaction_stack;
+ if (t && t->to_thread == thread)
+ send_reply = t;
+ while (t) {
+ active_transactions++;
+ if (binder_debug_mask & BINDER_DEBUG_DEAD_TRANSACTION)
+ printk(KERN_INFO "binder: release %d:%d transaction %d %s, still
active\n",
+ proc->pid, thread->pid, t->debug_id, (t->to_thread ==
thread) ? "in" : "out");
+ if (t->to_thread == thread) {
+ t->to_proc = NULL;
+ t->to_thread = NULL;
+ if (t->buffer) {
+ t->buffer->transaction = NULL;
+ t->buffer = NULL;
+ }
+ t = t->to_parent;
+ } else if (t->from == thread) {
+ t->from = NULL;
+ t = t->from_parent;
+ } else
+ BUG();
+ }
+ if (send_reply)
+ binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+ binder_release_work(&thread->todo);
+ kfree(thread);
+ binder_stats.obj_deleted[BINDER_STAT_THREAD]++;
+ return active_transactions;
+}
+
+static unsigned int binder_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread = NULL;
+ int wait_for_proc_work;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo) && thread->return_error == BR_OK;
+ mutex_unlock(&binder_lock);
+
+ if (wait_for_proc_work) {
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ poll_wait(filp, &proc->wait, wait);
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ } else {
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ poll_wait(filp, &thread->wait, wait);
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ }
+ return 0;
+}
+
+static long binder_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread;
+ unsigned int size = _IOC_SIZE(cmd);
+ void __user *ubuf = (void __user *)arg;
+
+ /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid,
current->pid, cmd, arg);*/
+
+ ret = wait_event_interruptible(binder_user_error_wait,
+ binder_stop_on_user_error < 2);
+ if (ret)
+ return ret;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+ if (thread == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ switch (cmd) {
+ case BINDER_WRITE_READ: {
+ struct binder_write_read bwr;
+ if (size != sizeof(struct binder_write_read)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
+ printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at
%08lx\n",
+ proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
bwr.read_size, bwr.read_buffer);
+ if (bwr.write_size > 0) {
+ ret = binder_thread_write(proc, thread,
+ (void __user *)bwr.write_buffer,
+ bwr.write_size, &bwr.write_consumed);
+ if (ret < 0) {
+ bwr.read_consumed = 0;
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ if (bwr.read_size > 0) {
+ ret = binder_thread_read(proc, thread,
+ (void __user *)bwr.read_buffer,
+ bwr.read_size, &bwr.read_consumed,
+ filp->f_flags & O_NONBLOCK);
+ if (!list_empty(&proc->todo))
+ wake_up_interruptible(&proc->wait);
+ if (ret < 0) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
+ printk(KERN_INFO "binder: %d:%d wrote %ld of %ld, read return %ld
of %ld\n",
+ proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
bwr.read_consumed, bwr.read_size);
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_SET_MAX_THREADS:
+ if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc-
>max_threads))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case BINDER_SET_CONTEXT_MGR:
+ if (binder_context_mgr_node != NULL) {
+ printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ if (binder_context_mgr_uid != -1) {
+ if (binder_context_mgr_uid != current->euid) {
+ printk(KERN_ERR "binder: BINDER_SET_"
+ "CONTEXT_MGR bad uid %d != %d\n",
+ current->euid,
+ binder_context_mgr_uid);
+ ret = -EPERM;
+ goto err;
+ }
+ } else
+ binder_context_mgr_uid = current->euid;
+ binder_context_mgr_node = binder_new_node(proc, NULL);
+ if (binder_context_mgr_node == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ binder_context_mgr_node->local_weak_refs++;
+ binder_context_mgr_node->local_strong_refs++;
+ binder_context_mgr_node->has_strong_ref = 1;
+ binder_context_mgr_node->has_weak_ref = 1;
+ break;
+ case BINDER_THREAD_EXIT:
+ if (binder_debug_mask & BINDER_DEBUG_THREADS)
+ printk(KERN_INFO "binder: %d:%d exit\n",
+ proc->pid, thread->pid);
+ binder_free_thread(proc, thread);
+ thread = NULL;
+ break;
+ case BINDER_VERSION:
+ if (size != sizeof(struct binder_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct
binder_version *)ubuf)->protocol_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = 0;
+err:
+ if (thread)
+ thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+ mutex_unlock(&binder_lock);
+ wait_event_interruptible(binder_user_error_wait,
+ binder_stop_on_user_error < 2);
+ if (ret && ret != -ERESTARTSYS)
+ printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc-
>pid, current->pid, cmd, arg, ret);
+ return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+ printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx
pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end -
vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot);
+ dump_stack();
+}
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+ printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx
pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end -
vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot);
+ proc->vma = NULL;
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+ .open = binder_vma_open,
+ .close = binder_vma_close,
+};
+
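+/*
+ * Map at most 4MB of kernel vm area for this process's transaction
+ * buffers; only the first page is backed with memory immediately.
+ */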
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ struct binder_proc *proc = filp->private_data;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+ vma->vm_end = vma->vm_start + SZ_4M;
+
+ if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+ printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx
\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma-
>vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot);
+
+ if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+ ret = -EPERM;
+ failure_string = "bad vm_flags";
+ goto err_bad_arg;
+ }
+ vma->vm_flags |= VM_DONTCOPY;
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ proc->buffer = area->addr;
+ proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer;
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+ printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment
\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ proc->pages = kzalloc(sizeof(proc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+ if (proc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ proc->buffer_size = vma->vm_end - vma->vm_start;
+
+ vma->vm_ops = &binder_vm_ops;
+ vma->vm_private_data = proc;
+
+ if (binder_update_page_range(proc, 1, proc->buffer,
+ proc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = proc->buffer;
+ INIT_LIST_HEAD(&proc->buffers);
+ list_add(&buffer->entry, &proc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(proc, buffer);
+ proc->free_async_space = proc->buffer_size / 2;
+ barrier();
+ proc->vma = vma;
+
+ /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid,
vma->vm_start, vma->vm_end, proc->buffer);*/
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(proc->pages);
+err_alloc_pages_failed:
+ vfree(proc->buffer);
+err_get_vm_area_failed:
+ mutex_unlock(&binder_lock);
+err_bad_arg:
+ printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid,
vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc;
+
+ if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+ printk(KERN_INFO "binder_open: %d:%d\n", current->group_leader-
>pid, current->pid);
+
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+ get_task_struct(current);
+ proc->tsk = current;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+ mutex_lock(&binder_lock);
+ binder_stats.obj_created[BINDER_STAT_PROC]++;
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ proc->pid = current->group_leader->pid;
+ INIT_LIST_HEAD(&proc->delivered_death);
+ filp->private_data = proc;
+ mutex_unlock(&binder_lock);
+
+ if (binder_proc_dir_entry_proc) {
+ char strbuf[11];
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc,
+ binder_read_proc_proc, proc);
+ }
+
+ return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+ struct rb_node *n;
+ struct binder_proc *proc = filp->private_data;
+ int wake_count = 0;
+
+ mutex_lock(&binder_lock);
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ struct binder_thread *thread =
+ rb_entry(n, struct binder_thread, rb_node);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+ wake_up_interruptible(&thread->wait);
+ wake_count++;
+ }
+ }
+ wake_up_interruptible_all(&proc->wait);
+ mutex_unlock(&binder_lock);
+
+ if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+ printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid,
wake_count);
+
+ return 0;
+}
+
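+/*
+ * Final cleanup when the last file reference goes away: tear down
+ * threads, nodes and refs, queue death notifications for nodes that
+ * still have remote references, and free the buffer pages.
+ */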
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+ struct hlist_node *pos;
+ struct binder_transaction *t;
+ struct rb_node *n;
+ struct binder_proc *proc = filp->private_data;
+ int threads, nodes, incoming_refs, outgoing_refs, buffers,
+ active_transactions, page_count;
+
+ if (binder_proc_dir_entry_proc) {
+ char strbuf[11];
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
+ }
+ mutex_lock(&binder_lock);
+ hlist_del(&proc->proc_node);
+ if (binder_context_mgr_node && binder_context_mgr_node->proc ==
proc) {
+ if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+ printk(KERN_INFO "binder_release: %d context_mgr_node gone\n",
proc->pid);
+ binder_context_mgr_node = NULL;
+ }
+
+ threads = 0;
+ active_transactions = 0;
+ while ((n = rb_first(&proc->threads))) {
+ struct binder_thread *thread =
+ rb_entry(n, struct binder_thread, rb_node);
+ threads++;
+ active_transactions += binder_free_thread(proc, thread);
+ }
+ nodes = 0;
+ incoming_refs = 0;
+ while ((n = rb_first(&proc->nodes))) {
+ struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+ nodes++;
+ rb_erase(&node->rb_node, &proc->nodes);
+ list_del_init(&node->work.entry);
+ if (hlist_empty(&node->refs)) {
+ kfree(node);
+ binder_stats.obj_deleted[BINDER_STAT_NODE]++;
+ } else {
+ struct binder_ref *ref;
+ int death = 0;
+
+ node->proc = NULL;
+ node->local_strong_refs = 0;
+ node->local_weak_refs = 0;
+ hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ incoming_refs++;
+ if (ref->death) {
+ death++;
+ if (list_empty(&ref->death->work.entry)) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ } else
+ BUG();
+ }
+ }
+ if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+ printk(KERN_INFO "binder: node %d now dead, refs %d, death %d\n",
node->debug_id, incoming_refs, death);
+ }
+ }
+ outgoing_refs = 0;
+ while ((n = rb_first(&proc->refs_by_desc))) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ outgoing_refs++;
+ binder_delete_ref(ref);
+ }
+ binder_release_work(&proc->todo);
+ buffers = 0;
+
+ while ((n = rb_first(&proc->allocated_buffers))) {
+ struct binder_buffer *buffer =
+ rb_entry(n, struct binder_buffer, rb_node);
+ t = buffer->transaction;
+ if (t) {
+ t->buffer = NULL;
+ buffer->transaction = NULL;
+ printk(KERN_ERR "binder: release proc %d, transaction %d, not freed
\n", proc->pid, t->debug_id);
+ /*BUG();*/
+ }
+ binder_free_buf(proc, buffer);
+ buffers++;
+ }
+
+ binder_stats.obj_deleted[BINDER_STAT_PROC]++;
+ mutex_unlock(&binder_lock);
+
+ page_count = 0;
+ if (proc->pages) {
+ int i;
+ for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+ if (proc->pages[i]) {
+ if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+ printk(KERN_INFO "binder_release: %d: page %d at %p not freed
\n", proc->pid, i, proc->buffer + i * PAGE_SIZE);
+ __free_page(proc->pages[i]);
+ page_count++;
+ }
+ }
+ kfree(proc->pages);
+ vfree(proc->buffer);
+ }
+
+ put_task_struct(proc->tsk);
+
+ if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+ printk(KERN_INFO "binder_release: %d threads %d, nodes %d (ref %d),
refs %d, active transactions %d, buffers %d, pages %d\n",
+ proc->pid, threads, nodes, incoming_refs, outgoing_refs,
active_transactions, buffers, page_count);
+
+ kfree(proc);
+ return 0;
+}
+
+static char *print_binder_transaction(char *buf, char *end,
+ const char *prefix, struct binder_transaction *t)
+{
+ buf += snprintf(buf, end - buf, "%s %d: %p from %d:%d to %d:%d "
+ "code %x flags %x pri %ld r%d",
+ prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0,
+ t->to_proc ? t->to_proc->pid : 0,
+ t->to_thread ? t->to_thread->pid : 0,
+ t->code, t->flags, t->priority, t->need_reply);
+ if (buf >= end)
+ return buf;
+ if (t->buffer == NULL) {
+ buf += snprintf(buf, end - buf, " buffer free\n");
+ return buf;
+ }
+ if (t->buffer->target_node) {
+ buf += snprintf(buf, end - buf, " node %d",
+ t->buffer->target_node->debug_id);
+ if (buf >= end)
+ return buf;
+ }
+ buf += snprintf(buf, end - buf, " size %d:%d data %p\n",
+ t->buffer->data_size, t->buffer->offsets_size,
+ t->buffer->data);
+ return buf;
+}
+
+static char *print_binder_buffer(char *buf, char *end,
+ const char *prefix, struct binder_buffer *buffer)
+{
+ buf += snprintf(buf, end - buf, "%s %d: %p size %d:%d %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->transaction ? "active" : "delivered");
+ return buf;
+}
+
+static char *print_binder_work(char *buf, char *end, const char *prefix,
+ const char *transaction_prefix, struct binder_work *w)
+{
+ struct binder_node *node;
+ struct binder_transaction *t;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ t = container_of(w, struct binder_transaction, work);
+ buf = print_binder_transaction(buf, end, transaction_prefix, t);
+ break;
+ case BINDER_WORK_TRANSACTION_COMPLETE:
+ buf += snprintf(buf, end - buf,
+ "%stransaction complete\n", prefix);
+ break;
+ case BINDER_WORK_NODE:
+ node = container_of(w, struct binder_node, work);
+ buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id, node->ptr, node->cookie);
+ break;
+ case BINDER_WORK_DEAD_BINDER:
+ buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix);
+ break;
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ buf += snprintf(buf, end - buf,
+ "%shas cleared dead binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+ buf += snprintf(buf, end - buf,
+ "%shas cleared death notification\n", prefix);
+ break;
+ default:
+ buf += snprintf(buf, end - buf, "%sunknown work: type %d\n",
+ prefix, w->type);
+ break;
+ }
+ return buf;
+}
+
+static char *print_binder_thread(char *buf, char *end,
+ struct binder_thread *thread, int print_always)
+{
+ struct binder_transaction *t;
+ struct binder_work *w;
+ char *start_buf = buf;
+ char *header_buf;
+
+ buf += snprintf(buf, end - buf, " thread %d: l %02x\n",
+ thread->pid, thread->looper);
+ header_buf = buf;
+ t = thread->transaction_stack;
+ while (t) {
+ if (buf >= end)
+ break;
+ if (t->from == thread) {
+ buf = print_binder_transaction(buf, end,
+ " outgoing transaction", t);
+ t = t->from_parent;
+ } else if (t->to_thread == thread) {
+ buf = print_binder_transaction(buf, end,
+ " incoming transaction", t);
+ t = t->to_parent;
+ } else {
+ buf = print_binder_transaction(buf, end, " bad transaction", t);
+ t = NULL;
+ }
+ }
+ list_for_each_entry(w, &thread->todo, entry) {
+ if (buf >= end)
+ break;
+ buf = print_binder_work(buf, end, " ",
+ " pending transaction", w);
+ }
+ if (!print_always && buf == header_buf)
+ buf = start_buf;
+ return buf;
+}
+
+static char *print_binder_node(char *buf, char *end, struct binder_node *node)
+{
+ struct binder_ref *ref;
+ struct hlist_node *pos;
+ struct binder_work *w;
+ int count;
+ count = 0;
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ count++;
+
+ buf += snprintf(buf, end - buf,
+ " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, node->ptr, node->cookie,
+ node->has_strong_ref, node->has_weak_ref,
+ node->local_strong_refs, node->local_weak_refs,
+ node->internal_strong_refs, count);
+ if (buf >= end)
+ return buf;
+ if (count) {
+ buf += snprintf(buf, end - buf, " proc");
+ if (buf >= end)
+ return buf;
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ buf += snprintf(buf, end - buf, " %d", ref->proc->pid);
+ if (buf >= end)
+ return buf;
+ }
+ }
+ buf += snprintf(buf, end - buf, "\n");
+ list_for_each_entry(w, &node->async_todo, entry) {
+ if (buf >= end)
+ break;
+ buf = print_binder_work(buf, end, " ",
+ " pending async transaction", w);
+ }
+ return buf;
+}
+
+static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref)
+{
+ buf += snprintf(buf, end - buf, " ref %d: desc %d %snode %d s %d w
%d d %p\n",
+ ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ return buf;
+}
+
+static char *print_binder_proc(char *buf, char *end, struct
binder_proc *proc, int print_all)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ char *start_buf = buf;
+ char *header_buf;
+
+ buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
+ header_buf = buf;
+
+ for (n = rb_first(&proc->threads); n != NULL && buf < end; n =
rb_next(n))
+ buf = print_binder_thread(buf, end, rb_entry(n, struct
binder_thread, rb_node), print_all);
+ for (n = rb_first(&proc->nodes); n != NULL && buf < end; n =
rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
+ if (print_all || node->has_async_transaction)
+ buf = print_binder_node(buf, end, node);
+ }
+ if (print_all) {
+ for (n = rb_first(&proc->refs_by_desc); n != NULL && buf < end; n =
rb_next(n))
+ buf = print_binder_ref(buf, end, rb_entry(n, struct binder_ref,
rb_node_desc));
+ }
+ for (n = rb_first(&proc->allocated_buffers); n != NULL && buf < end;
n = rb_next(n))
+ buf = print_binder_buffer(buf, end, " buffer", rb_entry(n, struct
binder_buffer, rb_node));
+ list_for_each_entry(w, &proc->todo, entry) {
+ if (buf >= end)
+ break;
+ buf = print_binder_work(buf, end, " ",
+ " pending transaction", w);
+ }
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ if (buf >= end)
+ break;
+ buf += snprintf(buf, end - buf, " has delivered dead binder\n");
+ break;
+ }
+ if (!print_all && buf == header_buf)
+ buf = start_buf;
+ return buf;
+}
+
+static const char *binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY"
+};
+
+static const char *binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE"
+};
+
+static const char *binder_objstat_strings[] = {
+ "proc",
+ "thread",
+ "node",
+ "ref",
+ "death",
+ "transaction",
+ "transaction_complete"
+};
+
+static char *print_binder_stats(char *buf, char *end, const char
*prefix, struct binder_stats *stats)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+ if (stats->bc[i])
+ buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
+ binder_command_strings[i], stats->bc[i]);
+ if (buf >= end)
+ return buf;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+ if (stats->br[i])
+ buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
+ binder_return_strings[i], stats->br[i]);
+ if (buf >= end)
+ return buf;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings));
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted));
+ for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+ if (stats->obj_created[i] || stats->obj_deleted[i])
+ buf += snprintf(buf, end - buf, "%s%s: active %d total %d\n",
prefix,
+ binder_objstat_strings[i],
+ stats->obj_created[i] - stats->obj_deleted[i],
+ stats->obj_created[i]);
+ if (buf >= end)
+ return buf;
+ }
+ return buf;
+}
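(The BUILD_BUG_ON()s above are just there so the build fails if a counter array and its string table ever drift apart. The same idiom in isolation, with made-up names, looks like this:

#include <linux/kernel.h>	/* ARRAY_SIZE, BUILD_BUG_ON */

static const char *my_state_strings[] = { "idle", "busy", "error" };
static unsigned int my_state_counts[3];

static void my_state_check(void)
{
	/* compile-time error if someone adds a state without a string */
	BUILD_BUG_ON(ARRAY_SIZE(my_state_counts) != ARRAY_SIZE(my_state_strings));
}
)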
+
+static char *print_binder_proc_stats(char *buf, char *end, struct
binder_proc *proc)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ int count, strong, weak;
+
+ buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
+ if (buf >= end)
+ return buf;
+ count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ count++;
+ buf += snprintf(buf, end - buf, " threads: %d\n", count);
+ if (buf >= end)
+ return buf;
+ buf += snprintf(buf, end - buf, " requested threads: %d+%d/%d\n"
+ " ready threads %d\n"
+ " free async space %d\n", proc->requested_threads,
+ proc->requested_threads_started, proc->max_threads,
+ proc->ready_threads, proc->free_async_space);
+ if (buf >= end)
+ return buf;
+ count = 0;
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ count++;
+ buf += snprintf(buf, end - buf, " nodes: %d\n", count);
+ if (buf >= end)
+ return buf;
+ count = 0;
+ strong = 0;
+ weak = 0;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
+ count++;
+ strong += ref->strong;
+ weak += ref->weak;
+ }
+ buf += snprintf(buf, end - buf, " refs: %d s %d w %d\n", count,
strong, weak);
+ if (buf >= end)
+ return buf;
+
+ count = 0;
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n =
rb_next(n))
+ count++;
+ buf += snprintf(buf, end - buf, " buffers: %d\n", count);
+ if (buf >= end)
+ return buf;
+
+ count = 0;
+ list_for_each_entry(w, &proc->todo, entry) {
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ count++;
+ break;
+ default:
+ break;
+ }
+ }
+ buf += snprintf(buf, end - buf, " pending transactions: %d\n",
count);
+ if (buf >= end)
+ return buf;
+
+ buf = print_binder_stats(buf, end, " ", &proc->stats);
+
+ return buf;
+}
+
+
+static int binder_read_proc_state(
+ char *page, char **start, off_t off, int count, int *eof, void
*data)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ struct binder_node *node;
+ int len = 0;
+ char *buf = page;
+ char *end = page + PAGE_SIZE;
+ int do_lock = !binder_debug_no_lock;
+
+ if (off)
+ return 0;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ buf += snprintf(buf, end - buf, "binder state:\n");
+
+ if (!hlist_empty(&binder_dead_nodes))
+ buf += snprintf(buf, end - buf, "dead nodes:\n");
+ hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) {
+ if (buf >= end)
+ break;
+ buf = print_binder_node(buf, end, node);
+ }
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
+ if (buf >= end)
+ break;
+ buf = print_binder_proc(buf, end, proc, 1);
+ }
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ if (buf > page + PAGE_SIZE)
+ buf = page + PAGE_SIZE;
+
+ *start = page + off;
+
+ len = buf - page;
+ if (len > off)
+ len -= off;
+ else
+ len = 0;
+
+ return len < count ? len : count;
+}
+
+static int binder_read_proc_stats(
+ char *page, char **start, off_t off, int count, int *eof, void
*data)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int len = 0;
+ char *p = page;
+ int do_lock = !binder_debug_no_lock;
+
+ if (off)
+ return 0;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ p += snprintf(p, PAGE_SIZE, "binder stats:\n");
+
+ p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
+ if (p >= page + PAGE_SIZE)
+ break;
+ p = print_binder_proc_stats(p, page + PAGE_SIZE, proc);
+ }
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ if (p > page + PAGE_SIZE)
+ p = page + PAGE_SIZE;
+
+ *start = page + off;
+
+ len = p - page;
+ if (len > off)
+ len -= off;
+ else
+ len = 0;
+
+ return len < count ? len : count;
+}
+
+static int binder_read_proc_transactions(
+ char *page, char **start, off_t off, int count, int *eof, void
*data)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int len = 0;
+ char *buf = page;
+ char *end = page + PAGE_SIZE;
+ int do_lock = !binder_debug_no_lock;
+
+ if (off)
+ return 0;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ buf += snprintf(buf, end - buf, "binder transactions:\n");
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
+ if (buf >= end)
+ break;
+ buf = print_binder_proc(buf, end, proc, 0);
+ }
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ if (buf > page + PAGE_SIZE)
+ buf = page + PAGE_SIZE;
+
+ *start = page + off;
+
+ len = buf - page;
+ if (len > off)
+ len -= off;
+ else
+ len = 0;
+
+ return len < count ? len : count;
+}
+
+static int binder_read_proc_proc(
+ char *page, char **start, off_t off, int count, int *eof, void
*data)
+{
+ struct binder_proc *proc = data;
+ int len = 0;
+ char *p = page;
+ int do_lock = !binder_debug_no_lock;
+
+ if (off)
+ return 0;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+ p += snprintf(p, PAGE_SIZE, "binder proc state:\n");
+ p = print_binder_proc(p, page + PAGE_SIZE, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+
+ if (p > page + PAGE_SIZE)
+ p = page + PAGE_SIZE;
+ *start = page + off;
+
+ len = p - page;
+ if (len > off)
+ len -= off;
+ else
+ len = 0;
+
+ return len < count ? len : count;
+}
+
+static char *print_binder_transaction_log_entry(char *buf, char *end,
struct binder_transaction_log_entry *e)
+{
+ buf += snprintf(buf, end - buf, "%d: %s from %d:%d to %d:%d node %d
handle %d size %d:%d\n",
+ e->debug_id, (e->call_type == 2) ? "reply" :
+ ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+ e->from_thread, e->to_proc, e->to_thread, e->to_node,
+ e->target_handle, e->data_size, e->offsets_size);
+ return buf;
+}
+
+static int binder_read_proc_transaction_log(
+ char *page, char **start, off_t off, int count, int *eof, void
*data)
+{
+ struct binder_transaction_log *log = data;
+ int len = 0;
+ int i;
+ char *buf = page;
+ char *end = page + PAGE_SIZE;
+
+ if (off)
+ return 0;
+
+ if (log->full) {
+ for (i = log->next; i < ARRAY_SIZE(log->entry); i++) {
+ if (buf >= end)
+ break;
+ buf = print_binder_transaction_log_entry(buf, end, &log-
>entry[i]);
+ }
+ }
+ for (i = 0; i < log->next; i++) {
+ if (buf >= end)
+ break;
+ buf = print_binder_transaction_log_entry(buf, end, &log->entry[i]);
+ }
+
+ *start = page + off;
+
+ len = buf - page;
+ if (len > off)
+ len -= off;
+ else
+ len = 0;
+
+ return len < count ? len : count;
+}
+
+static struct file_operations binder_fops = {
+ .owner = THIS_MODULE,
+ .poll = binder_poll,
+ .unlocked_ioctl = binder_ioctl,
+ .mmap = binder_mmap,
+ .open = binder_open,
+ .flush = binder_flush,
+ .release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "binder",
+ .fops = &binder_fops
+};
+
+static int __init binder_init(void)
+{
+ int ret;
+
+ binder_proc_dir_entry_root = proc_mkdir("binder", NULL);
+ if (binder_proc_dir_entry_root)
+ binder_proc_dir_entry_proc = proc_mkdir("proc",
binder_proc_dir_entry_root);
+ ret = misc_register(&binder_miscdev);
+ if (binder_proc_dir_entry_root) {
+ create_proc_read_entry("state", S_IRUGO,
binder_proc_dir_entry_root, binder_read_proc_state, NULL);
+ create_proc_read_entry("stats", S_IRUGO,
binder_proc_dir_entry_root, binder_read_proc_stats, NULL);
+ create_proc_read_entry("transactions", S_IRUGO,
binder_proc_dir_entry_root, binder_read_proc_transactions, NULL);
+ create_proc_read_entry("transaction_log", S_IRUGO,
binder_proc_dir_entry_root, binder_read_proc_transaction_log,
&binder_transaction_log);
+ create_proc_read_entry("failed_transaction_log", S_IRUGO,
binder_proc_dir_entry_root, binder_read_proc_transaction_log,
&binder_transaction_log_failed);
+ }
+ return ret;
+}
+
+device_initcall(binder_init);
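(Everything binder exposes for debugging ends up under /proc/binder: state, stats, transactions, transaction_log and failed_transaction_log. A minimal user-space sketch for dumping one of them, assuming that proc layout:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/binder/state", "r");

	if (!f) {
		perror("/proc/binder/state");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
)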
+
diff -Naur linux-2.6.25/drivers/android/logger.c linux-2.6.25-
android-0.9_r1/drivers/android/logger.c
--- linux-2.6.25/drivers/android/logger.c 1970-01-01
01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/logger.c 2008-07-24
02:04:04.000000000 +0100
@@ -0,0 +1,605 @@
+/* drivers/android/logger.c
+ *
+ * Android Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <linux/logger.h>
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or
'radio'
+ *
+ * This structure lives from module insertion until module removal,
so it does
+ * not need additional reference counting. The structure is protected
by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+ unsigned char * buffer; /* the ring buffer itself */
+ struct miscdevice misc; /* misc device representing the log */
+ wait_queue_head_t wq; /* wait queue for readers */
+ struct list_head readers; /* this log's readers */
+ struct mutex mutex; /* mutex protecting buffer */
+ size_t w_off; /* current write head offset */
+ size_t head; /* new readers start here */
+ size_t size; /* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need
additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+ struct logger_log * log; /* associated log */
+ struct list_head list; /* entry in logger_log's list */
+ size_t r_off; /* current read head offset */
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized)
modulus */
+#define logger_offset(n) ((n) & (log->size - 1))
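(logger_offset() only works because every log size is a power of two, so masking with (size - 1) is the same as taking the offset modulo the size. A quick standalone check of that equivalence, values chosen only for illustration:

#include <assert.h>

int main(void)
{
	const unsigned long size = 64 * 1024;	/* same size as the logs defined below */
	unsigned long n;

	for (n = 0; n < 4 * size; n += 333)
		assert((n & (size - 1)) == (n % size));
	return 0;
}
)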
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 1) Need to quickly obtain the associated log during an I/O
operation
+ * 2) Readers need to maintain state (logger_reader)
+ * 3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader-
>logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we
just go
+ * file->logger_log. Thus what file->private_data points at depends
on whether
+ * or not the file was opened for reading. This function hides that
dirtiness.
+ */
+static inline struct logger_log * file_get_log(struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ return reader->log;
+ } else
+ return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the length of the payload of the next entry
starting
+ * from 'off'.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+ __u16 val;
+
+ switch (log->size - off) {
+ case 1:
+ memcpy(&val, log->buffer + off, 1);
+ memcpy(((char *) &val) + 1, log->buffer, 1);
+ break;
+ default:
+ memcpy(&val, log->buffer + off, 2);
+ }
+
+ return sizeof(struct logger_entry) + val;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into
the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+ struct logger_reader *reader,
+ char __user *buf,
+ size_t count)
+{
+ size_t len;
+
+ /*
+ * We read from the log in two disjoint operations. First, we read
from
+ * the current read head offset up to 'count' bytes or to the end of
+ * the log, whichever comes first.
+ */
+ len = min(count, log->size - reader->r_off);
+ if (copy_to_user(buf, log->buffer + reader->r_off, len))
+ return -EFAULT;
+
+ /*
+ * Second, we read any remaining bytes, starting back at the head of
+ * the log.
+ */
+ if (count != len)
+ if (copy_to_user(buf + len, log->buffer, count - len))
+ return -EFAULT;
+
+ reader->r_off = logger_offset(reader->r_off + count);
+
+ return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * - O_NONBLOCK works
+ * - If there are no log entries to read, blocks until log is
written to
+ * - Atomically reads exactly one log entry
+ *
+ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to
EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct logger_reader *reader = file->private_data;
+ struct logger_log *log = reader->log;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+start:
+ while (1) {
+ prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&log->mutex);
+ ret = (log->w_off == reader->r_off);
+ mutex_unlock(&log->mutex);
+ if (!ret)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ schedule();
+ }
+
+ finish_wait(&log->wq, &wait);
+ if (ret)
+ return ret;
+
+ mutex_lock(&log->mutex);
+
+ /* is there still something to read or did we race? */
+ if (unlikely(log->w_off == reader->r_off)) {
+ mutex_unlock(&log->mutex);
+ goto start;
+ }
+
+ /* get the size of the next entry */
+ ret = get_entry_len(log, reader->r_off);
+ if (count < ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* get exactly one entry from the log */
+ ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
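(So each read() hands back exactly one entry: a struct logger_entry header followed by its payload. A rough user-space sketch, assuming the misc device node shows up as /dev/log_main and that <linux/logger.h> exposes the pid/tid/sec/nsec/len fields used by the write path below:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/logger.h>

int main(void)
{
	unsigned char buf[LOGGER_ENTRY_MAX_LEN];
	struct logger_entry *e = (struct logger_entry *)buf;
	int fd = open("/dev/log_main", O_RDONLY);	/* node name is an assumption */
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));	/* blocks until a writer adds an entry */
	if (n > 0)
		printf("pid %d tid %d payload %d bytes\n", e->pid, e->tid, e->len);
	close(fd);
	return 0;
}
)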
+
+/*
+ * get_next_entry - return the offset of the first valid entry at
least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off,
size_t len)
+{
+ size_t count = 0;
+
+ do {
+ size_t nr = get_entry_len(log, off);
+ off = logger_offset(off + nr);
+ count += nr;
+ } while (count < len);
+
+ return off;
+}
+
+/*
+ * clock_interval - is a < c < b in mod-space? Put another way, does
the line
+ * from a to b cross c?
+ */
+static inline int clock_interval(size_t a, size_t b, size_t c)
+{
+ if (b < a) {
+ if (a < c || b >= c)
+ return 1;
+ } else {
+ if (a < c && b >= c)
+ return 1;
+ }
+
+ return 0;
+}
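(clock_interval() is the "did the write head pass this point" test for the ring buffer, with wrap-around handled by the b < a branch. Some concrete values, checked here against a copy of the function:

#include <assert.h>
#include <stddef.h>

static int clock_interval(size_t a, size_t b, size_t c)
{
	if (b < a) {
		if (a < c || b >= c)
			return 1;
	} else {
		if (a < c && b >= c)
			return 1;
	}
	return 0;
}

int main(void)
{
	assert(clock_interval(4, 10, 7) == 1);	/* moving 4 -> 10 passes offset 7 */
	assert(clock_interval(4, 10, 12) == 0);	/* offset 12 is not crossed */
	assert(clock_interval(14, 2, 0) == 1);	/* wrap-around passes offset 0 */
	return 0;
}
)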
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who
were
+ * lapped by the writer; also do the same for the default "start
head".
+ * We do this by "pulling forward" the readers and start head to the
first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+ size_t old = log->w_off;
+ size_t new = logger_offset(old + len);
+ struct logger_reader *reader;
+
+ if (clock_interval(old, new, log->head))
+ log->head = get_next_entry(log, log->head, len);
+
+ list_for_each_entry(reader, &log->readers, list)
+ if (clock_interval(old, new, reader->r_off))
+ reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'len' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf,
size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ memcpy(log->buffer + log->w_off, buf, len);
+
+ if (count != len)
+ memcpy(log->buffer, buf + len, count - len);
+
+ log->w_off = logger_offset(log->w_off + count);
+
+}
+
+/*
+ * do_write_log_user - writes 'len' bytes from the user-space buffer
'buf' to
+ * the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+ const void __user *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+ return -EFAULT;
+
+ if (count != len)
+ if (copy_from_user(log->buffer, buf + len, count - len))
+ return -EFAULT;
+
+ log->w_off = logger_offset(log->w_off + count);
+
+ return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for
write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to
optimize
+ * them above all else.
+ */
+ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t ppos)
+{
+ struct logger_log *log = file_get_log(iocb->ki_filp);
+ size_t orig = log->w_off;
+ struct logger_entry header;
+ struct timespec now;
+ ssize_t ret = 0;
+
+ now = current_kernel_time();
+
+ header.pid = current->tgid;
+ header.tid = current->pid;
+ header.sec = now.tv_sec;
+ header.nsec = now.tv_nsec;
+ header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+ /* null writes succeed, return zero */
+ if (unlikely(!header.len))
+ return 0;
+
+ mutex_lock(&log->mutex);
+
+ /*
+ * Fix up any readers, pulling them forward to the first readable
+ * entry after (what will be) the new write offset. We do this now
+ * because if we partially fail, we can end up with clobbered log
+ * entries that encroach on readable buffer.
+ */
+ fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+ do_write_log(log, &header, sizeof(struct logger_entry));
+
+ while (nr_segs-- > 0) {
+ size_t len;
+ ssize_t nr;
+
+ /* figure out how much of this vector we can keep */
+ len = min_t(size_t, iov->iov_len, header.len - ret);
+
+ /* write out this segment's payload */
+ nr = do_write_log_from_user(log, iov->iov_base, len);
+ if (unlikely(nr < 0)) {
+ log->w_off = orig;
+ mutex_unlock(&log->mutex);
+ return nr;
+ }
+
+ iov++;
+ ret += nr;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ /* wake up any blocked readers */
+ wake_up_interruptible(&log->wq);
+
+ return ret;
+}
+
+static struct logger_log * get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that
way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+ struct logger_log *log;
+ int ret;
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ log = get_log_from_minor(MINOR(inode->i_rdev));
+ if (!log)
+ return -ENODEV;
+
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader;
+
+ reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->log = log;
+ INIT_LIST_HEAD(&reader->list);
+
+ mutex_lock(&log->mutex);
+ reader->r_off = log->head;
+ list_add_tail(&reader->list, &log->readers);
+ mutex_unlock(&log->mutex);
+
+ file->private_data = reader;
+ } else
+ file->private_data = log;
+
+ return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that
way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ list_del(&reader->list);
+ kfree(reader);
+ }
+
+ return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to
the log.
+ * Note also that, strictly speaking, a return value of POLLIN does
not
+ * guarantee that the log is readable without blocking, as there is a
small
+ * chance that the writer can lap the reader in the interim between
poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+ struct logger_reader *reader;
+ struct logger_log *log;
+ unsigned int ret = POLLOUT | POLLWRNORM;
+
+ if (!(file->f_mode & FMODE_READ))
+ return ret;
+
+ reader = file->private_data;
+ log = reader->log;
+
+ poll_wait(file, &log->wq, wait);
+
+ mutex_lock(&log->mutex);
+ if (log->w_off != reader->r_off)
+ ret |= POLLIN | POLLRDNORM;
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
+{
+ struct logger_log *log = file_get_log(file);
+ struct logger_reader *reader;
+ long ret = -ENOTTY;
+
+ mutex_lock(&log->mutex);
+
+ switch (cmd) {
+ case LOGGER_GET_LOG_BUF_SIZE:
+ ret = log->size;
+ break;
+ case LOGGER_GET_LOG_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off >= reader->r_off)
+ ret = log->w_off - reader->r_off;
+ else
+ ret = (log->size - reader->r_off) + log->w_off;
+ break;
+ case LOGGER_GET_NEXT_ENTRY_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off != reader->r_off)
+ ret = get_entry_len(log, reader->r_off);
+ else
+ ret = 0;
+ break;
+ case LOGGER_FLUSH_LOG:
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ break;
+ }
+ list_for_each_entry(reader, &log->readers, list)
+ reader->r_off = log->w_off;
+ log->head = log->w_off;
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static struct file_operations logger_fops = {
+ .owner = THIS_MODULE,
+ .read = logger_read,
+ .aio_write = logger_aio_write,
+ .poll = logger_poll,
+ .unlocked_ioctl = logger_ioctl,
+ .compat_ioctl = logger_ioctl,
+ .open = logger_open,
+ .release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE'
bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and
less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+ .buffer = _buf_ ## VAR, \
+ .misc = { \
+ .minor = MISC_DYNAMIC_MINOR, \
+ .name = NAME, \
+ .fops = &logger_fops, \
+ .parent = NULL, \
+ }, \
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+ .readers = LIST_HEAD_INIT(VAR .readers), \
+ .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+ .w_off = 0, \
+ .head = 0, \
+ .size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 64*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024)
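(Adding another log would just be one more line of this macro, plus a matching check in get_log_from_minor() below; a purely hypothetical example with a made-up name and size:

/* hypothetical fourth log, for illustration only */
DEFINE_LOGGER_DEVICE(log_system, "log_system", 64*1024)
)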
+
+static struct logger_log * get_log_from_minor(int minor)
+{
+ if (log_main.misc.minor == minor)
+ return &log_main;
+ if (log_events.misc.minor == minor)
+ return &log_events;
+ if (log_radio.misc.minor == minor)
+ return &log_radio;
+ return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+ int ret;
+
+ ret = misc_register(&log->misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "logger: failed to register misc "
+ "device for log '%s'!\n", log->
misc.name);
+ return ret;
+ }
+
+ printk(KERN_INFO "logger: created %luK log '%s'\n",
+ (unsigned long) log->size >> 10, log->
misc.name);
+
+ return 0;
+}
+
+static int __init logger_init(void)
+{
+ int ret;
+
+ ret = init_log(&log_main);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_events);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_radio);
+ if (unlikely(ret))
+ goto out;
+
+out:
+ return ret;
+}
+device_initcall(logger_init);
diff -Naur linux-2.6.25/drivers/android/power.c linux-2.6.25-
android-0.9_r1/drivers/android/power.c
--- linux-2.6.25/drivers/android/power.c 1970-01-01 01:00:00.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/power.c 2008-07-24
02:04:04.000000000 +0100
@@ -0,0 +1,1221 @@
+/* drivers/android/power.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+//#include <linux/platform_device.h>
+#include <linux/sysdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/android_power.h>
+#include <linux/suspend.h>
+#include <linux/syscalls.h> // sys_sync
+#include <linux/console.h>
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/freezer.h>
+#ifdef CONFIG_ANDROID_POWER_STAT
+#include <linux/proc_fs.h>
+#endif
+
+#define ANDROID_POWER_TEST_EARLY_SUSPEND 0
+#define ANDROID_POWER_PRINT_USER_WAKE_LOCKS 0
+
+MODULE_DESCRIPTION("OMAP CSMI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+#define ANDROID_SUSPEND_CONSOLE (MAX_NR_CONSOLES-2)
+
+static spinlock_t g_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_MUTEX(g_early_suspend_lock);
+
+wait_queue_head_t g_wait_queue;
+
+static LIST_HEAD(g_inactive_locks);
+static LIST_HEAD(g_active_partial_wake_locks);
+static LIST_HEAD(g_active_full_wake_locks);
+static LIST_HEAD(g_early_suspend_handlers);
+static enum {
+ USER_AWAKE,
+ USER_NOTIFICATION,
+ USER_SLEEP
+} g_user_suspend_state;
+static int g_current_event_num;
+static struct workqueue_struct *g_suspend_work_queue;
+static void android_power_suspend(struct work_struct *work);
+static void android_power_wakeup_locked(int notification, ktime_t
time);
+static DECLARE_WORK(g_suspend_work, android_power_suspend);
+static int g_max_user_lockouts = 16;
+
+//static const char g_free_user_lockout_name[] = "free_user";
+static struct {
+ enum {
+ USER_WAKE_LOCK_INACTIVE,
+ USER_WAKE_LOCK_PARTIAL,
+ USER_WAKE_LOCK_FULL
+ } state;
+ android_suspend_lock_t suspend_lock;
+ char name_buffer[32];
+} *g_user_wake_locks;
+#ifdef CONFIG_ANDROID_POWER_STAT
+android_suspend_lock_t g_deleted_wake_locks;
+android_suspend_lock_t g_no_wake_locks;
+#endif
+static struct kobject *android_power_kobj;
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+static wait_queue_head_t fb_state_wq;
+static spinlock_t fb_state_lock = SPIN_LOCK_UNLOCKED;
+int fb_state;
+#endif
+
+#if 0
+android_suspend_lock_t *android_allocate_suspend_lock(const char
*debug_name)
+{
+ unsigned long irqflags;
+ struct android_power *e;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if(e == NULL) {
+ printk("android_power_allocate: kzalloc failed\n");
+ return NULL;
+ }
+ e->name = debug_name;
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ list_add(&e->link, &g_allocated);
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return e;
+}
+#endif
+
+int android_init_suspend_lock(android_suspend_lock_t *lock)
+{
+ unsigned long irqflags;
+
+ if(lock->name == NULL) {
+ printk("android_init_suspend_lock: error name=NULL, lock=%p\n",
lock);
+ dump_stack();
+ return -EINVAL;
+ }
+
+ //printk("android_init_suspend_lock name=%s\n", lock->name);
+#ifdef CONFIG_ANDROID_POWER_STAT
+ lock->stat.count = 0;
+ lock->stat.expire_count = 0;
+ lock->stat.total_time = ktime_set(0, 0);
+ lock->stat.max_time = ktime_set(0, 0);
+ lock->stat.last_time = ktime_set(0, 0);
+#endif
+ lock->flags = 0;
+
+ INIT_LIST_HEAD(&lock->link);
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ list_add(&lock->link, &g_inactive_locks);
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+// if(lock->flags & ANDROID_SUSPEND_LOCK_FLAG_USER_VISIBLE_MASK) {
+// sysfs_create_file(struct kobject * k, const struct attribute * a)
+// }
+ return 0;
+}
+
+void android_uninit_suspend_lock(android_suspend_lock_t *lock)
+{
+ unsigned long irqflags;
+ //printk("android_uninit_suspend_lock name=%s\n", lock->name);
+ spin_lock_irqsave(&g_list_lock, irqflags);
+#ifdef CONFIG_ANDROID_POWER_STAT
+ if(lock->stat.count) {
+ if(g_deleted_wake_locks.stat.count == 0) {
+ g_deleted_wake_locks.name = "deleted_wake_locks";
+ android_init_suspend_lock(&g_deleted_wake_locks);
+ }
+ g_deleted_wake_locks.stat.count += lock->stat.count;
+ g_deleted_wake_locks.stat.expire_count += lock->stat.expire_count;
+ g_deleted_wake_locks.stat.total_time = ktime_add(g_deleted_wake_locks.stat.total_time, lock->stat.total_time);
+ g_deleted_wake_locks.stat.max_time = ktime_add(g_deleted_wake_locks.stat.max_time, lock->stat.max_time);
+ }
+#endif
+ list_del(&lock->link);
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+}
+
+void android_lock_suspend(android_suspend_lock_t *lock)
+{
+ unsigned long irqflags;
+ //printk("android_lock_suspend name=%s\n", lock->name);
+ spin_lock_irqsave(&g_list_lock, irqflags);
+#ifdef CONFIG_ANDROID_POWER_STAT
+ if(!(lock->flags & ANDROID_SUSPEND_LOCK_ACTIVE)) {
+ lock->flags |= ANDROID_SUSPEND_LOCK_ACTIVE;
+ lock->stat.last_time = ktime_get();
+ }
+#endif
+ lock->expires = INT_MAX;
+ lock->flags &= ~ANDROID_SUSPEND_LOCK_AUTO_EXPIRE;
+ list_del(&lock->link);
+ list_add(&lock->link, &g_active_partial_wake_locks);
+ g_current_event_num++;
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+}
+
+void android_lock_suspend_auto_expire(android_suspend_lock_t *lock,
int timeout)
+{
+ unsigned long irqflags;
+ //printk("android_lock_suspend name=%s\n", lock->name);
+ spin_lock_irqsave(&g_list_lock, irqflags);
+#ifdef CONFIG_ANDROID_POWER_STAT
+ if(!(lock->flags & ANDROID_SUSPEND_LOCK_ACTIVE)) {
+ lock->flags |= ANDROID_SUSPEND_LOCK_ACTIVE;
+ lock->stat.last_time = ktime_get();
+ }
+#endif
+ lock->expires = jiffies + timeout;
+ lock->flags |= ANDROID_SUSPEND_LOCK_AUTO_EXPIRE;
+ list_del(&lock->link);
+ list_add(&lock->link, &g_active_partial_wake_locks);
+ g_current_event_num++;
+ wake_up(&g_wait_queue);
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+}
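(For completeness, this is roughly how a driver is expected to use the in-kernel wake lock API (android_init_suspend_lock / android_lock_suspend_auto_expire / android_unlock_suspend). The names here are made up; only the calls come from android_power.h:

#include <linux/interrupt.h>
#include <linux/android_power.h>

static android_suspend_lock_t my_event_lock = {
	.name = "my_driver_event",
};

static int __init my_driver_init(void)
{
	return android_init_suspend_lock(&my_event_lock);
}

static irqreturn_t my_driver_irq(int irq, void *dev_id)
{
	/* keep the system out of suspend for up to 1s while the event is handled */
	android_lock_suspend_auto_expire(&my_event_lock, HZ);
	return IRQ_HANDLED;
}

static void my_driver_event_done(void)
{
	/* release early if the work finishes before the timeout */
	android_unlock_suspend(&my_event_lock);
}
)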
+
+void android_lock_partial_suspend_auto_expire(android_suspend_lock_t
*lock, int timeout)
+{
+ unsigned long irqflags;
+ //printk("android_lock_suspend name=%s\n", lock->name);
+ spin_lock_irqsave(&g_list_lock, irqflags);
+#ifdef CONFIG_ANDROID_POWER_STAT
+ if(!(lock->flags & ANDROID_SUSPEND_LOCK_ACTIVE)) {
+ lock->flags |= ANDROID_SUSPEND_LOCK_ACTIVE;
+ lock->stat.last_time = ktime_get();
+ }
+#endif
+ lock->expires = jiffies + timeout;
+ lock->flags |= ANDROID_SUSPEND_LOCK_AUTO_EXPIRE;
+ list_del(&lock->link);
+ list_add(&lock->link, &g_active_full_wake_locks);
+ g_current_event_num++;
+ wake_up(&g_wait_queue);
+ android_power_wakeup_locked(1, ktime_get());
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+}
+
+#ifdef CONFIG_ANDROID_POWER_STAT
+static int print_lock_stat(char *buf, android_suspend_lock_t *lock)
+{
+ ktime_t active_time;
+ if(lock->flags & ANDROID_SUSPEND_LOCK_ACTIVE)
+ active_time = ktime_sub(ktime_get(), lock->stat.last_time);
+ else
+ active_time = ktime_set(0, 0);
+ return sprintf(buf, "\"%s\"\t%d\t%d\t%lld\t%lld\t%lld\t%lld\n",
+ lock->name,
+ lock->stat.count, lock->stat.expire_count,
+ ktime_to_ns(active_time),
+ ktime_to_ns(lock->stat.total_time),
+ ktime_to_ns(lock->stat.max_time),
+ ktime_to_ns(lock->stat.last_time));
+}
+
+
+static int wakelocks_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ unsigned long irqflags;
+ android_suspend_lock_t *lock;
+ int len = 0;
+ char *p = page;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+
+ p += sprintf(p, "name\tcount\texpire_count\tactive_since\ttotal_time
\tmax_time\tlast_change\n");
+ list_for_each_entry(lock, &g_inactive_locks, link) {
+ p += print_lock_stat(p, lock);
+ }
+ list_for_each_entry(lock, &g_active_partial_wake_locks, link) {
+ p += print_lock_stat(p, lock);
+ }
+ list_for_each_entry(lock, &g_active_full_wake_locks, link) {
+ p += print_lock_stat(p, lock);
+ }
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+
+
+ *start = page + off;
+
+ len = p - page;
+ if (len > off)
+ len -= off;
+ else
+ len = 0;
+
+ return len < count ? len : count;
+}
+
+static void android_unlock_suspend_stat_locked(android_suspend_lock_t
*lock)
+{
+ if(lock->flags & ANDROID_SUSPEND_LOCK_ACTIVE) {
+ ktime_t duration;
+ lock->flags &= ~ANDROID_SUSPEND_LOCK_ACTIVE;
+ lock->stat.count++;
+ duration = ktime_sub(ktime_get(), lock->stat.last_time);
+ lock->stat.total_time = ktime_add(lock->stat.total_time, duration);
+ if(ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time))
+ lock->stat.max_time = duration;
+ lock->stat.last_time = ktime_get();
+ }
+}
+#endif
+
+void android_unlock_suspend(android_suspend_lock_t *lock)
+{
+ int had_full_wake_locks;
+ unsigned long irqflags;
+ //printk("android_unlock_suspend name=%s\n", lock->name);
+ spin_lock_irqsave(&g_list_lock, irqflags);
+#ifdef CONFIG_ANDROID_POWER_STAT
+ android_unlock_suspend_stat_locked(lock);
+#endif
+ lock->flags &= ~ANDROID_SUSPEND_LOCK_AUTO_EXPIRE;
+ had_full_wake_locks = !list_empty(&g_active_full_wake_locks);
+ list_del(&lock->link);
+ list_add(&lock->link, &g_inactive_locks);
+ wake_up(&g_wait_queue);
+ if(had_full_wake_locks && list_empty(&g_active_full_wake_locks)) {
+ printk("android_unlock_suspend: released at %lld\n",
ktime_to_ns(ktime_get()));
+ if(g_user_suspend_state == USER_NOTIFICATION) {
+ printk("android sleep state %d->%d at %lld\n",
g_user_suspend_state, USER_SLEEP, ktime_to_ns(ktime_get()));
+ g_user_suspend_state = USER_SLEEP;
+ queue_work(g_suspend_work_queue, &g_suspend_work);
+ }
+ }
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+}
+
+static void android_power_wakeup_locked(int notification, ktime_t
time)
+{
+ int new_state = (notification == 0) ? USER_AWAKE :
USER_NOTIFICATION;
+ if(new_state >= g_user_suspend_state) {
+ return;
+ }
+ printk("android_power_wakeup %d->%d at %lld\n",
g_user_suspend_state, new_state, ktime_to_ns(time));
+ g_user_suspend_state = new_state;
+ g_current_event_num++;
+ wake_up(&g_wait_queue);
+}
+
+static void android_power_wakeup(void)
+{
+ unsigned long irqflags;
+
+ ktime_t ktime_now;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ ktime_now = ktime_get();
+ android_power_wakeup_locked(0, ktime_now);
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+}
+
+static void android_power_request_sleep(void)
+{
+ unsigned long irqflags;
+ int already_suspended;
+ android_suspend_lock_t *lock, *next_lock;
+ ktime_t ktime_now;
+
+ ktime_now = ktime_get();
+ printk("android_power_suspend: %lld\n", ktime_to_ns(ktime_now));
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ already_suspended = g_user_suspend_state == USER_SLEEP;
+ if(!already_suspended) {
+ printk("android sleep state %d->%d at %lld\n",
g_user_suspend_state, USER_SLEEP, ktime_to_ns(ktime_now));
+ g_user_suspend_state = USER_SLEEP;
+ }
+
+ list_for_each_entry_safe(lock, next_lock, &g_active_full_wake_locks,
link) {
+#ifdef CONFIG_ANDROID_POWER_STAT
+ android_unlock_suspend_stat_locked(lock);
+#endif
+ list_del(&lock->link);
+ list_add(&lock->link, &g_inactive_locks);
+ printk("android_power_suspend: aborted full wake lock %s\n", lock-
>name);
+ }
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ queue_work(g_suspend_work_queue, &g_suspend_work);
+}
+
+void android_register_early_suspend(android_early_suspend_t *handler)
+{
+ struct list_head *pos;
+
+ mutex_lock(&g_early_suspend_lock);
+ list_for_each(pos, &g_early_suspend_handlers) {
+ android_early_suspend_t *e = list_entry(pos,
android_early_suspend_t, link);
+ if(e->level > handler->level)
+ break;
+ }
+ list_add_tail(&handler->link, pos);
+ mutex_unlock(&g_early_suspend_lock);
+}
+
+void android_unregister_early_suspend(android_early_suspend_t
*handler)
+{
+ mutex_lock(&g_early_suspend_lock);
+ list_del(&handler->link);
+ mutex_unlock(&g_early_suspend_lock);
+}
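(And the early-suspend side: a driver registers an android_early_suspend_t with a level; handlers run in ascending level order when the system heads for sleep and in reverse order on resume. A hypothetical LCD driver might look like this:

#include <linux/android_power.h>

static void my_lcd_early_suspend(android_early_suspend_t *h)
{
	/* blank the panel before the system heads towards suspend */
}

static void my_lcd_late_resume(android_early_suspend_t *h)
{
	/* light the panel back up on wakeup */
}

static android_early_suspend_t my_lcd_suspend_desc = {
	.level = ANDROID_EARLY_SUSPEND_LEVEL_CONSOLE_SWITCH + 1,
	.suspend = my_lcd_early_suspend,
	.resume = my_lcd_late_resume,
};

/* in probe()/init: android_register_early_suspend(&my_lcd_suspend_desc); */
)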
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+static int orig_fgconsole;
+static void console_early_suspend(android_early_suspend_t *h)
+{
+ acquire_console_sem();
+ orig_fgconsole = fg_console;
+ if (vc_allocate(ANDROID_SUSPEND_CONSOLE))
+ goto err;
+ if (set_console(ANDROID_SUSPEND_CONSOLE))
+ goto err;
+ release_console_sem();
+
+ if (vt_waitactive(ANDROID_SUSPEND_CONSOLE))
+ pr_warning("console_early_suspend: Can't switch VCs.\n");
+ return;
+err:
+ pr_warning("console_early_suspend: Can't set console\n");
+ release_console_sem();
+}
+
+static void console_late_resume(android_early_suspend_t *h)
+{
+ int ret;
+ acquire_console_sem();
+ ret = set_console(orig_fgconsole);
+ release_console_sem();
+ if (ret) {
+ pr_warning("console_late_resume: Can't set console.\n");
+ return;
+ }
+
+ if (vt_waitactive(orig_fgconsole))
+ pr_warning("console_late_resume: Can't switch VCs.\n");
+}
+
+static android_early_suspend_t console_early_suspend_desc = {
+ .level = ANDROID_EARLY_SUSPEND_LEVEL_CONSOLE_SWITCH,
+ .suspend = console_early_suspend,
+ .resume = console_late_resume,
+};
+#else
+/* tell userspace to stop drawing, wait for it to stop */
+static void stop_drawing_early_suspend(android_early_suspend_t *h)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = ANDROID_REQUEST_STOP_DRAWING;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ wake_up_all(&fb_state_wq);
+ ret = wait_event_timeout(fb_state_wq,
+ fb_state == ANDROID_STOPPED_DRAWING,
+ HZ);
+ if (unlikely(fb_state != ANDROID_STOPPED_DRAWING))
+ printk(KERN_WARNING "android_power: timeout waiting for "
+ "userspace to stop drawing\n");
+}
+
+/* tell userspace to start drawing */
+static void start_drawing_late_resume(android_early_suspend_t *h)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = ANDROID_DRAWING_OK;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+ printk("drawing ok\n");
+ wake_up(&fb_state_wq);
+}
+
+static android_early_suspend_t stop_drawing_early_suspend_desc = {
+ .level = ANDROID_EARLY_SUSPEND_LEVEL_CONSOLE_SWITCH,
+ .suspend = stop_drawing_early_suspend,
+ .resume = start_drawing_late_resume,
+};
+#endif
+
+#if ANDROID_POWER_TEST_EARLY_SUSPEND
+
+typedef struct
+{
+ android_early_suspend_t h;
+ const char *string;
+} early_suspend_test_t;
+
+static void early_suspend_test(android_early_suspend_t *h)
+{
+ early_suspend_test_t *est = container_of(h, early_suspend_test_t,
h);
+ printk("early suspend %s (l %d)\n", est->string, h->level);
+}
+
+static void late_resume_test(android_early_suspend_t *h)
+{
+ early_suspend_test_t *est = container_of(h, early_suspend_test_t,
h);
+ printk("late resume %s (l %d)\n", est->string, h->level);
+}
+
+#define EARLY_SUSPEND_TEST_ENTRY(ilevel, istring) \
+{ \
+ .h = { \
+ .level = ilevel, \
+ .suspend = early_suspend_test, \
+ .resume = late_resume_test \
+ }, \
+ .string = istring \
+}
+static early_suspend_test_t early_suspend_tests[] = {
+ EARLY_SUSPEND_TEST_ENTRY(10, "1"),
+ EARLY_SUSPEND_TEST_ENTRY(5, "2"),
+ EARLY_SUSPEND_TEST_ENTRY(10, "3"),
+ EARLY_SUSPEND_TEST_ENTRY(15, "4"),
+ EARLY_SUSPEND_TEST_ENTRY(8, "5")
+};
+
+#endif
+
+static int get_wait_timeout(int print_locks, int state, struct
list_head *list_head)
+{
+ unsigned long irqflags;
+ android_suspend_lock_t *lock, *next;
+ int max_timeout = 0;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ list_for_each_entry_safe(lock, next, list_head, link) {
+ if(lock->flags & ANDROID_SUSPEND_LOCK_AUTO_EXPIRE) {
+ int timeout = lock->expires - (int)jiffies;
+ if(timeout <= 0) {
+ lock->flags &= ~ANDROID_SUSPEND_LOCK_AUTO_EXPIRE;
+#ifdef CONFIG_ANDROID_POWER_STAT
+ lock->stat.expire_count++;
+ android_unlock_suspend_stat_locked(lock);
+#endif
+ list_del(&lock->link);
+ list_add(&lock->link, &g_inactive_locks);
+ if(!print_locks) // print wake locks that expire while waiting to enter sleep
+ printk("expired wake lock %s\n", lock->name);
+ }
+ else {
+ if(timeout > max_timeout)
+ max_timeout = timeout;
+ if(print_locks)
+ printk("active wake lock %s, time left %d\n", lock->name,
timeout);
+ }
+ }
+ else {
+ if(print_locks)
+ printk("active wake lock %s\n", lock->name);
+ }
+ }
+ if(g_user_suspend_state != state || list_empty(list_head))
+ max_timeout = -1;
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return max_timeout;
+}
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+static int android_power_class_suspend(struct sys_device *sdev,
pm_message_t state)
+{
+ int rv = 0;
+ unsigned long irqflags;
+
+ printk("android_power_suspend: enter\n");
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ if(!list_empty(&g_active_partial_wake_locks)) {
+ printk("android_power_suspend: abort for partial wakeup\n");
+ rv = -EAGAIN;
+ }
+ if(g_user_suspend_state != USER_SLEEP) {
+ printk("android_power_suspend: abort for full wakeup\n");
+ rv = -EAGAIN;
+ }
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return rv;
+}
+
+static int android_power_device_suspend(struct sys_device *sdev,
pm_message_t state)
+{
+ int rv = 0;
+ unsigned long irqflags;
+
+ printk("android_power_device_suspend: enter\n");
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ if(!list_empty(&g_active_partial_wake_locks)) {
+ printk("android_power_device_suspend: abort for partial wakeup\n");
+ rv = -EAGAIN;
+ }
+ if(g_user_suspend_state != USER_SLEEP) {
+ printk("android_power_device_suspend: abort for full wakeup\n");
+ rv = -EAGAIN;
+ }
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return rv;
+}
+#endif
+
+int android_power_is_driver_suspended(void)
+{
+ return (get_wait_timeout(0, USER_SLEEP,
&g_active_partial_wake_locks) < 0) && (g_user_suspend_state ==
USER_SLEEP);
+}
+
+static void android_power_suspend(struct work_struct *work)
+{
+ int entry_event_num;
+ int ret;
+ int wait = 0;
+ android_early_suspend_t *pos;
+ int print_locks = 0; /* avoid passing an uninitialized flag to get_wait_timeout() below */
+ unsigned long irqflags;
+
+ while(g_user_suspend_state != USER_AWAKE) {
+ while(g_user_suspend_state == USER_NOTIFICATION) {
+ wait = get_wait_timeout(print_locks, USER_NOTIFICATION,
&g_active_full_wake_locks);
+ if(wait < 0)
+ break;
+ if(wait)
+ wait_event_interruptible_timeout(g_wait_queue,
get_wait_timeout(0, USER_NOTIFICATION, &g_active_full_wake_locks) !=
wait, wait);
+ }
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ if(g_user_suspend_state == USER_NOTIFICATION &&
list_empty(&g_active_full_wake_locks)) {
+ printk("android sleep state %d->%d at %lld\n",
g_user_suspend_state, USER_SLEEP, ktime_to_ns(ktime_get()));
+ g_user_suspend_state = USER_SLEEP;
+ }
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ wait = 0;
+ if(g_user_suspend_state == USER_AWAKE) {
+ printk("android_power_suspend: suspend aborted\n");
+ return;
+ }
+
+ mutex_lock(&g_early_suspend_lock);
+ //printk("android_power_suspend: call early suspend handlers\n");
+ list_for_each_entry(pos, &g_early_suspend_handlers, link) {
+ if(pos->suspend != NULL)
+ pos->suspend(pos);
+ }
+ //printk("android_power_suspend: call early suspend handlers\n");
+
+ //printk("android_power_suspend: enter\n");
+
+ sys_sync();
+
+ while(g_user_suspend_state == USER_SLEEP) {
+ //printk("android_power_suspend: enter wait (%d)\n", wait);
+ if(wait) {
+ wait_event_interruptible_timeout(g_wait_queue,
g_user_suspend_state != USER_SLEEP, wait);
+ wait = 0;
+ }
+ print_locks = 1;
+ while(1) {
+ wait = get_wait_timeout(print_locks, USER_SLEEP,
&g_active_partial_wake_locks);
+ print_locks = 0;
+ if(wait < 0)
+ break;
+ if(wait)
+ wait_event_interruptible_timeout(g_wait_queue,
get_wait_timeout(0, USER_SLEEP, &g_active_partial_wake_locks) != wait,
wait);
+ else
+ wait_event_interruptible(g_wait_queue, get_wait_timeout(0,
USER_SLEEP, &g_active_partial_wake_locks) != wait);
+ }
+ wait = 0;
+ //printk("android_power_suspend: exit wait\n");
+ entry_event_num = g_current_event_num;
+ if(g_user_suspend_state != USER_SLEEP)
+ break;
+ sys_sync();
+ printk("android_power_suspend: enter suspend\n");
+ ret = pm_suspend(PM_SUSPEND_MEM);
+ printk("android_power_suspend: exit suspend, ret = %d\n", ret);
+ if(g_current_event_num == entry_event_num) {
+ printk("android_power_suspend: pm_suspend returned with no event
\n");
+ wait = HZ / 2;
+#ifdef CONFIG_ANDROID_POWER_STAT
+ if(g_no_wake_locks.stat.count == 0) {
+ g_no_wake_locks.name = "unknown_wakeups";
+ android_init_suspend_lock(&g_no_wake_locks);
+ }
+ g_no_wake_locks.stat.count++;
+ g_no_wake_locks.stat.total_time = ktime_add(
+ g_no_wake_locks.stat.total_time,
+ ktime_set(0, 500 * NSEC_PER_MSEC));
+ g_no_wake_locks.stat.max_time =
+ ktime_set(0, 500 * NSEC_PER_MSEC);
+#endif
+ }
+ }
+ printk("android_power_suspend: done\n");
+ //printk("android_power_suspend: call late resume handlers\n");
+ list_for_each_entry_reverse(pos, &g_early_suspend_handlers, link) {
+ if(pos->resume != NULL)
+ pos->resume(pos);
+ }
+ //printk("android_power_suspend: call late resume handlers\n");
+ mutex_unlock(&g_early_suspend_lock);
+ }
+}
+
+#if 0
+struct sysdev_class android_power_sysclass = {
+ set_kset_name("android_power"),
+ .suspend = android_power_class_suspend
+};
+static struct sysdev_class *g_android_power_sysclass = NULL;
+
+static struct {
+ struct sys_device sysdev;
+// omap_csmi_gsm_image_info_t *pdata;
+} android_power_device = {
+ .sysdev = {
+ .id = 0,
+ .cls = &android_power_sysclass,
+// .suspend = android_power_device_suspend
+ },
+// .pdata = &g_gsm_image_info
+};
+
+struct sysdev_class *android_power_get_sysclass(void)
+{
+ return g_android_power_sysclass;
+}
+#endif
+
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute
*attr, char * buf)
+{
+ char * s = buf;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ s += sprintf(s, "%d-%d-%d\n", g_user_suspend_state,
list_empty(&g_active_full_wake_locks),
list_empty(&g_active_partial_wake_locks));
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return (s - buf);
+}
+
+static ssize_t state_store(struct kobject *kobj, struct
kobj_attribute *attr, const char * buf, size_t n)
+{
+ if(n >= strlen("standby") &&
+ strncmp(buf, "standby", strlen("standby")) == 0) {
+ android_power_request_sleep();
+ wait_event_interruptible(g_wait_queue, g_user_suspend_state ==
USER_AWAKE);
+ return n;
+ }
+ if(n >= strlen("wake") &&
+ strncmp(buf, "wake", strlen("wake")) == 0) {
+ android_power_wakeup();
+ return n;
+ }
+ printk("android_power state_store: invalid argument\n");
+ return -EINVAL;
+}
+
+static ssize_t request_state_show(struct kobject *kobj, struct
kobj_attribute *attr, char * buf)
+{
+ char * s = buf;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ if(g_user_suspend_state == USER_AWAKE)
+ s += sprintf(s, "wake\n");
+ else if(g_user_suspend_state == USER_NOTIFICATION)
+ s += sprintf(s, "standby (w/full wake lock)\n");
+ else
+ s += sprintf(s, "standby\n");
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return (s - buf);
+}
+
+static ssize_t request_state_store(struct kobject *kobj, struct
kobj_attribute *attr, const char * buf, size_t n)
+{
+ if(n >= strlen("standby") &&
+ strncmp(buf, "standby", strlen("standby")) == 0) {
+ android_power_request_sleep();
+ return n;
+ }
+ if(n >= strlen("wake") &&
+ strncmp(buf, "wake", strlen("wake")) == 0) {
+ android_power_wakeup();
+ return n;
+ }
+ printk("android_power state_store: invalid argument\n");
+ return -EINVAL;
+}
+
+
+static int lookup_wake_lock_name(const char *buf, size_t n, int
allocate, int *timeout)
+{
+ int i;
+ int free_index = -1;
+ int inactive_index = -1;
+ int expires_index = -1;
+ int expires_time = INT_MAX;
+ char tmp_buf[64]; /* bounded, NUL-terminated copy of the incoming buffer */
+ char name[32];
+ u64 nanoseconds;
+ int num_arg;
+
+ if(n <= 0)
+ return -EINVAL;
+ if(n >= sizeof(tmp_buf))
+ return -EOVERFLOW;
+ if(n == sizeof(tmp_buf) - 1 && buf[n - 1] != '\0')
+ return -EOVERFLOW;
+
+ memcpy(tmp_buf, buf, n);
+ if(tmp_buf[n - 1] != '\0')
+ tmp_buf[n] = '\0';
+
+ num_arg = sscanf(buf, "%31s %llu", name, &nanoseconds);
+ if(num_arg < 1)
+ return -EINVAL;
+
+ if(strlen(name) >= sizeof(g_user_wake_locks[i].name_buffer))
+ return -EOVERFLOW;
+
+ if(timeout != NULL) {
+ if(num_arg > 1) {
+ do_div(nanoseconds, (NSEC_PER_SEC / HZ));
+ if(nanoseconds <= 0)
+ nanoseconds = 1;
+ *timeout = nanoseconds;
+ }
+ else
+ *timeout = 0;
+ }
+
+ for(i = 0; i < g_max_user_lockouts; i++) {
+ if(strcmp(g_user_wake_locks[i].name_buffer, name) == 0)
+ return i;
+ if(g_user_wake_locks[i].name_buffer[0] == '\0')
+ free_index = i;
+ else if(g_user_wake_locks[i].state == USER_WAKE_LOCK_INACTIVE)
+ inactive_index = i;
+ else if(g_user_wake_locks[i].suspend_lock.expires < expires_time)
+ expires_index = i;
+ }
+ if(allocate) {
+ if(free_index >= 0)
+ i = free_index;
+ else if(inactive_index >= 0)
+ i = inactive_index;
+ else if(expires_index >= 0) {
+ i = expires_index;
+ printk("lookup_wake_lock_name: overwriting expired lock, %s\n",
g_user_wake_locks[i].name_buffer);
+ }
+ else {
+ i = 0;
+ printk("lookup_wake_lock_name: overwriting active lock, %s\n",
g_user_wake_locks[i].name_buffer);
+ }
+ strcpy(g_user_wake_locks[i].name_buffer, name);
+ return i;
+ }
+#if ANDROID_POWER_PRINT_USER_WAKE_LOCKS
+ printk("lookup_wake_lock_name: %s not found\n", name);
+#endif
+ return -EINVAL;
+}
+
+static ssize_t acquire_full_wake_lock_show(struct kobject *kobj,
struct kobj_attribute *attr, char * buf)
+{
+ int i;
+ char * s = buf;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ for(i = 0; i < g_max_user_lockouts; i++) {
+ if(g_user_wake_locks[i].name_buffer[0] != '\0' &&
g_user_wake_locks[i].state == USER_WAKE_LOCK_FULL)
+ s += sprintf(s, "%s ", g_user_wake_locks[i].name_buffer);
+ }
+ s += sprintf(s, "\n");
+
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return (s - buf);
+}
+
+static ssize_t acquire_full_wake_lock_store(struct kobject *kobj,
struct kobj_attribute *attr, const char * buf, size_t n)
+{
+ int i;
+ unsigned long irqflags;
+ int timeout;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ i = lookup_wake_lock_name(buf, n, 1, &timeout);
+ if(i >= 0)
+ g_user_wake_locks[i].state = USER_WAKE_LOCK_FULL;
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ if(i < 0)
+ return i;
+
+#if ANDROID_POWER_PRINT_USER_WAKE_LOCKS
+ printk("acquire_full_wake_lock_store: %s, size %d\n",
g_user_wake_locks[i].name_buffer, n);
+#endif
+
+ // android_lock_partial_suspend_auto_expire(&g_user_wake_locks[i].suspend_lock, ktime_to_timespec(g_auto_off_timeout).tv_sec * HZ);
+ if(timeout == 0)
+ timeout = INT_MAX;
+ android_lock_partial_suspend_auto_expire(&g_user_wake_locks[i].suspend_lock, timeout);
+
+ return n;
+}
+
+static ssize_t acquire_partial_wake_lock_show(struct kobject *kobj,
struct kobj_attribute *attr, char * buf)
+{
+ int i;
+ char * s = buf;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ for(i = 0; i < g_max_user_lockouts; i++) {
+ if(g_user_wake_locks[i].name_buffer[0] != '\0' &&
g_user_wake_locks[i].state == USER_WAKE_LOCK_PARTIAL)
+ s += sprintf(s, "%s ", g_user_wake_locks[i].name_buffer);
+ }
+ s += sprintf(s, "\n");
+
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return (s - buf);
+}
+
+static ssize_t acquire_partial_wake_lock_store(struct kobject *kobj,
struct kobj_attribute *attr, const char * buf, size_t n)
+{
+ int i;
+ unsigned long irqflags;
+ int timeout;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ i = lookup_wake_lock_name(buf, n, 1, &timeout);
+ if(i >= 0)
+ g_user_wake_locks[i].state = USER_WAKE_LOCK_PARTIAL;
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ if(i < 0)
+ return i;
+
+#if ANDROID_POWER_PRINT_USER_WAKE_LOCKS
+ printk("acquire_partial_wake_lock_store: %s, size %d\n",
g_user_wake_locks[i].name_buffer, n);
+#endif
+
+ if(timeout)
+ android_lock_suspend_auto_expire(&g_user_wake_locks[i].suspend_lock, timeout);
+ else
+ android_lock_suspend(&g_user_wake_locks[i].suspend_lock);
+
+ return n;
+}
+
+
+static ssize_t release_wake_lock_show(struct kobject *kobj, struct
kobj_attribute *attr, char * buf)
+{
+ int i;
+ char * s = buf;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ for(i = 0; i < g_max_user_lockouts; i++) {
+ if(g_user_wake_locks[i].name_buffer[0] != '\0' &&
g_user_wake_locks[i].state == USER_WAKE_LOCK_INACTIVE)
+ s += sprintf(s, "%s ", g_user_wake_locks[i].name_buffer);
+ }
+ s += sprintf(s, "\n");
+
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+ return (s - buf);
+}
+
+static ssize_t release_wake_lock_store(struct kobject *kobj, struct
kobj_attribute *attr, const char * buf, size_t n)
+{
+ int i;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&g_list_lock, irqflags);
+ i = lookup_wake_lock_name(buf, n, 1, NULL);
+ if(i >= 0) {
+ g_user_wake_locks[i].state = USER_WAKE_LOCK_INACTIVE;
+ }
+ spin_unlock_irqrestore(&g_list_lock, irqflags);
+
+ if(i < 0)
+ return i;
+
+#if ANDROID_POWER_PRINT_USER_WAKE_LOCKS
+ printk("release_wake_lock_store: %s, size %d\n",
g_user_wake_locks[i].name_buffer, n);
+#endif
+
+ android_unlock_suspend(&g_user_wake_locks[i].suspend_lock);
+ return n;
+}
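(From user space these three files live under /sys/android_power (created in android_power_init() below). The store format is the lock name, optionally followed by a timeout in nanoseconds, exactly as parsed by lookup_wake_lock_name() above. A minimal sketch with a made-up lock name:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/android_power/acquire_partial_wake_lock", "w");

	if (!f)
		return 1;
	fprintf(f, "my_app 30000000000");	/* hold suspend off for at most 30s */
	fclose(f);

	/* ... work that must finish before the device sleeps ... */

	f = fopen("/sys/android_power/release_wake_lock", "w");
	if (!f)
		return 1;
	fprintf(f, "my_app");
	fclose(f);
	return 0;
}
)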
+
+
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char * s = buf;
+ int ret;
+
+ ret = wait_event_freezable(fb_state_wq, fb_state !=
ANDROID_DRAWING_OK);
+ if (!ret) {
+ s += sprintf(buf, "sleeping");
+ return (s - buf);
+ } else
+ return -1;
+}
+
+static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char * s = buf;
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ if (fb_state == ANDROID_REQUEST_STOP_DRAWING) {
+ fb_state = ANDROID_STOPPED_DRAWING;
+ wake_up(&fb_state_wq);
+ }
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ ret = wait_event_freezable(fb_state_wq, fb_state ==
ANDROID_DRAWING_OK);
+ if (!ret) {
+ s += sprintf(buf, "awake");
+ return (s - buf);
+ } else
+ return -1;
+}
+#endif
+
+#define android_power_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0664, \
+ }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+}
+
+#define android_power_ro_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0444, \
+ }, \
+ .show = _name##_show, \
+ .store = NULL, \
+}
+
+android_power_attr(state);
+android_power_attr(request_state);
+android_power_attr(acquire_full_wake_lock);
+android_power_attr(acquire_partial_wake_lock);
+android_power_attr(release_wake_lock);
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+android_power_ro_attr(wait_for_fb_sleep);
+android_power_ro_attr(wait_for_fb_wake);
+#endif
+
+static struct attribute * g[] = {
+ &state_attr.attr,
+ &request_state_attr.attr,
+ &acquire_full_wake_lock_attr.attr,
+ &acquire_partial_wake_lock_attr.attr,
+ &release_wake_lock_attr.attr,
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+ &wait_for_fb_sleep_attr.attr,
+ &wait_for_fb_wake_attr.attr,
+#endif
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = g,
+};
+
+#if 0
+// test code when there is no platform suspend
+
+static android_suspend_lock_t test_pm_ops_suspend_lock = {
+ .name = "test_pm_ops"
+};
+
+int test_pm_op_enter(suspend_state_t state)
+{
+ printk("test_pm_op_enter reached\n");
+ android_lock_suspend(&test_pm_ops_suspend_lock);
+ printk("test_pm_op_enter returned\n");
+ return 0;
+}
+
+void test_pm_ops_late_resume_handler(android_early_suspend_t *h)
+{
+ printk("test_pm_ops_late_resume_handler reached\n");
+ android_unlock_suspend(&test_pm_ops_suspend_lock);
+ printk("test_pm_ops_late_resume_handler returned\n");
+}
+
+static struct pm_ops test_pm_ops = {
+ .enter = test_pm_op_enter
+};
+
+static android_early_suspend_t test_pm_ops_early_suspend_handler = {
+ .resume = test_pm_ops_late_resume_handler
+};
+#endif
+
+static int __init android_power_init(void)
+{
+ int ret;
+ int i;
+
+ printk("android_power_init\n");
+
+#if 0
+ if(pm_ops == NULL) {
+ printk("android_power_init no pm_ops, installing test code\n");
+ pm_set_ops(&test_pm_ops);
+ android_init_suspend_lock(&test_pm_ops_suspend_lock);
+ android_register_early_suspend(&test_pm_ops_early_suspend_handler);
+ }
+#endif
+
+#ifdef CONFIG_ANDROID_POWER_STAT
+ g_deleted_wake_locks.stat.count = 0;
+#endif
+ init_waitqueue_head(&g_wait_queue);
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+ init_waitqueue_head(&fb_state_wq);
+ fb_state = ANDROID_DRAWING_OK;
+#endif
+
+ g_user_wake_locks = kzalloc(sizeof(*g_user_wake_locks) * g_max_user_lockouts, GFP_KERNEL);
+ if(g_user_wake_locks == NULL) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ for(i = 0; i < g_max_user_lockouts; i++) {
+ g_user_wake_locks[i].suspend_lock.name = g_user_wake_locks[i].name_buffer;
+ android_init_suspend_lock(&g_user_wake_locks[i].suspend_lock);
+ }
+
+ g_suspend_work_queue = create_workqueue("suspend");
+ if(g_suspend_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ android_power_kobj = kobject_create_and_add("android_power", NULL);
+ if (android_power_kobj == NULL) {
+ printk("android_power_init: subsystem_register failed\n");
+ ret = -ENOMEM;
+ goto err3;
+ }
+ ret = sysfs_create_group(android_power_kobj, &attr_group);
+ if(ret) {
+ printk("android_power_init: sysfs_create_group failed\n");
+ goto err4;
+ }
+#ifdef CONFIG_ANDROID_POWER_STAT
+ create_proc_read_entry("wakelocks", S_IRUGO, NULL,
wakelocks_read_proc, NULL);
+#endif
+
+#if ANDROID_POWER_TEST_EARLY_SUSPEND
+ {
+ int i;
+ for(i = 0; i < sizeof(early_suspend_tests) /
sizeof(early_suspend_tests[0]); i++)
+ android_register_early_suspend(&early_suspend_tests[i].h);
+ }
+#endif
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+ android_register_early_suspend(&console_early_suspend_desc);
+#else
+ android_register_early_suspend(&stop_drawing_early_suspend_desc);
+#endif
+
+#if 0
+ ret = sysdev_class_register(&android_power_sysclass);
+ if(ret) {
+ printk("android_power_init: sysdev_class_register failed\n");
+ goto err1;
+ }
+ ret = sysdev_register(&android_power_device.sysdev);
+ if(ret < 0)
+ goto err2;
+
+ g_android_power_sysclass = &android_power_sysclass;
+#endif
+ printk("android_power_init done\n");
+
+ return 0;
+
+//err2:
+// sysdev_class_unregister(&android_power_sysclass);
+err4:
+ kobject_del(android_power_kobj);
+err3:
+ destroy_workqueue(g_suspend_work_queue);
+err2:
+ for(i = 0; i < g_max_user_lockouts; i++) {
+ android_uninit_suspend_lock(&g_user_wake_locks[i].suspend_lock);
+ }
+ kfree(g_user_wake_locks);
+err1:
+ return ret;
+}
+
+static void __exit android_power_exit(void)
+{
+ int i;
+// g_android_power_sysclass = NULL;
+// sysdev_unregister(&android_power_device.sysdev);
+// sysdev_class_unregister(&android_power_sysclass);
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+ android_unregister_early_suspend(&console_early_suspend_desc);
+#else
+ android_unregister_early_suspend(&stop_drawing_early_suspend_desc);
+#endif
+#ifdef CONFIG_ANDROID_POWER_STAT
+ remove_proc_entry("wakelocks", NULL);
+#endif
+ sysfs_remove_group(android_power_kobj, &attr_group);
+ kobject_del(android_power_kobj);
+ destroy_workqueue(g_suspend_work_queue);
+ for(i = 0; i < g_max_user_lockouts; i++) {
+ android_uninit_suspend_lock(&g_user_wake_locks[i].suspend_lock);
+ }
+ kfree(g_user_wake_locks);
+}
+
+core_initcall(android_power_init);
+module_exit(android_power_exit);
+
+//EXPORT_SYMBOL(android_power_get_sysclass);
+EXPORT_SYMBOL(android_init_suspend_lock);
+EXPORT_SYMBOL(android_uninit_suspend_lock);
+EXPORT_SYMBOL(android_lock_suspend);
+EXPORT_SYMBOL(android_lock_suspend_auto_expire);
+EXPORT_SYMBOL(android_unlock_suspend);
+EXPORT_SYMBOL(android_power_wakeup);
+EXPORT_SYMBOL(android_register_early_suspend);
+EXPORT_SYMBOL(android_unregister_early_suspend);
+
+
diff -Naur linux-2.6.25/drivers/android/ram_console.c linux-2.6.25-
android-0.9_r1/drivers/android/ram_console.c
--- linux-2.6.25/drivers/android/ram_console.c 1970-01-01
01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/ram_console.c
2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,395 @@
+/* drivers/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+#include <linux/rslib.h>
+#endif
+
+struct ram_console_buffer {
+ uint32_t sig;
+ uint32_t start;
+ uint32_t size;
+ uint8_t data[0];
+};
+
+#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static char __initdata
+ ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
+#endif
+static char *ram_console_old_log;
+static size_t ram_console_old_log_size;
+
+static struct ram_console_buffer *ram_console_buffer;
+static size_t ram_console_buffer_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static char *ram_console_par_buffer;
+static struct rs_control *ram_console_rs_decoder;
+static int ram_console_corrected_bytes;
+static int ram_console_bad_blocks;
+#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+#endif
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(ram_console_rs_decoder, data, len, par, 0);
+ for (i = 0; i < ECC_SIZE; i++)
+ ecc[i] = par[i];
+}
+
+static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ for (i = 0; i < ECC_SIZE; i++)
+ par[i] = ecc[i];
+ return decode_rs8(ram_console_rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+#endif
+
+static void ram_console_update(const char *s, unsigned int count)
+{
+ struct ram_console_buffer *buffer = ram_console_buffer;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int size = ECC_BLOCK_SIZE;
+#endif
+ memcpy(buffer->data + buffer->start, s, count);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
+ par = ram_console_par_buffer +
+ (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
+ do {
+ if (block + ECC_BLOCK_SIZE > buffer_end)
+ size = buffer_end - block;
+ ram_console_encode_rs8(block, size, par);
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ } while (block < buffer->data + buffer->start + count);
+#endif
+}
+
+static void ram_console_update_header(void)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ struct ram_console_buffer *buffer = ram_console_buffer;
+ uint8_t *par;
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+ ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
+#endif
+}
+
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+ int rem;
+ struct ram_console_buffer *buffer = ram_console_buffer;
+
+ if (count > ram_console_buffer_size) {
+ s += count - ram_console_buffer_size;
+ count = ram_console_buffer_size;
+ }
+ rem = ram_console_buffer_size - buffer->start;
+ if (rem < count) {
+ ram_console_update(s, rem);
+ s += rem;
+ count -= rem;
+ buffer->start = 0;
+ buffer->size = ram_console_buffer_size;
+ }
+ ram_console_update(s, count);
+
+ buffer->start += count;
+ if (buffer->size < ram_console_buffer_size)
+ buffer->size += count;
+ ram_console_update_header();
+}
+
+static struct console ram_console = {
+ .name = "ram",
+ .write = ram_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+};
+
+static void __init
+ram_console_save_old(struct ram_console_buffer *buffer, char *dest)
+{
+ size_t old_log_size = buffer->size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *block;
+ uint8_t *par;
+ char strbuf[80];
+ int strbuf_len;
+
+ block = buffer->data;
+ par = ram_console_par_buffer;
+ while (block < buffer->data + buffer->size) {
+ int numerr;
+ int size = ECC_BLOCK_SIZE;
+ if (block + size > buffer->data + ram_console_buffer_size)
+ size = buffer->data + ram_console_buffer_size - block;
+ numerr = ram_console_decode_rs8(block, size, par);
+ if (numerr > 0) {
+#if 0
+ printk(KERN_INFO "ram_console: error in block %p, %d\n",
+ block, numerr);
+#endif
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+#if 0
+ printk(KERN_INFO "ram_console: uncorrectable error in "
+ "block %p\n", block);
+#endif
+ ram_console_bad_blocks++;
+ }
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ }
+ if (ram_console_corrected_bytes || ram_console_bad_blocks)
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ ram_console_corrected_bytes, ram_console_bad_blocks);
+ else
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\nNo errors detected\n");
+ if (strbuf_len >= sizeof(strbuf))
+ strbuf_len = sizeof(strbuf) - 1;
+ old_log_size += strbuf_len;
+#endif
+
+ if (dest == NULL) {
+ dest = kmalloc(old_log_size, GFP_KERNEL);
+ if (dest == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer\n");
+ return;
+ }
+ }
+
+ ram_console_old_log = dest;
+ ram_console_old_log_size = old_log_size;
+ memcpy(ram_console_old_log,
+ &buffer->data[buffer->start], buffer->size - buffer->start);
+ memcpy(ram_console_old_log + buffer->size - buffer->start,
+ &buffer->data[0], buffer->start);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ memcpy(ram_console_old_log + old_log_size - strbuf_len,
+ strbuf, strbuf_len);
+#endif
+}
+
+static int __init ram_console_init(struct ram_console_buffer *buffer,
+ size_t buffer_size, char *old_buf)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ int numerr;
+ uint8_t *par;
+#endif
+ ram_console_buffer = buffer;
+ ram_console_buffer_size =
+ buffer_size - sizeof(struct ram_console_buffer);
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
+ ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+ ram_console_par_buffer = buffer->data + ram_console_buffer_size;
+
+
+ /* first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
+ if (ram_console_rs_decoder == NULL) {
+ printk(KERN_INFO "ram_console: init_rs failed\n");
+ return 0;
+ }
+
+ ram_console_corrected_bytes = 0;
+ ram_console_bad_blocks = 0;
+
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+
+ numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
+ if (numerr > 0) {
+ printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ printk(KERN_INFO
+ "ram_console: uncorrectable error in header\n");
+ ram_console_bad_blocks++;
+ }
+#endif
+
+ if (buffer->sig == RAM_CONSOLE_SIG) {
+ if (buffer->size > ram_console_buffer_size
+ || buffer->start > buffer->size)
+ printk(KERN_INFO "ram_console: found existing invalid "
+ "buffer, size %d, start %d\n",
+ buffer->size, buffer->start);
+ else {
+ printk(KERN_INFO "ram_console: found existing buffer, "
+ "size %d, start %d\n",
+ buffer->size, buffer->start);
+ ram_console_save_old(buffer, old_buf);
+ }
+ } else {
+ printk(KERN_INFO "ram_console: no valid data in buffer "
+ "(sig = 0x%08x)\n", buffer->sig);
+ }
+
+ buffer->sig = RAM_CONSOLE_SIG;
+ buffer->start = 0;
+ buffer->size = 0;
+
+ register_console(&ram_console);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ console_verbose();
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static int __init ram_console_early_init(void)
+{
+ return ram_console_init((struct ram_console_buffer *)
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
+ ram_console_old_log_init_buffer);
+}
+#else
+static int ram_console_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res = pdev->resource;
+ size_t start;
+ size_t buffer_size;
+ void *buffer;
+
+ if (res == NULL || pdev->num_resources != 1 ||
+ !(res->flags & IORESOURCE_MEM)) {
+ printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
+ "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
+ return -ENXIO;
+ }
+ buffer_size = res->end - res->start + 1;
+ start = res->start;
+ printk(KERN_INFO "ram_console: got buffer at %x, size %x\n",
+ start, buffer_size);
+ buffer = ioremap(res->start, buffer_size);
+ if (buffer == NULL) {
+ printk(KERN_ERR "ram_console: failed to map memory\n");
+ return -ENOMEM;
+ }
+
+ return ram_console_init(buffer, buffer_size, NULL/* allocate */);
+}
+
+static struct platform_driver ram_console_driver = {
+ .probe = ram_console_driver_probe,
+ .driver = {
+ .name = "ram_console",
+ },
+};
+
+static int __init ram_console_module_init(void)
+{
+ int err;
+ err = platform_driver_register(&ram_console_driver);
+ return err;
+}
+#endif
+
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ loff_t pos = *offset;
+ ssize_t count;
+
+ if (pos >= ram_console_old_log_size)
+ return 0;
+
+ count = min(len, (size_t)(ram_console_old_log_size - pos));
+ if (copy_to_user(buf, ram_console_old_log + pos, count))
+ return -EFAULT;
+
+ *offset += count;
+ return count;
+}
+
+static struct file_operations ram_console_file_ops = {
+ .owner = THIS_MODULE,
+ .read = ram_console_read_old,
+};
+
+static int __init ram_console_late_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ if (ram_console_old_log == NULL)
+ return 0;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+ ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+ if (ram_console_old_log == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer for old log\n");
+ ram_console_old_log_size = 0;
+ return 0;
+ }
+ memcpy(ram_console_old_log,
+ ram_console_old_log_init_buffer, ram_console_old_log_size);
+#endif
+ entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+ if (!entry) {
+ printk(KERN_ERR "ram_console: failed to create proc entry\n");
+ kfree(ram_console_old_log);
+ ram_console_old_log = NULL;
+ return 0;
+ }
+
+ entry->proc_fops = &ram_console_file_ops;
+ entry->size = ram_console_old_log_size;
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+console_initcall(ram_console_early_init);
+#else
+module_init(ram_console_module_init);
+#endif
+late_initcall(ram_console_late_init);
+
diff -Naur linux-2.6.25/drivers/android/timed_gpio.c linux-2.6.25-
android-0.9_r1/drivers/android/timed_gpio.c
--- linux-2.6.25/drivers/android/timed_gpio.c 1970-01-01
01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/android/timed_gpio.c
2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,177 @@
+/* drivers/android/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <
lock...@android.com>
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <asm/arch/gpio.h>
+
+#include <linux/android_timed_gpio.h>
+
+
+static struct class *timed_gpio_class;
+
+struct timed_gpio_data {
+ struct device *dev;
+ struct hrtimer timer;
+ spinlock_t lock;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+ struct timed_gpio_data *gpio_data = container_of(timer, struct timed_gpio_data, timer);
+
+ gpio_direction_output(gpio_data->gpio, gpio_data->active_low ? 1 :
0);
+ return HRTIMER_NORESTART;
+}
+
+static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct timed_gpio_data *gpio_data = dev_get_drvdata(dev);
+ int remaining;
+
+ if (hrtimer_active(&gpio_data->timer)) {
+ ktime_t r = hrtimer_get_remaining(&gpio_data->timer);
+ remaining = r.tv.sec * 1000 + r.tv.nsec / 1000000;
+ } else
+ remaining = 0;
+
+ return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t gpio_enable_store(
+ struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct timed_gpio_data *gpio_data = dev_get_drvdata(dev);
+ int value;
+ unsigned long flags;
+
+ sscanf(buf, "%d", &value);
+
+ spin_lock_irqsave(&gpio_data->lock, flags);
+
+ /* cancel previous timer and set GPIO according to value */
+ hrtimer_cancel(&gpio_data->timer);
+ gpio_direction_output(gpio_data->gpio, gpio_data->active_low ? !
value : !!value);
+
+ if (value > 0) {
+ if (value > gpio_data->max_timeout)
+ value = gpio_data->max_timeout;
+
+ hrtimer_start(&gpio_data->timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+
+ spin_unlock_irqrestore(&gpio_data->lock, flags);
+
+ return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, gpio_enable_show, gpio_enable_store);
+
+static int android_timed_gpio_probe(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio *cur_gpio;
+ struct timed_gpio_data *gpio_data, *gpio_dat;
+ int i, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios, GFP_KERNEL);
+ if (!gpio_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ cur_gpio = &pdata->gpios[i];
+ gpio_dat = &gpio_data[i];
+
+ hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gpio_dat->timer.function = gpio_timer_func;
+ spin_lock_init(&gpio_dat->lock);
+
+ gpio_dat->gpio = cur_gpio->gpio;
+ gpio_dat->max_timeout = cur_gpio->max_timeout;
+ gpio_dat->active_low = cur_gpio->active_low;
+ gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+
+ gpio_dat->dev = device_create(timed_gpio_class, &pdev->dev, 0,
"%s", cur_gpio->name);
+ if (unlikely(IS_ERR(gpio_dat->dev)))
+ return PTR_ERR(gpio_dat->dev);
+
+ dev_set_drvdata(gpio_dat->dev, gpio_dat);
+ ret = device_create_file(gpio_dat->dev, &dev_attr_enable);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, gpio_data);
+
+ return 0;
+}
+
+static int android_timed_gpio_remove(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ device_remove_file(gpio_data[i].dev, &dev_attr_enable);
+ device_unregister(gpio_data[i].dev);
+ }
+
+ kfree(gpio_data);
+
+ return 0;
+}
+
+static struct platform_driver android_timed_gpio_driver = {
+ .probe = android_timed_gpio_probe,
+ .remove = android_timed_gpio_remove,
+ .driver = {
+ .name = "android-timed-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init android_timed_gpio_init(void)
+{
+ timed_gpio_class = class_create(THIS_MODULE, "timed_output");
+ if (IS_ERR(timed_gpio_class))
+ return PTR_ERR(timed_gpio_class);
+ return platform_driver_register(&android_timed_gpio_driver);
+}
+
+static void __exit android_timed_gpio_exit(void)
+{
+ class_destroy(timed_gpio_class);
+ platform_driver_unregister(&android_timed_gpio_driver);
+}
+
+module_init(android_timed_gpio_init);
+module_exit(android_timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <
lock...@android.com>");
+MODULE_DESCRIPTION("Android timed gpio driver");
+MODULE_LICENSE("GPL");
diff -Naur linux-2.6.25/drivers/char/Kconfig linux-2.6.25-
android-0.9_r1/drivers/char/Kconfig
--- linux-2.6.25/drivers/char/Kconfig 2008-04-17 03:49:44.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/drivers/char/Kconfig 2008-07-24
02:04:04.000000000 +0100
@@ -1041,12 +1041,31 @@
/sys/devices/platform/telco_clock, with a number of files for
controlling the behavior of this hardware.
+config DEVMEM
+ bool "Memory device driver"
+ default y
+ help
+ The memory driver provides two character devices, mem and kmem,
which
+ provide access to the system's memory. The mem device is a view of
+ physical memory, and each byte in the device corresponds to the
+ matching physical address. The kmem device is the same as mem, but
+ the addresses correspond to the kernel's virtual address space
rather
+ than physical memory. These devices are standard parts of a Linux
+ system and most users should say Y here. You might say N if very security conscious or memory is tight.
+
config DEVPORT
bool
depends on !M68K
depends on ISA || PCI
default y
+config GOLDFISH_TTY
+ tristate "Goldfish TTY Driver"
+ default n
+ help
+ TTY driver for Goldfish Virtual Platform.
+
source "drivers/s390/char/Kconfig"
endmenu
diff -Naur linux-2.6.25/drivers/misc/Kconfig linux-2.6.25-
android-0.9_r1/drivers/misc/Kconfig
--- linux-2.6.25/drivers/misc/Kconfig 2008-04-17 03:49:44.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/drivers/misc/Kconfig 2008-07-24
02:04:04.000000000 +0100
@@ -259,6 +259,16 @@
If you are not sure, say Y here.
+config LOW_MEMORY_KILLER
+ tristate "Low Memory Killer"
+ ---help---
+ Register processes to be killed when memory is low.
+
+config QEMU_TRACE
+ tristate "Virtual Device for QEMU tracing"
+ ---help---
+ This is a virtual device for QEMU tracing.
+
config THINKPAD_ACPI_VIDEO
bool "Video output control support"
depends on THINKPAD_ACPI
diff -Naur linux-2.6.25/drivers/misc/Makefile linux-2.6.25-
android-0.9_r1/drivers/misc/Makefile
--- linux-2.6.25/drivers/misc/Makefile 2008-04-17 03:49:44.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/drivers/misc/Makefile 2008-07-24
02:04:04.000000000 +0100
@@ -21,4 +21,6 @@
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
+obj-$(CONFIG_LOW_MEMORY_KILLER) += lowmemorykiller.o
+obj-$(CONFIG_QEMU_TRACE) += qemutrace/
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
diff -Naur linux-2.6.25/drivers/misc/lowmemorykiller.c linux-2.6.25-
android-0.9_r1/drivers/misc/lowmemorykiller.c
--- linux-2.6.25/drivers/misc/lowmemorykiller.c 1970-01-01
01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/drivers/misc/lowmemorykiller.c
2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,119 @@
+/* drivers/misc/lowmemorykiller.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+
+static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask);
+
+static struct shrinker lowmem_shrinker = {
+ .shrink = lowmem_shrink,
+ .seeks = DEFAULT_SEEKS * 16
+};
+static uint32_t lowmem_debug_level = 2;
+static int lowmem_adj[6] = {
+ 0,
+ 1,
+ 6,
+ 12,
+};
+static int lowmem_adj_size = 4;
+static size_t lowmem_minfree[6] = {
+ 3*512, // 6MB
+ 2*1024, // 8MB
+ 4*1024, // 16MB
+ 16*1024, // 64MB
+};
+static int lowmem_minfree_size = 4;
+
+#define lowmem_print(level, x...) do { if(lowmem_debug_level >=
(level)) printk(x); } while(0)
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, S_IRUGO | S_IWUSR);
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+
+static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+ struct task_struct *p;
+ struct task_struct *selected = NULL;
+ int rem = 0;
+ int tasksize;
+ int i;
+ int min_adj = OOM_ADJUST_MAX + 1;
+ int selected_tasksize = 0;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+ int other_free = global_page_state(NR_FREE_PAGES) + global_page_state(NR_FILE_PAGES);
+ if(lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if(lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+ for(i = 0; i < array_size; i++) {
+ if(other_free < lowmem_minfree[i]) {
+ min_adj = lowmem_adj[i];
+ break;
+ }
+ }
+ if(nr_to_scan > 0)
+ lowmem_print(3, "lowmem_shrink %d, %x, ofree %d, ma %d\n",
nr_to_scan, gfp_mask, other_free, min_adj);
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ if(p->oomkilladj >= 0 && p->mm) {
+ tasksize = get_mm_rss(p->mm);
+ if(nr_to_scan > 0 && tasksize > 0 && p->oomkilladj >= min_adj) {
+ if(selected == NULL ||
+ p->oomkilladj > selected->oomkilladj ||
+ (p->oomkilladj == selected->oomkilladj &&
+ tasksize > selected_tasksize)) {
+ selected = p;
+ selected_tasksize = tasksize;
+ lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+ p->pid, p->comm, p->oomkilladj, tasksize);
+ }
+ }
+ rem += tasksize;
+ }
+ }
+ if(selected != NULL) {
+ lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+ selected->pid, selected->comm,
+ selected->oomkilladj, selected_tasksize);
+ force_sig(SIGKILL, selected);
+ rem -= selected_tasksize;
+ }
+ lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", nr_to_scan,
gfp_mask, rem);
+ read_unlock(&tasklist_lock);
+ return rem;
+}
+
+static int __init lowmem_init(void)
+{
+ register_shrinker(&lowmem_shrinker);
+ return 0;
+}
+
+static void __exit lowmem_exit(void)
+{
+ unregister_shrinker(&lowmem_shrinker);
+}
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+
diff -Naur linux-2.6.25/fs/inotify_user.c linux-2.6.25-android-0.9_r1/
fs/inotify_user.c
--- linux-2.6.25/fs/inotify_user.c 2008-04-17 03:49:44.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/fs/inotify_user.c 2008-07-24
02:04:04.000000000 +0100
@@ -32,6 +32,9 @@
#include <linux/inotify.h>
#include <linux/syscalls.h>
#include <linux/magic.h>
+#ifdef CONFIG_ANDROID_POWER
+#include <linux/android_power.h>
+#endif
#include <asm/ioctls.h>
@@ -83,6 +86,9 @@
unsigned int queue_size; /* size of the queue (bytes) */
unsigned int event_count; /* number of pending events */
unsigned int max_events; /* maximum number of events */
+#ifdef CONFIG_ANDROID_POWER
+ android_suspend_lock_t suspend_lock;
+#endif
};
/*
@@ -159,6 +165,9 @@
if (atomic_dec_and_test(&dev->count)) {
atomic_dec(&dev->user->inotify_devs);
free_uid(dev->user);
+#ifdef CONFIG_ANDROID_POWER
+ android_uninit_suspend_lock(&dev->suspend_lock);
+#endif
kfree(dev);
}
}
@@ -317,6 +326,9 @@
list_add_tail(&kevent->list, &dev->events);
wake_up_interruptible(&dev->wq);
kill_fasync(&dev->fa, SIGIO, POLL_IN);
+#ifdef CONFIG_ANDROID_POWER
+ android_lock_suspend_auto_expire(&dev->suspend_lock, 5 * HZ);
+#endif
out:
mutex_unlock(&dev->ev_mutex);
@@ -334,6 +346,10 @@
dev->event_count--;
dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
+#ifdef CONFIG_ANDROID_POWER
+ if(dev->event_count == 0)
+ android_unlock_suspend(&dev->suspend_lock);
+#endif
kfree(kevent->name);
kmem_cache_free(event_cachep, kevent);
@@ -622,6 +638,10 @@
dev->max_events = inotify_max_queued_events;
dev->user = user;
atomic_set(&dev->count, 0);
+#ifdef CONFIG_ANDROID_POWER
+ dev->
suspend_lock.name = "inotify";
+ android_init_suspend_lock(&dev->suspend_lock);
+#endif
get_inotify_dev(dev);
atomic_inc(&user->inotify_devs);
diff -Naur linux-2.6.25/include/linux/android_alarm.h linux-2.6.25-
android-0.9_r1/include/linux/android_alarm.h
--- linux-2.6.25/include/linux/android_alarm.h 1970-01-01
01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/include/linux/android_alarm.h
2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,59 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <asm/ioctl.h>
+#include <linux/time.h>
+
+typedef enum {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+} android_alarm_type_t;
+
+typedef enum {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = 1U <<
ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK = 1U <<
ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+} android_alarm_return_flags_t;
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) <<
4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) _IOW('a', 2 | ((type) <<
4), struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) _IOW('a', 3 | ((type) <<
4), struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) _IOW('a', 4 | ((type) <<
4), struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct
timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
diff -Naur linux-2.6.25/include/linux/android_power.h linux-2.6.25-
android-0.9_r1/include/linux/android_power.h
--- linux-2.6.25/include/linux/android_power.h 1970-01-01
01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/include/linux/android_power.h
2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,95 @@
+/* include/linux/android_power.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_POWER_H
+#define _LINUX_ANDROID_POWER_H
+
+#include <linux/list.h>
+#include <linux/ktime.h>
+
+typedef struct
+{
+ struct list_head link;
+ int flags;
+ const char *name;
+ int expires;
+#ifdef CONFIG_ANDROID_POWER_STAT
+ struct {
+ int count;
+ int expire_count;
+ ktime_t total_time;
+ ktime_t max_time;
+ ktime_t last_time;
+ } stat;
+#endif
+} android_suspend_lock_t;
+
+#if 0 /* none of these flags are implemented */
+#define ANDROID_SUSPEND_LOCK_FLAG_COUNTED (1U << 0)
+#define ANDROID_SUSPEND_LOCK_FLAG_USER_READABLE (1U << 1)
+#define ANDROID_SUSPEND_LOCK_FLAG_USER_SET (1U << 2)
+#define ANDROID_SUSPEND_LOCK_FLAG_USER_CLEAR (1U << 3)
+#define ANDROID_SUSPEND_LOCK_FLAG_USER_INC (1U << 4)
+#define ANDROID_SUSPEND_LOCK_FLAG_USER_DEC (1U << 5)
+#define ANDROID_SUSPEND_LOCK_FLAG_USER_VISIBLE_MASK (0x1fU << 1)
+#endif
+#define ANDROID_SUSPEND_LOCK_AUTO_EXPIRE (1U << 6)
+#define ANDROID_SUSPEND_LOCK_ACTIVE (1U << 7)
+
+enum {
+ ANDROID_STOPPED_DRAWING,
+ ANDROID_REQUEST_STOP_DRAWING,
+ ANDROID_DRAWING_OK,
+};
+
+enum {
+ ANDROID_EARLY_SUSPEND_LEVEL_BLANK_SCREEN = 50,
+ ANDROID_EARLY_SUSPEND_LEVEL_CONSOLE_SWITCH = 100,
+ ANDROID_EARLY_SUSPEND_LEVEL_DISABLE_FB = 150,
+};
+typedef struct android_early_suspend android_early_suspend_t;
+struct android_early_suspend
+{
+ struct list_head link;
+ int level;
+ void (*suspend)(android_early_suspend_t *h);
+ void (*resume)(android_early_suspend_t *h);
+};
+
+typedef enum {
+ ANDROID_CHARGING_STATE_UNKNOWN,
+ ANDROID_CHARGING_STATE_DISCHARGE,
+ ANDROID_CHARGING_STATE_MAINTAIN, /* or trickle */
+ ANDROID_CHARGING_STATE_SLOW,
+ ANDROID_CHARGING_STATE_NORMAL,
+ ANDROID_CHARGING_STATE_FAST,
+ ANDROID_CHARGING_STATE_OVERHEAT
+} android_charging_state_t;
+
+/* android_suspend_lock_t *android_allocate_suspend_lock(const char *debug_name); */
+/* void android_free_suspend_lock(android_suspend_lock_t *lock); */
+int android_init_suspend_lock(android_suspend_lock_t *lock);
+void android_uninit_suspend_lock(android_suspend_lock_t *lock);
+void android_lock_suspend(android_suspend_lock_t *lock);
+void android_lock_suspend_auto_expire(android_suspend_lock_t *lock, int timeout);
+void android_unlock_suspend(android_suspend_lock_t *lock);
+
+int android_power_is_driver_suspended(void);
+
+void android_register_early_suspend(android_early_suspend_t *handler);
+void android_unregister_early_suspend(android_early_suspend_t *handler);
+
+#endif
+
diff -Naur linux-2.6.25/include/linux/android_timed_gpio.h
linux-2.6.25-android-0.9_r1/include/linux/android_timed_gpio.h
--- linux-2.6.25/include/linux/android_timed_gpio.h 1970-01-01
01:00:00.000000000 +0100
+++ linux-2.6.25-android-0.9_r1/include/linux/android_timed_gpio.h
2008-07-24 02:04:04.000000000 +0100
@@ -0,0 +1,31 @@
+/* include/linux/android_timed_gpio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_ANDROID_TIMED_GPIO_H
+#define _LINUX_ANDROID_TIMED_GPIO_H
+
+struct timed_gpio {
+ const char *name;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+struct timed_gpio_platform_data {
+ int num_gpios;
+ struct timed_gpio *gpios;
+};
+
+#endif
diff -Naur linux-2.6.25/include/linux/ashmem.h linux-2.6.25-
android-0.9_r1/include/linux/ashmem.h
--- linux-2.6.25/include/linux/ashmem.h 1970-01-01 01:00:00.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/include/linux/ashmem.h 2008-07-24
02:04:04.000000000 +0100
@@ -0,0 +1,48 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or
modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while
unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned?
*/
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _LINUX_ASHMEM_H */
diff -Naur linux-2.6.25/include/linux/binder.h linux-2.6.25-
android-0.9_r1/include/linux/binder.h
--- linux-2.6.25/include/linux/binder.h 1970-01-01 01:00:00.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/include/linux/binder.h 2008-07-24
02:04:04.000000000 +0100
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for
transfer
+ * between processes. The 'offsets' supplied as part of a binder
transaction
+ * contains offsets into the data where these structures occur. The
Binder
+ * driver takes care of re-writing the structure type and data as it
moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver
must
+ * translate the buffer (and local binder) addresses apropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
+#define BINDER_THREAD_EXIT _IOW('b', 8, int)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It
exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER
command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incomming transactions. When a
process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum BinderDriverCommandProtocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, int),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, int),
+ BC_ACQUIRE = _IOW('c', 5, int),
+ BC_RELEASE = _IOW('c', 6, int),
+ BC_DECREFS = _IOW('c', 7, int),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _LINUX_BINDER_H */
+
diff -Naur linux-2.6.25/include/linux/logger.h linux-2.6.25-
android-0.9_r1/include/linux/logger.h
--- linux-2.6.25/include/linux/logger.h 1970-01-01 01:00:00.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/include/linux/logger.h 2008-07-24
02:04:04.000000000 +0100
@@ -0,0 +1,48 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <
rl...@android.com>
+ *
+ * This software is licensed under the terms of the GNU General
Public
+ * License version 2, as published by the Free Software Foundation,
and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct logger_entry {
+ __u16 len; /* length of the payload */
+ __u16 __pad; /* no matter what, we get 2 bytes of padding */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ char msg[0]; /* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
+#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
+#define LOGGER_LOG_MAIN "log_main" /* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN (4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD \
+ (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+
+#define __LOGGERIO 0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
+
+#endif /* _LINUX_LOGGER_H */
diff -Naur linux-2.6.25/kernel/power/process.c linux-2.6.25-
android-0.9_r1/kernel/power/process.c
--- linux-2.6.25/kernel/power/process.c 2008-04-17 03:49:44.000000000
+0100
+++ linux-2.6.25-android-0.9_r1/kernel/power/process.c 2008-07-24
02:04:04.000000000 +0100
@@ -13,6 +13,9 @@
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
+#ifdef CONFIG_ANDROID_POWER
+#include <linux/android_power.h>
+#endif
/*
* Timeout for stopping processes
@@ -164,6 +167,9 @@
struct timeval start, end;
s64 elapsed_csecs64;
unsigned int elapsed_csecs;
+#ifdef CONFIG_ANDROID_POWER
+ unsigned int wakeup = 0;
+#endif
do_gettimeofday(&start);
@@ -190,6 +196,12 @@
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
yield(); /* Yield is okay here */
+#ifdef CONFIG_ANDROID_POWER
+ if (todo && !android_power_is_driver_suspended()) {
+ wakeup = 1;
+ break;
+ }
+#endif
if (time_after(jiffies, end_time))
break;
} while (todo);
@@ -205,11 +217,22 @@
* and caller must call thaw_processes() if something fails),
* but it cleans up leftover PF_FREEZE requests.
*/
+#ifdef CONFIG_ANDROID_POWER
+ if(wakeup) {
+ printk("\n");
+ printk(KERN_ERR "Freezing of %s aborted\n",
+ freeze_user_space ? "user space " : "tasks ");
+ }
+ else {
+#endif
printk("\n");
printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
"(%d tasks refusing to freeze):\n",
elapsed_csecs / 100, elapsed_csecs % 100, todo);
show_state();
+#ifdef CONFIG_ANDROID_POWER
+ }
+#endif
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
======================================================
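
In case it helps with testing on real HW: the android_power sysfs interface in the patch is easy to poke from userspace. android_power_init() registers the "android_power" kobject at the sysfs root, so the files should show up under /sys/android_power/ (that path, and the lock name below, are just my assumptions from reading the patch, not anything official). A minimal sketch:

/* Hedged test sketch: hold and release a named partial wake lock via
 * the sysfs files created by android_power_init(). The path and the
 * lock name "test" are assumptions based on the patch above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
}

int main(void)
{
	/* grab a partial wake lock named "test"... */
	write_str("/sys/android_power/acquire_partial_wake_lock", "test");
	sleep(10);	/* stand-in for the work that needs the device awake */
	/* ...and drop it again */
	write_str("/sys/android_power/release_wake_lock", "test");
	return 0;
}

acquire_partial_wake_lock_store() also seems to accept an optional timeout after the name (it is passed straight to android_lock_suspend_auto_expire(), so presumably in jiffies), which makes a lock self-releasing. The timed_gpio driver is similar in spirit: each GPIO gets an "enable" file under the timed_output class, and writing a millisecond count to it drives the pin for that long.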
Apologies for the long post.
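
P.S. For sanity-checking the ram_console part after a warm reboot, the previous boot's log can simply be read back from the proc entry that ram_console_late_init() registers. A rough sketch (assuming the entry shows up as /proc/last_kmsg, as in the patch):

/* Hedged sketch: dump the old console log preserved by ram_console. */
#include <stdio.h>

int main(void)
{
	char buf[4096];
	size_t n;
	FILE *f = fopen("/proc/last_kmsg", "r");

	if (!f) {
		perror("/proc/last_kmsg");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}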