mirror of
https://github.com/hardkernel/linux.git
synced 2026-03-24 19:40:21 +09:00
Automatically merging tracking-linaro-android-3.14 into merge-linux-linaro-core-tracking
Conflicting files:
This commit is contained in:
121
Documentation/android.txt
Normal file
121
Documentation/android.txt
Normal file
@@ -0,0 +1,121 @@
|
||||
=============
|
||||
A N D R O I D
|
||||
=============
|
||||
|
||||
Copyright (C) 2009 Google, Inc.
|
||||
Written by Mike Chan <mike@android.com>
|
||||
|
||||
CONTENTS:
|
||||
---------
|
||||
|
||||
1. Android
|
||||
1.1 Required enabled config options
|
||||
1.2 Required disabled config options
|
||||
1.3 Recommended enabled config options
|
||||
2. Contact
|
||||
|
||||
|
||||
1. Android
|
||||
==========
|
||||
|
||||
Android (www.android.com) is an open source operating system for mobile devices.
|
||||
This document describes configurations needed to run the Android framework on
|
||||
top of the Linux kernel.
|
||||
|
||||
To see a working defconfig look at msm_defconfig or goldfish_defconfig
|
||||
which can be found at http://android.git.kernel.org in kernel/common.git
|
||||
and kernel/msm.git
|
||||
|
||||
|
||||
1.1 Required enabled config options
|
||||
-----------------------------------
|
||||
After building a standard defconfig, ensure that these options are enabled in
|
||||
your .config or defconfig if they are not already. Based off the msm_defconfig.
|
||||
You should keep the rest of the default options enabled in the defconfig
|
||||
unless you know what you are doing.
|
||||
|
||||
ANDROID_PARANOID_NETWORK
|
||||
ASHMEM
|
||||
CONFIG_FB_MODE_HELPERS
|
||||
CONFIG_FONT_8x16
|
||||
CONFIG_FONT_8x8
|
||||
CONFIG_YAFFS_SHORT_NAMES_IN_RAM
|
||||
DAB
|
||||
EARLYSUSPEND
|
||||
FB
|
||||
FB_CFB_COPYAREA
|
||||
FB_CFB_FILLRECT
|
||||
FB_CFB_IMAGEBLIT
|
||||
FB_DEFERRED_IO
|
||||
FB_TILEBLITTING
|
||||
HIGH_RES_TIMERS
|
||||
INOTIFY
|
||||
INOTIFY_USER
|
||||
INPUT_EVDEV
|
||||
INPUT_GPIO
|
||||
INPUT_MISC
|
||||
LEDS_CLASS
|
||||
LEDS_GPIO
|
||||
LOCK_KERNEL
|
||||
LOGGER
|
||||
LOW_MEMORY_KILLER
|
||||
MISC_DEVICES
|
||||
NEW_LEDS
|
||||
NO_HZ
|
||||
POWER_SUPPLY
|
||||
PREEMPT
|
||||
RAMFS
|
||||
RTC_CLASS
|
||||
RTC_LIB
|
||||
SWITCH
|
||||
SWITCH_GPIO
|
||||
TMPFS
|
||||
UID_STAT
|
||||
UID16
|
||||
USB_FUNCTION
|
||||
USB_FUNCTION_ADB
|
||||
USER_WAKELOCK
|
||||
VIDEO_OUTPUT_CONTROL
|
||||
WAKELOCK
|
||||
YAFFS_AUTO_YAFFS2
|
||||
YAFFS_FS
|
||||
YAFFS_YAFFS1
|
||||
YAFFS_YAFFS2
|
||||
|
||||
|
||||
1.2 Required disabled config options
|
||||
------------------------------------
|
||||
CONFIG_YAFFS_DISABLE_LAZY_LOAD
|
||||
DNOTIFY
|
||||
|
||||
|
||||
1.3 Recommended enabled config options
|
||||
--------------------------------------
|
||||
ANDROID_PMEM
|
||||
PSTORE_CONSOLE
|
||||
PSTORE_RAM
|
||||
SCHEDSTATS
|
||||
DEBUG_PREEMPT
|
||||
DEBUG_MUTEXES
|
||||
DEBUG_SPINLOCK_SLEEP
|
||||
DEBUG_INFO
|
||||
FRAME_POINTER
|
||||
CPU_FREQ
|
||||
CPU_FREQ_TABLE
|
||||
CPU_FREQ_DEFAULT_GOV_ONDEMAND
|
||||
CPU_FREQ_GOV_ONDEMAND
|
||||
CRC_CCITT
|
||||
EMBEDDED
|
||||
INPUT_TOUCHSCREEN
|
||||
I2C
|
||||
I2C_BOARDINFO
|
||||
LOG_BUF_SHIFT=17
|
||||
SERIAL_CORE
|
||||
SERIAL_CORE_CONSOLE
|
||||
|
||||
|
||||
2. Contact
|
||||
==========
|
||||
website: http://android.git.kernel.org
|
||||
|
||||
mailing-lists: android-kernel@googlegroups.com
|
||||
@@ -578,6 +578,15 @@ is completely unused; @cgrp->parent is still valid. (Note - can also
|
||||
be called for a newly-created cgroup if an error occurs after this
|
||||
subsystem's create() method has been called for the new cgroup).
|
||||
|
||||
int allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
|
||||
(cgroup_mutex held by caller)
|
||||
|
||||
Called prior to moving a task into a cgroup; if the subsystem
|
||||
returns an error, this will abort the attach operation. Used
|
||||
to extend the permission checks - if all subsystems in a cgroup
|
||||
return 0, the attach will be allowed to proceed, even if the
|
||||
default permission check (root or same user) fails.
|
||||
|
||||
int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
|
||||
(cgroup_mutex held by caller)
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ Contents:
|
||||
2.3 Userspace
|
||||
2.4 Ondemand
|
||||
2.5 Conservative
|
||||
2.6 Interactive
|
||||
|
||||
3. The Governor Interface in the CPUfreq Core
|
||||
|
||||
@@ -218,6 +219,90 @@ a decision on when to decrease the frequency while running in any
|
||||
speed. Load for frequency increase is still evaluated every
|
||||
sampling rate.
|
||||
|
||||
2.6 Interactive
|
||||
---------------
|
||||
|
||||
The CPUfreq governor "interactive" is designed for latency-sensitive,
|
||||
interactive workloads. This governor sets the CPU speed depending on
|
||||
usage, similar to "ondemand" and "conservative" governors, but with a
|
||||
different set of configurable behaviors.
|
||||
|
||||
The tuneable values for this governor are:
|
||||
|
||||
target_loads: CPU load values used to adjust speed to influence the
|
||||
current CPU load toward that value. In general, the lower the target
|
||||
load, the more often the governor will raise CPU speeds to bring load
|
||||
below the target. The format is a single target load, optionally
|
||||
followed by pairs of CPU speeds and CPU loads to target at or above
|
||||
those speeds. Colons can be used between the speeds and associated
|
||||
target loads for readability. For example:
|
||||
|
||||
85 1000000:90 1700000:99
|
||||
|
||||
targets CPU load 85% below speed 1GHz, 90% at or above 1GHz, until
|
||||
1.7GHz and above, at which load 99% is targeted. If speeds are
|
||||
specified these must appear in ascending order. Higher target load
|
||||
values are typically specified for higher speeds, that is, target load
|
||||
values also usually appear in an ascending order. The default is
|
||||
target load 90% for all speeds.
|
||||
|
||||
min_sample_time: The minimum amount of time to spend at the current
|
||||
frequency before ramping down. Default is 80000 uS.
|
||||
|
||||
hispeed_freq: An intermediate "hi speed" at which to initially ramp
|
||||
when CPU load hits the value specified in go_hispeed_load. If load
|
||||
stays high for the amount of time specified in above_hispeed_delay,
|
||||
then speed may be bumped higher. Default is the maximum speed
|
||||
allowed by the policy at governor initialization time.
|
||||
|
||||
go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
|
||||
Default is 99%.
|
||||
|
||||
above_hispeed_delay: When speed is at or above hispeed_freq, wait for
|
||||
this long before raising speed in response to continued high load.
|
||||
The format is a single delay value, optionally followed by pairs of
|
||||
CPU speeds and the delay to use at or above those speeds. Colons can
|
||||
be used between the speeds and associated delays for readability. For
|
||||
example:
|
||||
|
||||
80000 1300000:200000 1500000:40000
|
||||
|
||||
uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
|
||||
200000 uS is used until speed 1.5 GHz, at which speed (and above)
|
||||
delay 40000 uS is used. If speeds are specified these must appear in
|
||||
ascending order. Default is 20000 uS.
|
||||
|
||||
timer_rate: Sample rate for reevaluating CPU load when the CPU is not
|
||||
idle. A deferrable timer is used, such that the CPU will not be woken
|
||||
from idle to service this timer until something else needs to run.
|
||||
(The maximum time to allow deferring this timer when not running at
|
||||
minimum speed is configurable via timer_slack.) Default is 20000 uS.
|
||||
|
||||
timer_slack: Maximum additional time to defer handling the governor
|
||||
sampling timer beyond timer_rate when running at speeds above the
|
||||
minimum. For platforms that consume additional power at idle when
|
||||
CPUs are running at speeds greater than minimum, this places an upper
|
||||
bound on how long the timer will be deferred prior to re-evaluating
|
||||
load and dropping speed. For example, if timer_rate is 20000uS and
|
||||
timer_slack is 10000uS then timers will be deferred for up to 30msec
|
||||
when not at lowest speed. A value of -1 means defer timers
|
||||
indefinitely at all speeds. Default is 80000 uS.
|
||||
|
||||
boost: If non-zero, immediately boost speed of all CPUs to at least
|
||||
hispeed_freq until zero is written to this attribute. If zero, allow
|
||||
CPU speeds to drop below hispeed_freq according to load as usual.
|
||||
Default is zero.
|
||||
|
||||
boostpulse: On each write, immediately boost speed of all CPUs to
|
||||
hispeed_freq for at least the period of time specified by
|
||||
boostpulse_duration, after which speeds are allowed to drop below
|
||||
hispeed_freq according to load as usual.
|
||||
|
||||
boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
|
||||
on a write to boostpulse, before allowing speed to drop according to
|
||||
load as usual. Default is 80000 uS.
|
||||
|
||||
|
||||
3. The Governor Interface in the CPUfreq Core
|
||||
=============================================
|
||||
|
||||
|
||||
@@ -369,6 +369,8 @@ is not associated with a file:
|
||||
[stack:1001] = the stack of the thread with tid 1001
|
||||
[vdso] = the "virtual dynamic shared object",
|
||||
the kernel system call handler
|
||||
[anon:<name>] = an anonymous mapping that has been
|
||||
named by userspace
|
||||
|
||||
or if empty, the mapping is anonymous.
|
||||
|
||||
@@ -419,6 +421,7 @@ KernelPageSize: 4 kB
|
||||
MMUPageSize: 4 kB
|
||||
Locked: 374 kB
|
||||
VmFlags: rd ex mr mw me de
|
||||
Name: name from userspace
|
||||
|
||||
the first of these lines shows the same information as is displayed for the
|
||||
mapping in /proc/PID/maps. The remaining lines show the size of the mapping
|
||||
@@ -470,6 +473,9 @@ Note that there is no guarantee that every flag and associated mnemonic will
|
||||
be present in all further kernel releases. Things get changed, the flags may
|
||||
be vanished or the reverse -- new added.
|
||||
|
||||
The "Name" field will only be present on a mapping that has been named by
|
||||
userspace, and will show the name passed in by userspace.
|
||||
|
||||
This file is only present if the CONFIG_MMU kernel configuration option is
|
||||
enabled.
|
||||
|
||||
|
||||
75
Documentation/sync.txt
Normal file
75
Documentation/sync.txt
Normal file
@@ -0,0 +1,75 @@
|
||||
Motivation:
|
||||
|
||||
In complicated DMA pipelines such as graphics (multimedia, camera, gpu, display)
|
||||
a consumer of a buffer needs to know when the producer has finished producing
|
||||
it. Likewise the producer needs to know when the consumer is finished with the
|
||||
buffer so it can reuse it. A particular buffer may be consumed by multiple
|
||||
consumers which will retain the buffer for different amounts of time. In
|
||||
addition, a consumer may consume multiple buffers atomically.
|
||||
The sync framework adds an API which allows synchronization between the
|
||||
producers and consumers in a generic way while also allowing platforms which
|
||||
have shared hardware synchronization primitives to exploit them.
|
||||
|
||||
Goals:
|
||||
* provide a generic API for expressing synchronization dependencies
|
||||
* allow drivers to exploit hardware synchronization between hardware
|
||||
blocks
|
||||
* provide a userspace API that allows a compositor to manage
|
||||
dependencies.
|
||||
* provide rich telemetry data to allow debugging slowdowns and stalls of
|
||||
the graphics pipeline.
|
||||
|
||||
Objects:
|
||||
* sync_timeline
|
||||
* sync_pt
|
||||
* sync_fence
|
||||
|
||||
sync_timeline:
|
||||
|
||||
A sync_timeline is an abstract monotonically increasing counter. In general,
|
||||
each driver/hardware block context will have one of these. They can be backed
|
||||
by the appropriate hardware or rely on the generic sw_sync implementation.
|
||||
Timelines are only ever created through their specific implementations
|
||||
(i.e. sw_sync.)
|
||||
|
||||
sync_pt:
|
||||
|
||||
A sync_pt is an abstract value which marks a point on a sync_timeline. Sync_pts
|
||||
have a single timeline parent. They have 3 states: active, signaled, and error.
|
||||
They start in active state and transition, once, to either signaled (when the
|
||||
timeline counter advances beyond the sync_pt’s value) or error state.
|
||||
|
||||
sync_fence:
|
||||
|
||||
Sync_fences are the primary primitives used by drivers to coordinate
|
||||
synchronization of their buffers. They are a collection of sync_pts which may
|
||||
or may not have the same timeline parent. A sync_pt can only exist in one fence
|
||||
and the fence's list of sync_pts is immutable once created. Fences can be
|
||||
waited on synchronously or asynchronously. Two fences can also be merged to
|
||||
create a third fence containing a copy of the two fences’ sync_pts. Fences are
|
||||
backed by file descriptors to allow userspace to coordinate the display pipeline
|
||||
dependencies.
|
||||
|
||||
Use:
|
||||
|
||||
A driver implementing sync support should have a work submission function which:
|
||||
* takes a fence argument specifying when to begin work
|
||||
* asynchronously queues that work to kick off when the fence is signaled
|
||||
* returns a fence to indicate when its work will be done.
|
||||
* signals the returned fence once the work is completed.
|
||||
|
||||
Consider an imaginary display driver that has the following API:
|
||||
/*
|
||||
* assumes buf is ready to be displayed.
|
||||
* blocks until the buffer is on screen.
|
||||
*/
|
||||
void display_buffer(struct dma_buf *buf);
|
||||
|
||||
The new API will become:
|
||||
/*
|
||||
* will display buf when fence is signaled.
|
||||
* returns immediately with a fence that will signal when buf
|
||||
* is no longer displayed.
|
||||
*/
|
||||
struct sync_fence* display_buffer(struct dma_buf *buf,
|
||||
struct sync_fence *fence);
|
||||
@@ -29,6 +29,7 @@ Currently, these files are in /proc/sys/vm:
|
||||
- dirty_writeback_centisecs
|
||||
- drop_caches
|
||||
- extfrag_threshold
|
||||
- extra_free_kbytes
|
||||
- hugepages_treat_as_movable
|
||||
- hugetlb_shm_group
|
||||
- laptop_mode
|
||||
@@ -204,6 +205,21 @@ fragmentation index is <= extfrag_threshold. The default value is 500.
|
||||
|
||||
==============================================================
|
||||
|
||||
extra_free_kbytes
|
||||
|
||||
This parameter tells the VM to keep extra free memory between the threshold
|
||||
where background reclaim (kswapd) kicks in, and the threshold where direct
|
||||
reclaim (by allocating processes) kicks in.
|
||||
|
||||
This is useful for workloads that require low latency memory allocations
|
||||
and have a bounded burstiness in memory allocations, for example a
|
||||
realtime application that receives and transmits network traffic
|
||||
(causing in-kernel memory allocations) with a maximum total message burst
|
||||
size of 200MB may need 200MB of extra free memory to avoid direct reclaim
|
||||
related latencies.
|
||||
|
||||
==============================================================
|
||||
|
||||
hugepages_treat_as_movable
|
||||
|
||||
This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
|
||||
|
||||
@@ -2017,6 +2017,35 @@ will produce:
|
||||
1) 1.449 us | }
|
||||
|
||||
|
||||
You can disable the hierarchical function call formatting and instead print a
|
||||
flat list of function entry and return events. This uses the format described
|
||||
in the Output Formatting section and respects all the trace options that
|
||||
control that formatting. Hierarchical formatting is the default.
|
||||
|
||||
hierarchical: echo nofuncgraph-flat > trace_options
|
||||
flat: echo funcgraph-flat > trace_options
|
||||
|
||||
ie:
|
||||
|
||||
# tracer: function_graph
|
||||
#
|
||||
# entries-in-buffer/entries-written: 68355/68355 #P:2
|
||||
#
|
||||
# _-----=> irqs-off
|
||||
# / _----=> need-resched
|
||||
# | / _---=> hardirq/softirq
|
||||
# || / _--=> preempt-depth
|
||||
# ||| / delay
|
||||
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
|
||||
# | | | |||| | |
|
||||
sh-1806 [001] d... 198.843443: graph_ent: func=_raw_spin_lock
|
||||
sh-1806 [001] d... 198.843445: graph_ent: func=__raw_spin_lock
|
||||
sh-1806 [001] d..1 198.843447: graph_ret: func=__raw_spin_lock
|
||||
sh-1806 [001] d..1 198.843449: graph_ret: func=_raw_spin_lock
|
||||
sh-1806 [001] d..1 198.843451: graph_ent: func=_raw_spin_unlock_irqrestore
|
||||
sh-1806 [001] d... 198.843453: graph_ret: func=_raw_spin_unlock_irqrestore
|
||||
|
||||
|
||||
You might find other useful features for this tracer in the
|
||||
following "dynamic ftrace" section such as tracing only specific
|
||||
functions or tasks.
|
||||
|
||||
15
android/configs/README
Normal file
15
android/configs/README
Normal file
@@ -0,0 +1,15 @@
|
||||
The files in this directory are meant to be used as a base for an Android
|
||||
kernel config. All devices should have the options in android-base.cfg enabled.
|
||||
While not mandatory, the options in android-recommended.cfg enable advanced
|
||||
Android features.
|
||||
|
||||
Assuming you already have a minimalist defconfig for your device, a possible
|
||||
way to enable these options would be:
|
||||
|
||||
ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
|
||||
|
||||
This will generate a .config that can then be used to save a new defconfig or
|
||||
compile a new kernel with Android features enabled.
|
||||
|
||||
Because there is no tool to consistently generate these config fragments,
|
||||
let's keep them alphabetically sorted instead of random.
|
||||
140
android/configs/android-base.cfg
Normal file
140
android/configs/android-base.cfg
Normal file
@@ -0,0 +1,140 @@
|
||||
# KEEP ALPHABETICALLY SORTED
|
||||
# CONFIG_INET_LRO is not set
|
||||
# CONFIG_MODULES is not set
|
||||
# CONFIG_OABI_COMPAT is not set
|
||||
CONFIG_ANDROID=y
|
||||
CONFIG_ANDROID_BINDER_IPC=y
|
||||
CONFIG_ANDROID_INTF_ALARM_DEV=y
|
||||
CONFIG_ANDROID_LOGGER=y
|
||||
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
|
||||
CONFIG_ASHMEM=y
|
||||
CONFIG_BLK_DEV_DM=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_CGROUPS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_DEBUG=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_SCHED=y
|
||||
CONFIG_DM_CRYPT=y
|
||||
CONFIG_DM_VERITY=y
|
||||
CONFIG_EMBEDDED=y
|
||||
CONFIG_FB=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_INET6_AH=y
|
||||
CONFIG_INET6_ESP=y
|
||||
CONFIG_INET6_IPCOMP=y
|
||||
CONFIG_INET=y
|
||||
CONFIG_INET_ESP=y
|
||||
CONFIG_IP6_NF_FILTER=y
|
||||
CONFIG_IP6_NF_IPTABLES=y
|
||||
CONFIG_IP6_NF_MANGLE=y
|
||||
CONFIG_IP6_NF_RAW=y
|
||||
CONFIG_IP6_NF_TARGET_REJECT=y
|
||||
CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
|
||||
CONFIG_IPV6_MIP6=y
|
||||
CONFIG_IPV6_MULTIPLE_TABLES=y
|
||||
CONFIG_IPV6_OPTIMISTIC_DAD=y
|
||||
CONFIG_IPV6_PRIVACY=y
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_IPV6_ROUTE_INFO=y
|
||||
CONFIG_IP_ADVANCED_ROUTER=y
|
||||
CONFIG_IP_MULTIPLE_TABLES=y
|
||||
CONFIG_IP_NF_ARPFILTER=y
|
||||
CONFIG_IP_NF_ARPTABLES=y
|
||||
CONFIG_IP_NF_ARP_MANGLE=y
|
||||
CONFIG_IP_NF_FILTER=y
|
||||
CONFIG_IP_NF_IPTABLES=y
|
||||
CONFIG_IP_NF_MANGLE=y
|
||||
CONFIG_IP_NF_MATCH_AH=y
|
||||
CONFIG_IP_NF_MATCH_ECN=y
|
||||
CONFIG_IP_NF_MATCH_TTL=y
|
||||
CONFIG_IP_NF_RAW=y
|
||||
CONFIG_IP_NF_TARGET_MASQUERADE=y
|
||||
CONFIG_IP_NF_TARGET_NETMAP=y
|
||||
CONFIG_IP_NF_TARGET_REDIRECT=y
|
||||
CONFIG_IP_NF_TARGET_REJECT=y
|
||||
CONFIG_IP_NF_TARGET_REJECT_SKERR=y
|
||||
CONFIG_NET=y
|
||||
CONFIG_NETDEVICES=y
|
||||
CONFIG_NETFILTER=y
|
||||
CONFIG_NETFILTER_TPROXY=y
|
||||
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_HELPER=y
|
||||
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
|
||||
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
|
||||
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MAC=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MARK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
|
||||
CONFIG_NETFILTER_XT_MATCH_POLICY=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
|
||||
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
|
||||
CONFIG_NETFILTER_XT_MATCH_STATE=y
|
||||
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
|
||||
CONFIG_NETFILTER_XT_MATCH_STRING=y
|
||||
CONFIG_NETFILTER_XT_MATCH_TIME=y
|
||||
CONFIG_NETFILTER_XT_MATCH_U32=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
|
||||
CONFIG_NETFILTER_XT_TARGET_MARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TRACE=y
|
||||
CONFIG_NET_CLS_ACT=y
|
||||
CONFIG_NET_CLS_U32=y
|
||||
CONFIG_NET_EMATCH=y
|
||||
CONFIG_NET_EMATCH_U32=y
|
||||
CONFIG_NET_KEY=y
|
||||
CONFIG_NET_SCHED=y
|
||||
CONFIG_NET_SCH_HTB=y
|
||||
CONFIG_NF_CONNTRACK=y
|
||||
CONFIG_NF_CONNTRACK_AMANDA=y
|
||||
CONFIG_NF_CONNTRACK_EVENTS=y
|
||||
CONFIG_NF_CONNTRACK_FTP=y
|
||||
CONFIG_NF_CONNTRACK_H323=y
|
||||
CONFIG_NF_CONNTRACK_IPV4=y
|
||||
CONFIG_NF_CONNTRACK_IPV6=y
|
||||
CONFIG_NF_CONNTRACK_IRC=y
|
||||
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
|
||||
CONFIG_NF_CONNTRACK_PPTP=y
|
||||
CONFIG_NF_CONNTRACK_SANE=y
|
||||
CONFIG_NF_CONNTRACK_TFTP=y
|
||||
CONFIG_NF_CT_NETLINK=y
|
||||
CONFIG_NF_CT_PROTO_DCCP=y
|
||||
CONFIG_NF_CT_PROTO_SCTP=y
|
||||
CONFIG_NF_CT_PROTO_UDPLITE=y
|
||||
CONFIG_NF_NAT=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_PACKET=y
|
||||
CONFIG_PM_AUTOSLEEP=y
|
||||
CONFIG_PM_WAKELOCKS=y
|
||||
CONFIG_PPP=y
|
||||
CONFIG_PPPOLAC=y
|
||||
CONFIG_PPPOPNS=y
|
||||
CONFIG_PPP_BSDCOMP=y
|
||||
CONFIG_PPP_DEFLATE=y
|
||||
CONFIG_PPP_MPPE=y
|
||||
CONFIG_PREEMPT=y
|
||||
CONFIG_RESOURCE_COUNTERS=y
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RT_GROUP_SCHED=y
|
||||
CONFIG_STAGING=y
|
||||
CONFIG_SWITCH=y
|
||||
CONFIG_SYNC=y
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_TUN=y
|
||||
CONFIG_UNIX=y
|
||||
CONFIG_USB_GADGET=y
|
||||
CONFIG_USB_G_ANDROID=y
|
||||
CONFIG_USB_OTG_WAKELOCK=y
|
||||
CONFIG_XFRM_USER=y
|
||||
121
android/configs/android-recommended.cfg
Normal file
121
android/configs/android-recommended.cfg
Normal file
@@ -0,0 +1,121 @@
|
||||
# KEEP ALPHABETICALLY SORTED
|
||||
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
|
||||
# CONFIG_INPUT_MOUSE is not set
|
||||
# CONFIG_LEGACY_PTYS is not set
|
||||
# CONFIG_NF_CONNTRACK_SIP is not set
|
||||
# CONFIG_PM_WAKELOCKS_GC is not set
|
||||
# CONFIG_VT is not set
|
||||
CONFIG_ANDROID_TIMED_GPIO=y
|
||||
CONFIG_BACKLIGHT_LCD_SUPPORT=y
|
||||
CONFIG_BLK_DEV_LOOP=y
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_BLK_DEV_RAM_SIZE=8192
|
||||
CONFIG_COMPACTION=y
|
||||
CONFIG_DM_UEVENT=y
|
||||
CONFIG_DRAGONRISE_FF=y
|
||||
CONFIG_ENABLE_DEFAULT_TRACERS=y
|
||||
CONFIG_EXT4_FS=y
|
||||
CONFIG_EXT4_FS_SECURITY=y
|
||||
CONFIG_FUSE_FS=y
|
||||
CONFIG_GREENASIA_FF=y
|
||||
CONFIG_HIDRAW=y
|
||||
CONFIG_HID_A4TECH=y
|
||||
CONFIG_HID_ACRUX=y
|
||||
CONFIG_HID_ACRUX_FF=y
|
||||
CONFIG_HID_APPLE=y
|
||||
CONFIG_HID_BELKIN=y
|
||||
CONFIG_HID_CHERRY=y
|
||||
CONFIG_HID_CHICONY=y
|
||||
CONFIG_HID_CYPRESS=y
|
||||
CONFIG_HID_DRAGONRISE=y
|
||||
CONFIG_HID_ELECOM=y
|
||||
CONFIG_HID_EMS_FF=y
|
||||
CONFIG_HID_EZKEY=y
|
||||
CONFIG_HID_GREENASIA=y
|
||||
CONFIG_HID_GYRATION=y
|
||||
CONFIG_HID_HOLTEK=y
|
||||
CONFIG_HID_KENSINGTON=y
|
||||
CONFIG_HID_KEYTOUCH=y
|
||||
CONFIG_HID_KYE=y
|
||||
CONFIG_HID_LCPOWER=y
|
||||
CONFIG_HID_LOGITECH=y
|
||||
CONFIG_HID_LOGITECH_DJ=y
|
||||
CONFIG_HID_MAGICMOUSE=y
|
||||
CONFIG_HID_MICROSOFT=y
|
||||
CONFIG_HID_MONTEREY=y
|
||||
CONFIG_HID_MULTITOUCH=y
|
||||
CONFIG_HID_NTRIG=y
|
||||
CONFIG_HID_ORTEK=y
|
||||
CONFIG_HID_PANTHERLORD=y
|
||||
CONFIG_HID_PETALYNX=y
|
||||
CONFIG_HID_PICOLCD=y
|
||||
CONFIG_HID_PRIMAX=y
|
||||
CONFIG_HID_PRODIKEYS=y
|
||||
CONFIG_HID_ROCCAT=y
|
||||
CONFIG_HID_SAITEK=y
|
||||
CONFIG_HID_SAMSUNG=y
|
||||
CONFIG_HID_SMARTJOYPLUS=y
|
||||
CONFIG_HID_SONY=y
|
||||
CONFIG_HID_SPEEDLINK=y
|
||||
CONFIG_HID_SUNPLUS=y
|
||||
CONFIG_HID_THRUSTMASTER=y
|
||||
CONFIG_HID_TIVO=y
|
||||
CONFIG_HID_TOPSEED=y
|
||||
CONFIG_HID_TWINHAN=y
|
||||
CONFIG_HID_UCLOGIC=y
|
||||
CONFIG_HID_WACOM=y
|
||||
CONFIG_HID_WALTOP=y
|
||||
CONFIG_HID_WIIMOTE=y
|
||||
CONFIG_HID_ZEROPLUS=y
|
||||
CONFIG_HID_ZYDACRON=y
|
||||
CONFIG_INPUT_EVDEV=y
|
||||
CONFIG_INPUT_GPIO=y
|
||||
CONFIG_INPUT_JOYSTICK=y
|
||||
CONFIG_INPUT_KEYCHORD=y
|
||||
CONFIG_INPUT_KEYRESET=y
|
||||
CONFIG_INPUT_MISC=y
|
||||
CONFIG_INPUT_TABLET=y
|
||||
CONFIG_INPUT_UINPUT=y
|
||||
CONFIG_ION=y
|
||||
CONFIG_JOYSTICK_XPAD=y
|
||||
CONFIG_JOYSTICK_XPAD_FF=y
|
||||
CONFIG_JOYSTICK_XPAD_LEDS=y
|
||||
CONFIG_KALLSYMS_ALL=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_LOGIG940_FF=y
|
||||
CONFIG_LOGIRUMBLEPAD2_FF=y
|
||||
CONFIG_LOGITECH_FF=y
|
||||
CONFIG_MD=y
|
||||
CONFIG_MEDIA_SUPPORT=y
|
||||
CONFIG_MSDOS_FS=y
|
||||
CONFIG_PANIC_TIMEOUT=5
|
||||
CONFIG_PANTHERLORD_FF=y
|
||||
CONFIG_PERF_EVENTS=y
|
||||
CONFIG_PM_DEBUG=y
|
||||
CONFIG_PM_RUNTIME=y
|
||||
CONFIG_PM_WAKELOCKS_LIMIT=0
|
||||
CONFIG_POWER_SUPPLY=y
|
||||
CONFIG_PSTORE=y
|
||||
CONFIG_PSTORE_CONSOLE=y
|
||||
CONFIG_PSTORE_RAM=y
|
||||
CONFIG_SCHEDSTATS=y
|
||||
CONFIG_SMARTJOYPLUS_FF=y
|
||||
CONFIG_SND=y
|
||||
CONFIG_SOUND=y
|
||||
CONFIG_SUSPEND_TIME=y
|
||||
CONFIG_TABLET_USB_ACECAD=y
|
||||
CONFIG_TABLET_USB_AIPTEK=y
|
||||
CONFIG_TABLET_USB_GTCO=y
|
||||
CONFIG_TABLET_USB_HANWANG=y
|
||||
CONFIG_TABLET_USB_KBTAB=y
|
||||
CONFIG_TABLET_USB_WACOM=y
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_TMPFS_POSIX_ACL=y
|
||||
CONFIG_UHID=y
|
||||
CONFIG_UID_STAT=y
|
||||
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
|
||||
CONFIG_USB_EHCI_HCD=y
|
||||
CONFIG_USB_HIDDEV=y
|
||||
CONFIG_USB_USBNET=y
|
||||
CONFIG_VFAT_FS=y
|
||||
@@ -1912,6 +1912,15 @@ config XEN
|
||||
help
|
||||
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
|
||||
|
||||
config ARM_FLUSH_CONSOLE_ON_RESTART
|
||||
bool "Force flush the console on restart"
|
||||
help
|
||||
If the console is locked while the system is rebooted, the messages
|
||||
in the temporary logbuffer would not have propagated to all the
|
||||
console drivers. This option forces the console lock to be
|
||||
released if it failed to be acquired, which will cause all the
|
||||
pending messages to be flushed.
|
||||
|
||||
endmenu
|
||||
|
||||
menu "Boot options"
|
||||
@@ -1941,6 +1950,21 @@ config DEPRECATED_PARAM_STRUCT
|
||||
This was deprecated in 2001 and announced to live on for 5 years.
|
||||
Some old boot loaders still use this way.
|
||||
|
||||
config BUILD_ARM_APPENDED_DTB_IMAGE
|
||||
bool "Build a concatenated zImage/dtb by default"
|
||||
depends on OF
|
||||
help
|
||||
Enabling this option will cause a concatenated zImage and list of
|
||||
DTBs to be built by default (instead of a standalone zImage.)
|
||||
The image will be built in arch/arm/boot/zImage-dtb
|
||||
|
||||
config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
|
||||
string "Default dtb names"
|
||||
depends on BUILD_ARM_APPENDED_DTB_IMAGE
|
||||
help
|
||||
Space separated list of names of dtbs to append when
|
||||
building a concatenated zImage-dtb.
|
||||
|
||||
# Compressed boot loader in ROM. Yes, we really want to ask about
|
||||
# TEXT and BSS so we preserve their values in the config files.
|
||||
config ZBOOT_ROM_TEXT
|
||||
|
||||
@@ -1172,6 +1172,14 @@ config EARLY_PRINTK
|
||||
kernel low-level debugging functions. Add earlyprintk to your
|
||||
kernel parameters to enable this console.
|
||||
|
||||
config EARLY_PRINTK_DIRECT
|
||||
bool "Early printk direct"
|
||||
depends on DEBUG_LL
|
||||
help
|
||||
Say Y here if you want to have an early console using the
|
||||
kernel low-level debugging functions and EARLY_PRINTK is
|
||||
not early enough.
|
||||
|
||||
config OC_ETM
|
||||
bool "On-chip ETM and ETB"
|
||||
depends on ARM_AMBA
|
||||
|
||||
@@ -277,6 +277,8 @@ libs-y := arch/arm/lib/ $(libs-y)
|
||||
# Default target when executing plain make
|
||||
ifeq ($(CONFIG_XIP_KERNEL),y)
|
||||
KBUILD_IMAGE := xipImage
|
||||
else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y)
|
||||
KBUILD_IMAGE := zImage-dtb
|
||||
else
|
||||
KBUILD_IMAGE := zImage
|
||||
endif
|
||||
@@ -314,6 +316,9 @@ PHONY += dtbs
|
||||
dtbs: scripts
|
||||
$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs
|
||||
|
||||
zImage-dtb: vmlinux scripts dtbs
|
||||
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
|
||||
|
||||
# We use MRPROPER_FILES and CLEAN_FILES now
|
||||
archclean:
|
||||
$(Q)$(MAKE) $(clean)=$(boot)
|
||||
|
||||
1
arch/arm/boot/.gitignore
vendored
1
arch/arm/boot/.gitignore
vendored
@@ -4,3 +4,4 @@ xipImage
|
||||
bootpImage
|
||||
uImage
|
||||
*.dtb
|
||||
zImage-dtb
|
||||
@@ -14,6 +14,7 @@
|
||||
ifneq ($(MACHINE),)
|
||||
include $(srctree)/$(MACHINE)/Makefile.boot
|
||||
endif
|
||||
include $(srctree)/arch/arm/boot/dts/Makefile
|
||||
|
||||
# Note: the following conditions must always be true:
|
||||
# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
|
||||
@@ -27,6 +28,14 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS
|
||||
|
||||
targets := Image zImage xipImage bootpImage uImage
|
||||
|
||||
DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
|
||||
ifneq ($(DTB_NAMES),)
|
||||
DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
|
||||
else
|
||||
DTB_LIST := $(dtb-y)
|
||||
endif
|
||||
DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
|
||||
|
||||
ifeq ($(CONFIG_XIP_KERNEL),y)
|
||||
|
||||
$(obj)/xipImage: vmlinux FORCE
|
||||
@@ -55,6 +64,10 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
|
||||
$(call if_changed,objcopy)
|
||||
@$(kecho) ' Kernel: $@ is ready'
|
||||
|
||||
$(obj)/zImage-dtb: $(obj)/zImage $(DTB_OBJS) FORCE
|
||||
$(call if_changed,cat)
|
||||
@echo ' Kernel: $@ is ready'
|
||||
|
||||
endif
|
||||
|
||||
ifneq ($(LOADADDR),)
|
||||
|
||||
@@ -734,6 +734,8 @@ __armv7_mmu_cache_on:
|
||||
bic r6, r6, #1 << 31 @ 32-bit translation system
|
||||
bic r6, r6, #3 << 0 @ use only ttbr0
|
||||
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
|
||||
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
|
||||
mcr p15, 0, r0, c7, c5, 4 @ ISB
|
||||
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
|
||||
mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
|
||||
#endif
|
||||
|
||||
@@ -323,13 +323,20 @@ dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb \
|
||||
zynq-zc706.dtb \
|
||||
zynq-zed.dtb
|
||||
|
||||
DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
|
||||
ifneq ($(DTB_NAMES),)
|
||||
DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
|
||||
else
|
||||
DTB_LIST := $(dtb-y)
|
||||
endif
|
||||
|
||||
targets += dtbs
|
||||
targets += $(dtb-y)
|
||||
targets += $(DTB_LIST)
|
||||
endif
|
||||
|
||||
# *.dtb used to be generated in the directory above. Clean out the
|
||||
# old build results so people don't accidentally use them.
|
||||
dtbs: $(addprefix $(obj)/, $(dtb-y))
|
||||
dtbs: $(addprefix $(obj)/, $(DTB_LIST))
|
||||
$(Q)rm -f $(obj)/../*.dtb
|
||||
|
||||
clean-files := *.dtb
|
||||
|
||||
@@ -20,3 +20,53 @@ config SHARP_SCOOP
|
||||
|
||||
config TI_PRIV_EDMA
|
||||
bool
|
||||
|
||||
config FIQ_GLUE
|
||||
bool
|
||||
select FIQ
|
||||
|
||||
config FIQ_DEBUGGER
|
||||
bool "FIQ Mode Serial Debugger"
|
||||
select FIQ
|
||||
select FIQ_GLUE
|
||||
default n
|
||||
help
|
||||
The FIQ serial debugger can accept commands even when the
|
||||
kernel is unresponsive due to being stuck with interrupts
|
||||
disabled.
|
||||
|
||||
|
||||
config FIQ_DEBUGGER_NO_SLEEP
|
||||
bool "Keep serial debugger active"
|
||||
depends on FIQ_DEBUGGER
|
||||
default n
|
||||
help
|
||||
Enables the serial debugger at boot. Passing
|
||||
fiq_debugger.no_sleep on the kernel commandline will
|
||||
override this config option.
|
||||
|
||||
config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
|
||||
bool "Don't disable wakeup IRQ when debugger is active"
|
||||
depends on FIQ_DEBUGGER
|
||||
default n
|
||||
help
|
||||
Don't disable the wakeup irq when enabling the uart clock. This will
|
||||
cause extra interrupts, but it makes the serial debugger usable with
|
||||
on some MSM radio builds that ignore the uart clock request in power
|
||||
collapse.
|
||||
|
||||
config FIQ_DEBUGGER_CONSOLE
|
||||
bool "Console on FIQ Serial Debugger port"
|
||||
depends on FIQ_DEBUGGER
|
||||
default n
|
||||
help
|
||||
Enables a console so that printk messages are displayed on
|
||||
the debugger serial port as the occur.
|
||||
|
||||
config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
|
||||
bool "Put the FIQ debugger into console mode by default"
|
||||
depends on FIQ_DEBUGGER_CONSOLE
|
||||
default n
|
||||
help
|
||||
If enabled, this puts the fiq debugger into console mode by default.
|
||||
Otherwise, the fiq debugger will start out in debug mode.
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
|
||||
obj-y += firmware.o
|
||||
|
||||
obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o
|
||||
obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
|
||||
obj-$(CONFIG_ICST) += icst.o
|
||||
obj-$(CONFIG_SA1111) += sa1111.o
|
||||
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
|
||||
|
||||
1376
arch/arm/common/fiq_debugger.c
Normal file
1376
arch/arm/common/fiq_debugger.c
Normal file
File diff suppressed because it is too large
Load Diff
94
arch/arm/common/fiq_debugger_ringbuf.h
Normal file
94
arch/arm/common/fiq_debugger_ringbuf.h
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* arch/arm/common/fiq_debugger_ringbuf.c
|
||||
*
|
||||
* simple lockless ringbuffer
|
||||
*
|
||||
* Copyright (C) 2010 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct fiq_debugger_ringbuf {
|
||||
int len;
|
||||
int head;
|
||||
int tail;
|
||||
u8 buf[];
|
||||
};
|
||||
|
||||
|
||||
static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
|
||||
{
|
||||
struct fiq_debugger_ringbuf *rbuf;
|
||||
|
||||
rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
|
||||
if (rbuf == NULL)
|
||||
return NULL;
|
||||
|
||||
rbuf->len = len;
|
||||
rbuf->head = 0;
|
||||
rbuf->tail = 0;
|
||||
smp_mb();
|
||||
|
||||
return rbuf;
|
||||
}
|
||||
|
||||
static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
|
||||
{
|
||||
kfree(rbuf);
|
||||
}
|
||||
|
||||
static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
|
||||
{
|
||||
int level = rbuf->head - rbuf->tail;
|
||||
|
||||
if (level < 0)
|
||||
level = rbuf->len + level;
|
||||
|
||||
return level;
|
||||
}
|
||||
|
||||
static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
|
||||
{
|
||||
return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
|
||||
}
|
||||
|
||||
static inline u8
|
||||
fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
|
||||
{
|
||||
return rbuf->buf[(rbuf->tail + i) % rbuf->len];
|
||||
}
|
||||
|
||||
static inline int
|
||||
fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
|
||||
{
|
||||
count = min(count, fiq_debugger_ringbuf_level(rbuf));
|
||||
|
||||
rbuf->tail = (rbuf->tail + count) % rbuf->len;
|
||||
smp_mb();
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static inline int
|
||||
fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
|
||||
{
|
||||
if (fiq_debugger_ringbuf_room(rbuf) == 0)
|
||||
return 0;
|
||||
|
||||
rbuf->buf[rbuf->head] = datum;
|
||||
smp_mb();
|
||||
rbuf->head = (rbuf->head + 1) % rbuf->len;
|
||||
smp_mb();
|
||||
|
||||
return 1;
|
||||
}
|
||||
118
arch/arm/common/fiq_glue.S
Normal file
118
arch/arm/common/fiq_glue.S
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
* Copyright (C) 2008 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
.text
|
||||
|
||||
.global fiq_glue_end
|
||||
|
||||
/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
|
||||
|
||||
ENTRY(fiq_glue)
|
||||
/* store pc, cpsr from previous mode, reserve space for spsr */
|
||||
mrs r12, spsr
|
||||
sub lr, lr, #4
|
||||
subs r10, #1
|
||||
bne nested_fiq
|
||||
|
||||
str r12, [sp, #-8]!
|
||||
str lr, [sp, #-4]!
|
||||
|
||||
/* store r8-r14 from previous mode */
|
||||
sub sp, sp, #(7 * 4)
|
||||
stmia sp, {r8-r14}^
|
||||
nop
|
||||
|
||||
/* store r0-r7 from previous mode */
|
||||
stmfd sp!, {r0-r7}
|
||||
|
||||
/* setup func(data,regs) arguments */
|
||||
mov r0, r9
|
||||
mov r1, sp
|
||||
mov r3, r8
|
||||
|
||||
mov r7, sp
|
||||
|
||||
/* Get sp and lr from non-user modes */
|
||||
and r4, r12, #MODE_MASK
|
||||
cmp r4, #USR_MODE
|
||||
beq fiq_from_usr_mode
|
||||
|
||||
mov r7, sp
|
||||
orr r4, r4, #(PSR_I_BIT | PSR_F_BIT)
|
||||
msr cpsr_c, r4
|
||||
str sp, [r7, #(4 * 13)]
|
||||
str lr, [r7, #(4 * 14)]
|
||||
mrs r5, spsr
|
||||
str r5, [r7, #(4 * 17)]
|
||||
|
||||
cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
/* use fiq stack if we reenter this mode */
|
||||
subne sp, r7, #(4 * 3)
|
||||
|
||||
fiq_from_usr_mode:
|
||||
msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
mov r2, sp
|
||||
sub sp, r7, #12
|
||||
stmfd sp!, {r2, ip, lr}
|
||||
/* call func(data,regs) */
|
||||
blx r3
|
||||
ldmfd sp, {r2, ip, lr}
|
||||
mov sp, r2
|
||||
|
||||
/* restore/discard saved state */
|
||||
cmp r4, #USR_MODE
|
||||
beq fiq_from_usr_mode_exit
|
||||
|
||||
msr cpsr_c, r4
|
||||
ldr sp, [r7, #(4 * 13)]
|
||||
ldr lr, [r7, #(4 * 14)]
|
||||
msr spsr_cxsf, r5
|
||||
|
||||
fiq_from_usr_mode_exit:
|
||||
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
|
||||
ldmfd sp!, {r0-r7}
|
||||
ldr lr, [sp, #(4 * 7)]
|
||||
ldr r12, [sp, #(4 * 8)]
|
||||
add sp, sp, #(10 * 4)
|
||||
exit_fiq:
|
||||
msr spsr_cxsf, r12
|
||||
add r10, #1
|
||||
cmp r11, #0
|
||||
moveqs pc, lr
|
||||
bx r11 /* jump to custom fiq return function */
|
||||
|
||||
nested_fiq:
|
||||
orr r12, r12, #(PSR_F_BIT)
|
||||
b exit_fiq
|
||||
|
||||
fiq_glue_end:
|
||||
|
||||
ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
|
||||
stmfd sp!, {r4}
|
||||
mrs r4, cpsr
|
||||
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
|
||||
movs r8, r0
|
||||
mov r9, r1
|
||||
mov sp, r2
|
||||
mov r11, r3
|
||||
moveq r10, #0
|
||||
movne r10, #1
|
||||
msr cpsr_c, r4
|
||||
ldmfd sp!, {r4}
|
||||
bx lr
|
||||
|
||||
147
arch/arm/common/fiq_glue_setup.c
Normal file
147
arch/arm/common/fiq_glue_setup.c
Normal file
@@ -0,0 +1,147 @@
|
||||
/*
|
||||
* Copyright (C) 2010 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/fiq.h>
|
||||
#include <asm/fiq_glue.h>
|
||||
|
||||
extern unsigned char fiq_glue, fiq_glue_end;
|
||||
extern void fiq_glue_setup(void *func, void *data, void *sp,
|
||||
fiq_return_handler_t fiq_return_handler);
|
||||
|
||||
static struct fiq_handler fiq_debbuger_fiq_handler = {
|
||||
.name = "fiq_glue",
|
||||
};
|
||||
DEFINE_PER_CPU(void *, fiq_stack);
|
||||
static struct fiq_glue_handler *current_handler;
|
||||
static fiq_return_handler_t fiq_return_handler;
|
||||
static DEFINE_MUTEX(fiq_glue_lock);
|
||||
|
||||
static void fiq_glue_setup_helper(void *info)
|
||||
{
|
||||
struct fiq_glue_handler *handler = info;
|
||||
fiq_glue_setup(handler->fiq, handler,
|
||||
__get_cpu_var(fiq_stack) + THREAD_START_SP,
|
||||
fiq_return_handler);
|
||||
}
|
||||
|
||||
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
|
||||
{
|
||||
int ret;
|
||||
int cpu;
|
||||
|
||||
if (!handler || !handler->fiq)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&fiq_glue_lock);
|
||||
if (fiq_stack) {
|
||||
ret = -EBUSY;
|
||||
goto err_busy;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
void *stack;
|
||||
stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
|
||||
if (WARN_ON(!stack)) {
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc_fiq_stack;
|
||||
}
|
||||
per_cpu(fiq_stack, cpu) = stack;
|
||||
}
|
||||
|
||||
ret = claim_fiq(&fiq_debbuger_fiq_handler);
|
||||
if (WARN_ON(ret))
|
||||
goto err_claim_fiq;
|
||||
|
||||
current_handler = handler;
|
||||
on_each_cpu(fiq_glue_setup_helper, handler, true);
|
||||
set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
|
||||
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
return 0;
|
||||
|
||||
err_claim_fiq:
|
||||
err_alloc_fiq_stack:
|
||||
for_each_possible_cpu(cpu) {
|
||||
__free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
|
||||
per_cpu(fiq_stack, cpu) = NULL;
|
||||
}
|
||||
err_busy:
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void fiq_glue_update_return_handler(void (*fiq_return)(void))
|
||||
{
|
||||
fiq_return_handler = fiq_return;
|
||||
if (current_handler)
|
||||
on_each_cpu(fiq_glue_setup_helper, current_handler, true);
|
||||
}
|
||||
|
||||
int fiq_glue_set_return_handler(void (*fiq_return)(void))
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&fiq_glue_lock);
|
||||
if (fiq_return_handler) {
|
||||
ret = -EBUSY;
|
||||
goto err_busy;
|
||||
}
|
||||
fiq_glue_update_return_handler(fiq_return);
|
||||
ret = 0;
|
||||
err_busy:
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(fiq_glue_set_return_handler);
|
||||
|
||||
int fiq_glue_clear_return_handler(void (*fiq_return)(void))
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&fiq_glue_lock);
|
||||
if (WARN_ON(fiq_return_handler != fiq_return)) {
|
||||
ret = -EINVAL;
|
||||
goto err_inval;
|
||||
}
|
||||
fiq_glue_update_return_handler(NULL);
|
||||
ret = 0;
|
||||
err_inval:
|
||||
mutex_unlock(&fiq_glue_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(fiq_glue_clear_return_handler);
|
||||
|
||||
/**
|
||||
* fiq_glue_resume - Restore fiqs after suspend or low power idle states
|
||||
*
|
||||
* This must be called before calling local_fiq_enable after returning from a
|
||||
* power state where the fiq mode registers were lost. If a driver provided
|
||||
* a resume hook when it registered the handler it will be called.
|
||||
*/
|
||||
|
||||
void fiq_glue_resume(void)
|
||||
{
|
||||
if (!current_handler)
|
||||
return;
|
||||
fiq_glue_setup(current_handler->fiq, current_handler,
|
||||
__get_cpu_var(fiq_stack) + THREAD_START_SP,
|
||||
fiq_return_handler);
|
||||
if (current_handler->resume)
|
||||
current_handler->resume(current_handler);
|
||||
}
|
||||
|
||||
64
arch/arm/include/asm/fiq_debugger.h
Normal file
64
arch/arm/include/asm/fiq_debugger.h
Normal file
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* arch/arm/include/asm/fiq_debugger.h
|
||||
*
|
||||
* Copyright (C) 2010 Google, Inc.
|
||||
* Author: Colin Cross <ccross@android.com>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
|
||||
#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
|
||||
|
||||
#include <linux/serial_core.h>
|
||||
|
||||
#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
|
||||
#define FIQ_DEBUGGER_BREAK 0x00ff0100
|
||||
|
||||
#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq"
|
||||
#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal"
|
||||
#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup"
|
||||
|
||||
/**
|
||||
* struct fiq_debugger_pdata - fiq debugger platform data
|
||||
* @uart_resume: used to restore uart state right before enabling
|
||||
* the fiq.
|
||||
* @uart_enable: Do the work necessary to communicate with the uart
|
||||
* hw (enable clocks, etc.). This must be ref-counted.
|
||||
* @uart_disable: Do the work necessary to disable the uart hw
|
||||
* (disable clocks, etc.). This must be ref-counted.
|
||||
* @uart_dev_suspend: called during PM suspend, generally not needed
|
||||
* for real fiq mode debugger.
|
||||
* @uart_dev_resume: called during PM resume, generally not needed
|
||||
* for real fiq mode debugger.
|
||||
*/
|
||||
struct fiq_debugger_pdata {
|
||||
int (*uart_init)(struct platform_device *pdev);
|
||||
void (*uart_free)(struct platform_device *pdev);
|
||||
int (*uart_resume)(struct platform_device *pdev);
|
||||
int (*uart_getc)(struct platform_device *pdev);
|
||||
void (*uart_putc)(struct platform_device *pdev, unsigned int c);
|
||||
void (*uart_flush)(struct platform_device *pdev);
|
||||
void (*uart_enable)(struct platform_device *pdev);
|
||||
void (*uart_disable)(struct platform_device *pdev);
|
||||
|
||||
int (*uart_dev_suspend)(struct platform_device *pdev);
|
||||
int (*uart_dev_resume)(struct platform_device *pdev);
|
||||
|
||||
void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
|
||||
bool enable);
|
||||
void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
|
||||
|
||||
void (*force_irq)(struct platform_device *pdev, unsigned int irq);
|
||||
void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
|
||||
};
|
||||
|
||||
#endif
|
||||
33
arch/arm/include/asm/fiq_glue.h
Normal file
33
arch/arm/include/asm/fiq_glue.h
Normal file
@@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Copyright (C) 2010 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef __ASM_FIQ_GLUE_H
|
||||
#define __ASM_FIQ_GLUE_H
|
||||
|
||||
struct fiq_glue_handler {
|
||||
void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
|
||||
void (*resume)(struct fiq_glue_handler *h);
|
||||
};
|
||||
typedef void (*fiq_return_handler_t)(void);
|
||||
|
||||
int fiq_glue_register_handler(struct fiq_glue_handler *handler);
|
||||
int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
|
||||
int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
|
||||
|
||||
#ifdef CONFIG_FIQ_GLUE
|
||||
void fiq_glue_resume(void);
|
||||
#else
|
||||
static inline void fiq_glue_resume(void) {}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -5,7 +5,7 @@
|
||||
#include <linux/threads.h>
|
||||
#include <asm/irq.h>
|
||||
|
||||
#define NR_IPI 8
|
||||
#define NR_IPI 9
|
||||
|
||||
typedef struct {
|
||||
unsigned int __softirq_pending;
|
||||
|
||||
@@ -66,6 +66,7 @@
|
||||
#define L2X0_STNDBY_MODE_EN (1 << 0)
|
||||
|
||||
/* Registers shifts and masks */
|
||||
#define L2X0_CACHE_ID_REV_MASK (0x3f)
|
||||
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
|
||||
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
|
||||
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
|
||||
@@ -106,6 +107,8 @@
|
||||
|
||||
#define L2X0_WAY_SIZE_SHIFT 3
|
||||
|
||||
#define REV_PL310_R2P0 4
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
|
||||
#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
|
||||
|
||||
@@ -17,15 +17,23 @@
|
||||
#define TRACER_ACCESSED_BIT 0
|
||||
#define TRACER_RUNNING_BIT 1
|
||||
#define TRACER_CYCLE_ACC_BIT 2
|
||||
#define TRACER_TRACE_DATA_BIT 3
|
||||
#define TRACER_TIMESTAMP_BIT 4
|
||||
#define TRACER_BRANCHOUTPUT_BIT 5
|
||||
#define TRACER_RETURN_STACK_BIT 6
|
||||
#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
|
||||
#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
|
||||
#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
|
||||
#define TRACER_TRACE_DATA BIT(TRACER_TRACE_DATA_BIT)
|
||||
#define TRACER_TIMESTAMP BIT(TRACER_TIMESTAMP_BIT)
|
||||
#define TRACER_BRANCHOUTPUT BIT(TRACER_BRANCHOUTPUT_BIT)
|
||||
#define TRACER_RETURN_STACK BIT(TRACER_RETURN_STACK_BIT)
|
||||
|
||||
#define TRACER_TIMEOUT 10000
|
||||
|
||||
#define etm_writel(t, v, x) \
|
||||
(writel_relaxed((v), (t)->etm_regs + (x)))
|
||||
#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
|
||||
#define etm_writel(t, id, v, x) \
|
||||
(writel_relaxed((v), (t)->etm_regs[(id)] + (x)))
|
||||
#define etm_readl(t, id, x) (readl_relaxed((t)->etm_regs[(id)] + (x)))
|
||||
|
||||
/* CoreSight Management Registers */
|
||||
#define CSMR_LOCKACCESS 0xfb0
|
||||
@@ -43,7 +51,7 @@
|
||||
#define ETMCTRL_POWERDOWN 1
|
||||
#define ETMCTRL_PROGRAM (1 << 10)
|
||||
#define ETMCTRL_PORTSEL (1 << 11)
|
||||
#define ETMCTRL_DO_CONTEXTID (3 << 14)
|
||||
#define ETMCTRL_CONTEXTIDSIZE(x) (((x) & 3) << 14)
|
||||
#define ETMCTRL_PORTMASK1 (7 << 4)
|
||||
#define ETMCTRL_PORTMASK2 (1 << 21)
|
||||
#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
|
||||
@@ -55,9 +63,12 @@
|
||||
#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
|
||||
#define ETMCTRL_BRANCH_OUTPUT (1 << 8)
|
||||
#define ETMCTRL_CYCLEACCURATE (1 << 12)
|
||||
#define ETMCTRL_TIMESTAMP_EN (1 << 28)
|
||||
#define ETMCTRL_RETURN_STACK_EN (1 << 29)
|
||||
|
||||
/* ETM configuration code register */
|
||||
#define ETMR_CONFCODE (0x04)
|
||||
#define ETMCCR_ETMIDR_PRESENT BIT(31)
|
||||
|
||||
/* ETM trace start/stop resource control register */
|
||||
#define ETMR_TRACESSCTRL (0x18)
|
||||
@@ -113,10 +124,25 @@
|
||||
#define ETMR_TRACEENCTRL 0x24
|
||||
#define ETMTE_INCLEXCL BIT(24)
|
||||
#define ETMR_TRACEENEVT 0x20
|
||||
#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
|
||||
ETMCTRL_DATA_DO_ADDR | \
|
||||
ETMCTRL_BRANCH_OUTPUT | \
|
||||
ETMCTRL_DO_CONTEXTID)
|
||||
|
||||
#define ETMR_VIEWDATAEVT 0x30
|
||||
#define ETMR_VIEWDATACTRL1 0x34
|
||||
#define ETMR_VIEWDATACTRL2 0x38
|
||||
#define ETMR_VIEWDATACTRL3 0x3c
|
||||
#define ETMVDC3_EXCLONLY BIT(16)
|
||||
|
||||
#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT)
|
||||
|
||||
#define ETMR_ID 0x1e4
|
||||
#define ETMIDR_VERSION(x) (((x) >> 4) & 0xff)
|
||||
#define ETMIDR_VERSION_3_1 0x21
|
||||
#define ETMIDR_VERSION_PFT_1_0 0x30
|
||||
|
||||
#define ETMR_CCE 0x1e8
|
||||
#define ETMCCER_RETURN_STACK_IMPLEMENTED BIT(23)
|
||||
#define ETMCCER_TIMESTAMPING_IMPLEMENTED BIT(22)
|
||||
|
||||
#define ETMR_TRACEIDR 0x200
|
||||
|
||||
/* ETM management registers, "ETM Architecture", 3.5.24 */
|
||||
#define ETMMR_OSLAR 0x300
|
||||
@@ -140,14 +166,16 @@
|
||||
#define ETBFF_TRIGIN BIT(8)
|
||||
#define ETBFF_TRIGEVT BIT(9)
|
||||
#define ETBFF_TRIGFL BIT(10)
|
||||
#define ETBFF_STOPFL BIT(12)
|
||||
|
||||
#define etb_writel(t, v, x) \
|
||||
(writel_relaxed((v), (t)->etb_regs + (x)))
|
||||
#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
|
||||
|
||||
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
|
||||
#define etm_unlock(t) \
|
||||
do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
|
||||
#define etm_lock(t, id) \
|
||||
do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
|
||||
#define etm_unlock(t, id) \
|
||||
do { etm_writel((t), (id), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
|
||||
|
||||
#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
|
||||
#define etb_unlock(t) \
|
||||
|
||||
@@ -35,6 +35,9 @@ extern void (*handle_arch_irq)(struct pt_regs *);
|
||||
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
|
||||
#endif
|
||||
|
||||
void arch_trigger_all_cpu_backtrace(void);
|
||||
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
28
arch/arm/include/asm/mach/mmc.h
Normal file
28
arch/arm/include/asm/mach/mmc.h
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* arch/arm/include/asm/mach/mmc.h
|
||||
*/
|
||||
#ifndef ASMARM_MACH_MMC_H
|
||||
#define ASMARM_MACH_MMC_H
|
||||
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/card.h>
|
||||
#include <linux/mmc/sdio_func.h>
|
||||
|
||||
struct embedded_sdio_data {
|
||||
struct sdio_cis cis;
|
||||
struct sdio_cccr cccr;
|
||||
struct sdio_embedded_func *funcs;
|
||||
int num_funcs;
|
||||
};
|
||||
|
||||
struct mmc_platform_data {
|
||||
unsigned int ocr_mask; /* available voltages */
|
||||
int built_in; /* built-in device flag */
|
||||
int card_present; /* card detect state */
|
||||
u32 (*translate_vdd)(struct device *, unsigned int);
|
||||
unsigned int (*status)(struct device *);
|
||||
struct embedded_sdio_data *embedded_sdio;
|
||||
int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
|
||||
};
|
||||
|
||||
#endif
|
||||
@@ -86,6 +86,8 @@ extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
|
||||
|
||||
extern int register_ipi_completion(struct completion *completion, int cpu);
|
||||
|
||||
extern void smp_send_all_cpu_backtrace(void);
|
||||
|
||||
struct smp_operations {
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sysrq.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/clk.h>
|
||||
@@ -37,26 +38,37 @@ MODULE_AUTHOR("Alexander Shishkin");
|
||||
struct tracectx {
|
||||
unsigned int etb_bufsz;
|
||||
void __iomem *etb_regs;
|
||||
void __iomem *etm_regs;
|
||||
void __iomem **etm_regs;
|
||||
int etm_regs_count;
|
||||
unsigned long flags;
|
||||
int ncmppairs;
|
||||
int etm_portsz;
|
||||
int etm_contextid_size;
|
||||
u32 etb_fc;
|
||||
unsigned long range_start;
|
||||
unsigned long range_end;
|
||||
unsigned long data_range_start;
|
||||
unsigned long data_range_end;
|
||||
bool dump_initial_etb;
|
||||
struct device *dev;
|
||||
struct clk *emu_clk;
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
static struct tracectx tracer;
|
||||
static struct tracectx tracer = {
|
||||
.range_start = (unsigned long)_stext,
|
||||
.range_end = (unsigned long)_etext,
|
||||
};
|
||||
|
||||
static inline bool trace_isrunning(struct tracectx *t)
|
||||
{
|
||||
return !!(t->flags & TRACER_RUNNING);
|
||||
}
|
||||
|
||||
static int etm_setup_address_range(struct tracectx *t, int n,
|
||||
static int etm_setup_address_range(struct tracectx *t, int id, int n,
|
||||
unsigned long start, unsigned long end, int exclude, int data)
|
||||
{
|
||||
u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
|
||||
u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
|
||||
ETMAAT_NOVALCMP;
|
||||
|
||||
if (n < 1 || n > t->ncmppairs)
|
||||
@@ -72,95 +84,185 @@ static int etm_setup_address_range(struct tracectx *t, int n,
|
||||
flags |= ETMAAT_IEXEC;
|
||||
|
||||
/* first comparator for the range */
|
||||
etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
|
||||
etm_writel(t, start, ETMR_COMP_VAL(n * 2));
|
||||
etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
|
||||
etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
|
||||
|
||||
/* second comparator is right next to it */
|
||||
etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
|
||||
etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
|
||||
etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
|
||||
etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
|
||||
|
||||
flags = exclude ? ETMTE_INCLEXCL : 0;
|
||||
etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
|
||||
if (data) {
|
||||
flags = exclude ? ETMVDC3_EXCLONLY : 0;
|
||||
if (exclude)
|
||||
n += 8;
|
||||
etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
|
||||
} else {
|
||||
flags = exclude ? ETMTE_INCLEXCL : 0;
|
||||
etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int trace_start_etm(struct tracectx *t, int id)
|
||||
{
|
||||
u32 v;
|
||||
unsigned long timeout = TRACER_TIMEOUT;
|
||||
|
||||
v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
|
||||
v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
|
||||
|
||||
if (t->flags & TRACER_CYCLE_ACC)
|
||||
v |= ETMCTRL_CYCLEACCURATE;
|
||||
|
||||
if (t->flags & TRACER_BRANCHOUTPUT)
|
||||
v |= ETMCTRL_BRANCH_OUTPUT;
|
||||
|
||||
if (t->flags & TRACER_TRACE_DATA)
|
||||
v |= ETMCTRL_DATA_DO_ADDR;
|
||||
|
||||
if (t->flags & TRACER_TIMESTAMP)
|
||||
v |= ETMCTRL_TIMESTAMP_EN;
|
||||
|
||||
if (t->flags & TRACER_RETURN_STACK)
|
||||
v |= ETMCTRL_RETURN_STACK_EN;
|
||||
|
||||
etm_unlock(t, id);
|
||||
|
||||
etm_writel(t, id, v, ETMR_CTRL);
|
||||
|
||||
while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
|
||||
;
|
||||
if (!timeout) {
|
||||
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
|
||||
etm_lock(t, id);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (t->range_start || t->range_end)
|
||||
etm_setup_address_range(t, id, 1,
|
||||
t->range_start, t->range_end, 0, 0);
|
||||
else
|
||||
etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
|
||||
|
||||
etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
|
||||
etm_writel(t, id, 0, ETMR_TRACESSCTRL);
|
||||
etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
|
||||
|
||||
etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
|
||||
etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
|
||||
|
||||
if (t->data_range_start || t->data_range_end)
|
||||
etm_setup_address_range(t, id, 2, t->data_range_start,
|
||||
t->data_range_end, 0, 1);
|
||||
else
|
||||
etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
|
||||
|
||||
etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
|
||||
|
||||
v &= ~ETMCTRL_PROGRAM;
|
||||
v |= ETMCTRL_PORTSEL;
|
||||
|
||||
etm_writel(t, id, v, ETMR_CTRL);
|
||||
|
||||
timeout = TRACER_TIMEOUT;
|
||||
while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
|
||||
;
|
||||
if (!timeout) {
|
||||
dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
|
||||
etm_lock(t, id);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
etm_lock(t, id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int trace_start(struct tracectx *t)
|
||||
{
|
||||
u32 v;
|
||||
unsigned long timeout = TRACER_TIMEOUT;
|
||||
int ret;
|
||||
int id;
|
||||
u32 etb_fc = t->etb_fc;
|
||||
|
||||
etb_unlock(t);
|
||||
|
||||
etb_writel(t, 0, ETBR_FORMATTERCTRL);
|
||||
t->dump_initial_etb = false;
|
||||
etb_writel(t, 0, ETBR_WRITEADDR);
|
||||
etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
|
||||
etb_writel(t, 1, ETBR_CTRL);
|
||||
|
||||
etb_lock(t);
|
||||
|
||||
/* configure etm */
|
||||
v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
|
||||
|
||||
if (t->flags & TRACER_CYCLE_ACC)
|
||||
v |= ETMCTRL_CYCLEACCURATE;
|
||||
|
||||
etm_unlock(t);
|
||||
|
||||
etm_writel(t, v, ETMR_CTRL);
|
||||
|
||||
while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
|
||||
;
|
||||
if (!timeout) {
|
||||
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
|
||||
etm_lock(t);
|
||||
return -EFAULT;
|
||||
/* configure etm(s) */
|
||||
for (id = 0; id < t->etm_regs_count; id++) {
|
||||
ret = trace_start_etm(t, id);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
etm_setup_address_range(t, 1, (unsigned long)_stext,
|
||||
(unsigned long)_etext, 0, 0);
|
||||
etm_writel(t, 0, ETMR_TRACEENCTRL2);
|
||||
etm_writel(t, 0, ETMR_TRACESSCTRL);
|
||||
etm_writel(t, 0x6f, ETMR_TRACEENEVT);
|
||||
|
||||
v &= ~ETMCTRL_PROGRAM;
|
||||
v |= ETMCTRL_PORTSEL;
|
||||
|
||||
etm_writel(t, v, ETMR_CTRL);
|
||||
|
||||
timeout = TRACER_TIMEOUT;
|
||||
while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
|
||||
;
|
||||
if (!timeout) {
|
||||
dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
|
||||
etm_lock(t);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
etm_lock(t);
|
||||
|
||||
t->flags |= TRACER_RUNNING;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int trace_stop(struct tracectx *t)
|
||||
static int trace_stop_etm(struct tracectx *t, int id)
|
||||
{
|
||||
unsigned long timeout = TRACER_TIMEOUT;
|
||||
|
||||
etm_unlock(t);
|
||||
etm_unlock(t, id);
|
||||
|
||||
etm_writel(t, 0x440, ETMR_CTRL);
|
||||
while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
|
||||
etm_writel(t, id, 0x440, ETMR_CTRL);
|
||||
while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
|
||||
;
|
||||
if (!timeout) {
|
||||
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
|
||||
etm_lock(t);
|
||||
dev_err(t->dev,
|
||||
"etm%d: Waiting for progbit to assert timed out\n",
|
||||
id);
|
||||
etm_lock(t, id);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
etm_lock(t);
|
||||
etm_lock(t, id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int trace_power_down_etm(struct tracectx *t, int id)
|
||||
{
|
||||
unsigned long timeout = TRACER_TIMEOUT;
|
||||
etm_unlock(t, id);
|
||||
while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
|
||||
;
|
||||
if (!timeout) {
|
||||
dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
|
||||
id);
|
||||
etm_lock(t, id);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
etm_writel(t, id, 0x441, ETMR_CTRL);
|
||||
|
||||
etm_lock(t, id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int trace_stop(struct tracectx *t)
|
||||
{
|
||||
int id;
|
||||
unsigned long timeout = TRACER_TIMEOUT;
|
||||
u32 etb_fc = t->etb_fc;
|
||||
|
||||
for (id = 0; id < t->etm_regs_count; id++)
|
||||
trace_stop_etm(t, id);
|
||||
|
||||
for (id = 0; id < t->etm_regs_count; id++)
|
||||
trace_power_down_etm(t, id);
|
||||
|
||||
etb_unlock(t);
|
||||
etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
|
||||
if (etb_fc) {
|
||||
etb_fc |= ETBFF_STOPFL;
|
||||
etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
|
||||
}
|
||||
etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
|
||||
|
||||
timeout = TRACER_TIMEOUT;
|
||||
while (etb_readl(t, ETBR_FORMATTERCTRL) &
|
||||
@@ -185,24 +287,15 @@ static int trace_stop(struct tracectx *t)
|
||||
static int etb_getdatalen(struct tracectx *t)
|
||||
{
|
||||
u32 v;
|
||||
int rp, wp;
|
||||
int wp;
|
||||
|
||||
v = etb_readl(t, ETBR_STATUS);
|
||||
|
||||
if (v & 1)
|
||||
return t->etb_bufsz;
|
||||
|
||||
rp = etb_readl(t, ETBR_READADDR);
|
||||
wp = etb_readl(t, ETBR_WRITEADDR);
|
||||
|
||||
if (rp > wp) {
|
||||
etb_writel(t, 0, ETBR_READADDR);
|
||||
etb_writel(t, 0, ETBR_WRITEADDR);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
return wp - rp;
|
||||
return wp;
|
||||
}
|
||||
|
||||
/* sysrq+v will always stop the running trace and leave it at that */
|
||||
@@ -235,21 +328,18 @@ static void etm_dump(void)
|
||||
printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
|
||||
printk(KERN_INFO "\n--- ETB buffer end ---\n");
|
||||
|
||||
/* deassert the overflow bit */
|
||||
etb_writel(t, 1, ETBR_CTRL);
|
||||
etb_writel(t, 0, ETBR_CTRL);
|
||||
|
||||
etb_writel(t, 0, ETBR_TRIGGERCOUNT);
|
||||
etb_writel(t, 0, ETBR_READADDR);
|
||||
etb_writel(t, 0, ETBR_WRITEADDR);
|
||||
|
||||
etb_lock(t);
|
||||
}
|
||||
|
||||
static void sysrq_etm_dump(int key)
|
||||
{
|
||||
if (!mutex_trylock(&tracer.mutex)) {
|
||||
printk(KERN_INFO "Tracing hardware busy\n");
|
||||
return;
|
||||
}
|
||||
dev_dbg(tracer.dev, "Dumping ETB buffer\n");
|
||||
etm_dump();
|
||||
mutex_unlock(&tracer.mutex);
|
||||
}
|
||||
|
||||
static struct sysrq_key_op sysrq_etm_op = {
|
||||
@@ -276,6 +366,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
|
||||
struct tracectx *t = file->private_data;
|
||||
u32 first = 0;
|
||||
u32 *buf;
|
||||
int wpos;
|
||||
int skip;
|
||||
long wlength;
|
||||
loff_t pos = *ppos;
|
||||
|
||||
mutex_lock(&t->mutex);
|
||||
|
||||
@@ -287,31 +381,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
|
||||
etb_unlock(t);
|
||||
|
||||
total = etb_getdatalen(t);
|
||||
if (total == 0 && t->dump_initial_etb)
|
||||
total = t->etb_bufsz;
|
||||
if (total == t->etb_bufsz)
|
||||
first = etb_readl(t, ETBR_WRITEADDR);
|
||||
|
||||
if (pos > total * 4) {
|
||||
skip = 0;
|
||||
wpos = total;
|
||||
} else {
|
||||
skip = (int)pos % 4;
|
||||
wpos = (int)pos / 4;
|
||||
}
|
||||
total -= wpos;
|
||||
first = (first + wpos) % t->etb_bufsz;
|
||||
|
||||
etb_writel(t, first, ETBR_READADDR);
|
||||
|
||||
length = min(total * 4, (int)len);
|
||||
buf = vmalloc(length);
|
||||
wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
|
||||
length = min(total * 4 - skip, (int)len);
|
||||
buf = vmalloc(wlength * 4);
|
||||
|
||||
dev_dbg(t->dev, "ETB buffer length: %d\n", total);
|
||||
dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
|
||||
length, pos, wlength, first);
|
||||
dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
|
||||
dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
|
||||
for (i = 0; i < length / 4; i++)
|
||||
for (i = 0; i < wlength; i++)
|
||||
buf[i] = etb_readl(t, ETBR_READMEM);
|
||||
|
||||
/* the only way to deassert overflow bit in ETB status is this */
|
||||
etb_writel(t, 1, ETBR_CTRL);
|
||||
etb_writel(t, 0, ETBR_CTRL);
|
||||
|
||||
etb_writel(t, 0, ETBR_WRITEADDR);
|
||||
etb_writel(t, 0, ETBR_READADDR);
|
||||
etb_writel(t, 0, ETBR_TRIGGERCOUNT);
|
||||
|
||||
etb_lock(t);
|
||||
|
||||
length -= copy_to_user(data, buf, length);
|
||||
length -= copy_to_user(data, (u8 *)buf + skip, length);
|
||||
vfree(buf);
|
||||
*ppos = pos + length;
|
||||
|
||||
out:
|
||||
mutex_unlock(&t->mutex);
|
||||
@@ -348,28 +450,17 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&t->mutex);
|
||||
t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
|
||||
if (!t->etb_regs) {
|
||||
ret = -ENOMEM;
|
||||
goto out_release;
|
||||
}
|
||||
|
||||
t->dev = &dev->dev;
|
||||
t->dump_initial_etb = true;
|
||||
amba_set_drvdata(dev, t);
|
||||
|
||||
etb_miscdev.parent = &dev->dev;
|
||||
|
||||
ret = misc_register(&etb_miscdev);
|
||||
if (ret)
|
||||
goto out_unmap;
|
||||
|
||||
t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
|
||||
if (IS_ERR(t->emu_clk)) {
|
||||
dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
clk_enable(t->emu_clk);
|
||||
|
||||
etb_unlock(t);
|
||||
t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
|
||||
dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
|
||||
@@ -378,6 +469,20 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
|
||||
etb_writel(t, 0, ETBR_CTRL);
|
||||
etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
|
||||
etb_lock(t);
|
||||
mutex_unlock(&t->mutex);
|
||||
|
||||
etb_miscdev.parent = &dev->dev;
|
||||
|
||||
ret = misc_register(&etb_miscdev);
|
||||
if (ret)
|
||||
goto out_unmap;
|
||||
|
||||
/* Get optional clock. Currently used to select clock source on omap3 */
|
||||
t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
|
||||
if (IS_ERR(t->emu_clk))
|
||||
dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
|
||||
else
|
||||
clk_enable(t->emu_clk);
|
||||
|
||||
dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
|
||||
|
||||
@@ -385,9 +490,12 @@ out:
|
||||
return ret;
|
||||
|
||||
out_unmap:
|
||||
mutex_lock(&t->mutex);
|
||||
iounmap(t->etb_regs);
|
||||
t->etb_regs = NULL;
|
||||
|
||||
out_release:
|
||||
mutex_unlock(&t->mutex);
|
||||
amba_release_regions(dev);
|
||||
|
||||
return ret;
|
||||
@@ -400,8 +508,10 @@ static int etb_remove(struct amba_device *dev)
|
||||
iounmap(t->etb_regs);
|
||||
t->etb_regs = NULL;
|
||||
|
||||
clk_disable(t->emu_clk);
|
||||
clk_put(t->emu_clk);
|
||||
if (!IS_ERR(t->emu_clk)) {
|
||||
clk_disable(t->emu_clk);
|
||||
clk_put(t->emu_clk);
|
||||
}
|
||||
|
||||
amba_release_regions(dev);
|
||||
|
||||
@@ -445,7 +555,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&tracer.mutex);
|
||||
ret = value ? trace_start(&tracer) : trace_stop(&tracer);
|
||||
if (!tracer.etb_regs)
|
||||
ret = -ENODEV;
|
||||
else
|
||||
ret = value ? trace_start(&tracer) : trace_stop(&tracer);
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return ret ? : n;
|
||||
@@ -460,36 +573,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
|
||||
{
|
||||
u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
|
||||
int datalen;
|
||||
int id;
|
||||
int ret;
|
||||
|
||||
etb_unlock(&tracer);
|
||||
datalen = etb_getdatalen(&tracer);
|
||||
etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
|
||||
etb_ra = etb_readl(&tracer, ETBR_READADDR);
|
||||
etb_st = etb_readl(&tracer, ETBR_STATUS);
|
||||
etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
|
||||
etb_lock(&tracer);
|
||||
mutex_lock(&tracer.mutex);
|
||||
if (tracer.etb_regs) {
|
||||
etb_unlock(&tracer);
|
||||
datalen = etb_getdatalen(&tracer);
|
||||
etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
|
||||
etb_ra = etb_readl(&tracer, ETBR_READADDR);
|
||||
etb_st = etb_readl(&tracer, ETBR_STATUS);
|
||||
etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
|
||||
etb_lock(&tracer);
|
||||
} else {
|
||||
etb_wa = etb_ra = etb_st = etb_fc = ~0;
|
||||
datalen = -1;
|
||||
}
|
||||
|
||||
etm_unlock(&tracer);
|
||||
etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
|
||||
etm_st = etm_readl(&tracer, ETMR_STATUS);
|
||||
etm_lock(&tracer);
|
||||
|
||||
return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
|
||||
ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
|
||||
"ETBR_WRITEADDR:\t%08x\n"
|
||||
"ETBR_READADDR:\t%08x\n"
|
||||
"ETBR_STATUS:\t%08x\n"
|
||||
"ETBR_FORMATTERCTRL:\t%08x\n"
|
||||
"ETMR_CTRL:\t%08x\n"
|
||||
"ETMR_STATUS:\t%08x\n",
|
||||
"ETBR_FORMATTERCTRL:\t%08x\n",
|
||||
datalen,
|
||||
tracer.ncmppairs,
|
||||
etb_wa,
|
||||
etb_ra,
|
||||
etb_st,
|
||||
etb_fc,
|
||||
etb_fc
|
||||
);
|
||||
|
||||
for (id = 0; id < tracer.etm_regs_count; id++) {
|
||||
etm_unlock(&tracer, id);
|
||||
etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
|
||||
etm_st = etm_readl(&tracer, id, ETMR_STATUS);
|
||||
etm_lock(&tracer, id);
|
||||
ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
|
||||
"ETMR_STATUS:\t%08x\n",
|
||||
etm_ctrl,
|
||||
etm_st
|
||||
);
|
||||
}
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct kobj_attribute trace_info_attr =
|
||||
@@ -528,42 +655,260 @@ static ssize_t trace_mode_store(struct kobject *kobj,
|
||||
static struct kobj_attribute trace_mode_attr =
|
||||
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
|
||||
|
||||
static ssize_t trace_contextid_size_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
/* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
|
||||
return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
|
||||
}
|
||||
|
||||
static ssize_t trace_contextid_size_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t n)
|
||||
{
|
||||
unsigned int contextid_size;
|
||||
|
||||
if (sscanf(buf, "%u", &contextid_size) != 1)
|
||||
return -EINVAL;
|
||||
|
||||
if (contextid_size == 3 || contextid_size > 4)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&tracer.mutex);
|
||||
tracer.etm_contextid_size = fls(contextid_size);
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static struct kobj_attribute trace_contextid_size_attr =
|
||||
__ATTR(trace_contextid_size, 0644,
|
||||
trace_contextid_size_show, trace_contextid_size_store);
|
||||
|
||||
static ssize_t trace_branch_output_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
|
||||
}
|
||||
|
||||
static ssize_t trace_branch_output_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t n)
|
||||
{
|
||||
unsigned int branch_output;
|
||||
|
||||
if (sscanf(buf, "%u", &branch_output) != 1)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&tracer.mutex);
|
||||
if (branch_output) {
|
||||
tracer.flags |= TRACER_BRANCHOUTPUT;
|
||||
/* Branch broadcasting is incompatible with the return stack */
|
||||
tracer.flags &= ~TRACER_RETURN_STACK;
|
||||
} else {
|
||||
tracer.flags &= ~TRACER_BRANCHOUTPUT;
|
||||
}
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static struct kobj_attribute trace_branch_output_attr =
|
||||
__ATTR(trace_branch_output, 0644,
|
||||
trace_branch_output_show, trace_branch_output_store);
|
||||
|
||||
static ssize_t trace_return_stack_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
|
||||
}
|
||||
|
||||
static ssize_t trace_return_stack_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t n)
|
||||
{
|
||||
unsigned int return_stack;
|
||||
|
||||
if (sscanf(buf, "%u", &return_stack) != 1)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&tracer.mutex);
|
||||
if (return_stack) {
|
||||
tracer.flags |= TRACER_RETURN_STACK;
|
||||
/* Return stack is incompatible with branch broadcasting */
|
||||
tracer.flags &= ~TRACER_BRANCHOUTPUT;
|
||||
} else {
|
||||
tracer.flags &= ~TRACER_RETURN_STACK;
|
||||
}
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static struct kobj_attribute trace_return_stack_attr =
|
||||
__ATTR(trace_return_stack, 0644,
|
||||
trace_return_stack_show, trace_return_stack_store);
|
||||
|
||||
static ssize_t trace_timestamp_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
|
||||
}
|
||||
|
||||
static ssize_t trace_timestamp_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t n)
|
||||
{
|
||||
unsigned int timestamp;
|
||||
|
||||
if (sscanf(buf, "%u", ×tamp) != 1)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&tracer.mutex);
|
||||
if (timestamp)
|
||||
tracer.flags |= TRACER_TIMESTAMP;
|
||||
else
|
||||
tracer.flags &= ~TRACER_TIMESTAMP;
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static struct kobj_attribute trace_timestamp_attr =
|
||||
__ATTR(trace_timestamp, 0644,
|
||||
trace_timestamp_show, trace_timestamp_store);
|
||||
|
||||
static ssize_t trace_range_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf, "%08lx %08lx\n",
|
||||
tracer.range_start, tracer.range_end);
|
||||
}
|
||||
|
||||
static ssize_t trace_range_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t n)
|
||||
{
|
||||
unsigned long range_start, range_end;
|
||||
|
||||
if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&tracer.mutex);
|
||||
tracer.range_start = range_start;
|
||||
tracer.range_end = range_end;
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
|
||||
static struct kobj_attribute trace_range_attr =
|
||||
__ATTR(trace_range, 0644, trace_range_show, trace_range_store);
|
||||
|
||||
static ssize_t trace_data_range_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
unsigned long range_start;
|
||||
u64 range_end;
|
||||
mutex_lock(&tracer.mutex);
|
||||
range_start = tracer.data_range_start;
|
||||
range_end = tracer.data_range_end;
|
||||
if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
|
||||
range_end = 0x100000000ULL;
|
||||
mutex_unlock(&tracer.mutex);
|
||||
return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
|
||||
}
|
||||
|
||||
static ssize_t trace_data_range_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t n)
|
||||
{
|
||||
unsigned long range_start;
|
||||
u64 range_end;
|
||||
|
||||
if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&tracer.mutex);
|
||||
tracer.data_range_start = range_start;
|
||||
tracer.data_range_end = (unsigned long)range_end;
|
||||
if (range_end)
|
||||
tracer.flags |= TRACER_TRACE_DATA;
|
||||
else
|
||||
tracer.flags &= ~TRACER_TRACE_DATA;
|
||||
mutex_unlock(&tracer.mutex);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
|
||||
static struct kobj_attribute trace_data_range_attr =
|
||||
__ATTR(trace_data_range, 0644,
|
||||
trace_data_range_show, trace_data_range_store);
|
||||
|
||||
static int etm_probe(struct amba_device *dev, const struct amba_id *id)
|
||||
{
|
||||
struct tracectx *t = &tracer;
|
||||
int ret = 0;
|
||||
void __iomem **new_regs;
|
||||
int new_count;
|
||||
u32 etmccr;
|
||||
u32 etmidr;
|
||||
u32 etmccer = 0;
|
||||
u8 etm_version = 0;
|
||||
|
||||
if (t->etm_regs) {
|
||||
dev_dbg(&dev->dev, "ETM already initialized\n");
|
||||
ret = -EBUSY;
|
||||
mutex_lock(&t->mutex);
|
||||
new_count = t->etm_regs_count + 1;
|
||||
new_regs = krealloc(t->etm_regs,
|
||||
sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
|
||||
|
||||
if (!new_regs) {
|
||||
dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
t->etm_regs = new_regs;
|
||||
|
||||
ret = amba_request_regions(dev, NULL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
|
||||
if (!t->etm_regs) {
|
||||
t->etm_regs[t->etm_regs_count] =
|
||||
ioremap_nocache(dev->res.start, resource_size(&dev->res));
|
||||
if (!t->etm_regs[t->etm_regs_count]) {
|
||||
ret = -ENOMEM;
|
||||
goto out_release;
|
||||
}
|
||||
|
||||
amba_set_drvdata(dev, t);
|
||||
amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
|
||||
|
||||
mutex_init(&t->mutex);
|
||||
t->dev = &dev->dev;
|
||||
t->flags = TRACER_CYCLE_ACC;
|
||||
t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
|
||||
t->etm_portsz = 1;
|
||||
t->etm_contextid_size = 3;
|
||||
|
||||
etm_unlock(t);
|
||||
(void)etm_readl(t, ETMMR_PDSR);
|
||||
etm_unlock(t, t->etm_regs_count);
|
||||
(void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
|
||||
/* dummy first read */
|
||||
(void)etm_readl(&tracer, ETMMR_OSSRR);
|
||||
(void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
|
||||
|
||||
t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
|
||||
etm_writel(t, 0x440, ETMR_CTRL);
|
||||
etm_lock(t);
|
||||
etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
|
||||
t->ncmppairs = etmccr & 0xf;
|
||||
if (etmccr & ETMCCR_ETMIDR_PRESENT) {
|
||||
etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
|
||||
etm_version = ETMIDR_VERSION(etmidr);
|
||||
if (etm_version >= ETMIDR_VERSION_3_1)
|
||||
etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
|
||||
}
|
||||
etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
|
||||
etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
|
||||
etm_lock(t, t->etm_regs_count);
|
||||
|
||||
ret = sysfs_create_file(&dev->dev.kobj,
|
||||
&trace_running_attr.attr);
|
||||
@@ -579,32 +924,97 @@ static int etm_probe(struct amba_device *dev, const struct amba_id *id)
|
||||
if (ret)
|
||||
dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
|
||||
|
||||
dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
|
||||
ret = sysfs_create_file(&dev->dev.kobj,
|
||||
&trace_contextid_size_attr.attr);
|
||||
if (ret)
|
||||
dev_dbg(&dev->dev,
|
||||
"Failed to create trace_contextid_size in sysfs\n");
|
||||
|
||||
ret = sysfs_create_file(&dev->dev.kobj,
|
||||
&trace_branch_output_attr.attr);
|
||||
if (ret)
|
||||
dev_dbg(&dev->dev,
|
||||
"Failed to create trace_branch_output in sysfs\n");
|
||||
|
||||
if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
|
||||
ret = sysfs_create_file(&dev->dev.kobj,
|
||||
&trace_return_stack_attr.attr);
|
||||
if (ret)
|
||||
dev_dbg(&dev->dev,
|
||||
"Failed to create trace_return_stack in sysfs\n");
|
||||
}
|
||||
|
||||
if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
|
||||
ret = sysfs_create_file(&dev->dev.kobj,
|
||||
&trace_timestamp_attr.attr);
|
||||
if (ret)
|
||||
dev_dbg(&dev->dev,
|
||||
"Failed to create trace_timestamp in sysfs\n");
|
||||
}
|
||||
|
||||
ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
|
||||
if (ret)
|
||||
dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
|
||||
|
||||
if (etm_version < ETMIDR_VERSION_PFT_1_0) {
|
||||
ret = sysfs_create_file(&dev->dev.kobj,
|
||||
&trace_data_range_attr.attr);
|
||||
if (ret)
|
||||
dev_dbg(&dev->dev,
|
||||
"Failed to create trace_data_range in sysfs\n");
|
||||
} else {
|
||||
tracer.flags &= ~TRACER_TRACE_DATA;
|
||||
}
|
||||
|
||||
dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
|
||||
|
||||
/* Enable formatter if there are multiple trace sources */
|
||||
if (new_count > 1)
|
||||
t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
|
||||
|
||||
t->etm_regs_count = new_count;
|
||||
|
||||
out:
|
||||
mutex_unlock(&t->mutex);
|
||||
return ret;
|
||||
|
||||
out_unmap:
|
||||
iounmap(t->etm_regs);
|
||||
iounmap(t->etm_regs[t->etm_regs_count]);
|
||||
|
||||
out_release:
|
||||
amba_release_regions(dev);
|
||||
|
||||
mutex_unlock(&t->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int etm_remove(struct amba_device *dev)
|
||||
{
|
||||
struct tracectx *t = amba_get_drvdata(dev);
|
||||
|
||||
iounmap(t->etm_regs);
|
||||
t->etm_regs = NULL;
|
||||
|
||||
amba_release_regions(dev);
|
||||
int i;
|
||||
struct tracectx *t = &tracer;
|
||||
void __iomem *etm_regs = amba_get_drvdata(dev);
|
||||
|
||||
sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
|
||||
sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
|
||||
sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
|
||||
sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
|
||||
sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
|
||||
|
||||
mutex_lock(&t->mutex);
|
||||
for (i = 0; i < t->etm_regs_count; i++)
|
||||
if (t->etm_regs[i] == etm_regs)
|
||||
break;
|
||||
for (; i < t->etm_regs_count - 1; i++)
|
||||
t->etm_regs[i] = t->etm_regs[i + 1];
|
||||
t->etm_regs_count--;
|
||||
if (!t->etm_regs_count) {
|
||||
kfree(t->etm_regs);
|
||||
t->etm_regs = NULL;
|
||||
}
|
||||
mutex_unlock(&t->mutex);
|
||||
|
||||
iounmap(etm_regs);
|
||||
amba_release_regions(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -614,6 +1024,10 @@ static struct amba_id etm_ids[] = {
|
||||
.id = 0x0003b921,
|
||||
.mask = 0x0007ffff,
|
||||
},
|
||||
{
|
||||
.id = 0x0003b950,
|
||||
.mask = 0x0007ffff,
|
||||
},
|
||||
{ 0, 0 },
|
||||
};
|
||||
|
||||
@@ -631,6 +1045,8 @@ static int __init etm_init(void)
|
||||
{
|
||||
int retval;
|
||||
|
||||
mutex_init(&tracer.mutex);
|
||||
|
||||
retval = amba_driver_register(&etb_driver);
|
||||
if (retval) {
|
||||
printk(KERN_ERR "Failed to register etb\n");
|
||||
|
||||
@@ -144,6 +144,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
|
||||
|
||||
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
return -1;
|
||||
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
||||
|
||||
return 0;
|
||||
@@ -151,6 +153,8 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
|
||||
|
||||
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
return -1;
|
||||
compiled_break = 1;
|
||||
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
||||
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/idmap.h>
|
||||
@@ -59,9 +60,46 @@ static const char *isa_modes[] = {
|
||||
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void arch_trigger_all_cpu_backtrace(void)
|
||||
{
|
||||
smp_send_all_cpu_backtrace();
|
||||
}
|
||||
#else
|
||||
void arch_trigger_all_cpu_backtrace(void)
|
||||
{
|
||||
dump_stack();
|
||||
}
|
||||
#endif
|
||||
|
||||
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
|
||||
typedef void (*phys_reset_t)(unsigned long);
|
||||
|
||||
#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
|
||||
void arm_machine_flush_console(void)
|
||||
{
|
||||
printk("\n");
|
||||
pr_emerg("Restarting %s\n", linux_banner);
|
||||
if (console_trylock()) {
|
||||
console_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
mdelay(50);
|
||||
|
||||
local_irq_disable();
|
||||
if (!console_trylock())
|
||||
pr_emerg("arm_restart: Console was locked! Busting\n");
|
||||
else
|
||||
pr_emerg("arm_restart: Console was locked!\n");
|
||||
console_unlock();
|
||||
}
|
||||
#else
|
||||
void arm_machine_flush_console(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* A temporary stack to use for CPU reset. This is static so that we
|
||||
* don't clobber it with the identity mapping. When running with this
|
||||
@@ -149,6 +187,7 @@ void arch_cpu_idle_prepare(void)
|
||||
|
||||
void arch_cpu_idle_enter(void)
|
||||
{
|
||||
idle_notifier_call_chain(IDLE_START);
|
||||
ledtrig_cpu(CPU_LED_IDLE_START);
|
||||
#ifdef CONFIG_PL310_ERRATA_769419
|
||||
wmb();
|
||||
@@ -158,6 +197,7 @@ void arch_cpu_idle_enter(void)
|
||||
void arch_cpu_idle_exit(void)
|
||||
{
|
||||
ledtrig_cpu(CPU_LED_IDLE_END);
|
||||
idle_notifier_call_chain(IDLE_END);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
@@ -187,6 +227,16 @@ void arch_cpu_idle(void)
|
||||
*/
|
||||
void machine_shutdown(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* Disable preemption so we're guaranteed to
|
||||
* run to power off or reboot and prevent
|
||||
* the possibility of switching to another
|
||||
* thread that might wind up blocking on
|
||||
* one of the stopped CPUs.
|
||||
*/
|
||||
preempt_disable();
|
||||
#endif
|
||||
disable_nonboot_cpus();
|
||||
}
|
||||
|
||||
@@ -235,6 +285,10 @@ void machine_restart(char *cmd)
|
||||
local_irq_disable();
|
||||
smp_send_stop();
|
||||
|
||||
/* Flush the console to make sure all the relevant messages make it
|
||||
* out to the console drivers */
|
||||
arm_machine_flush_console();
|
||||
|
||||
arm_pm_restart(reboot_mode, cmd);
|
||||
|
||||
/* Give a grace period for failure to restart of 1s */
|
||||
@@ -246,6 +300,77 @@ void machine_restart(char *cmd)
|
||||
while (1);
|
||||
}
|
||||
|
||||
/*
|
||||
* dump a block of kernel memory from around the given address
|
||||
*/
|
||||
static void show_data(unsigned long addr, int nbytes, const char *name)
|
||||
{
|
||||
int i, j;
|
||||
int nlines;
|
||||
u32 *p;
|
||||
|
||||
/*
|
||||
* don't attempt to dump non-kernel addresses or
|
||||
* values that are probably just small negative numbers
|
||||
*/
|
||||
if (addr < PAGE_OFFSET || addr > -256UL)
|
||||
return;
|
||||
|
||||
printk("\n%s: %#lx:\n", name, addr);
|
||||
|
||||
/*
|
||||
* round address down to a 32 bit boundary
|
||||
* and always dump a multiple of 32 bytes
|
||||
*/
|
||||
p = (u32 *)(addr & ~(sizeof(u32) - 1));
|
||||
nbytes += (addr & (sizeof(u32) - 1));
|
||||
nlines = (nbytes + 31) / 32;
|
||||
|
||||
|
||||
for (i = 0; i < nlines; i++) {
|
||||
/*
|
||||
* just display low 16 bits of address to keep
|
||||
* each line of the dump < 80 characters
|
||||
*/
|
||||
printk("%04lx ", (unsigned long)p & 0xffff);
|
||||
for (j = 0; j < 8; j++) {
|
||||
u32 data;
|
||||
if (probe_kernel_address(p, data)) {
|
||||
printk(" ********");
|
||||
} else {
|
||||
printk(" %08x", data);
|
||||
}
|
||||
++p;
|
||||
}
|
||||
printk("\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
|
||||
{
|
||||
mm_segment_t fs;
|
||||
|
||||
fs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
|
||||
show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
|
||||
show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
|
||||
show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
|
||||
show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
|
||||
show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
|
||||
show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
|
||||
show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
|
||||
show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
|
||||
show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
|
||||
show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
|
||||
show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
|
||||
show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
|
||||
show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
|
||||
show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
|
||||
show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
|
||||
set_fs(fs);
|
||||
}
|
||||
|
||||
void __show_regs(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long flags;
|
||||
@@ -302,6 +427,8 @@ void __show_regs(struct pt_regs *regs)
|
||||
printk("Control: %08x%s\n", ctrl, buf);
|
||||
}
|
||||
#endif
|
||||
|
||||
show_extra_register_data(regs, 128);
|
||||
}
|
||||
|
||||
void show_regs(struct pt_regs * regs)
|
||||
|
||||
@@ -69,6 +69,7 @@ enum ipi_msg_type {
|
||||
IPI_CPU_STOP,
|
||||
IPI_IRQ_WORK,
|
||||
IPI_COMPLETION,
|
||||
IPI_CPU_BACKTRACE,
|
||||
};
|
||||
|
||||
static DECLARE_COMPLETION(cpu_running);
|
||||
@@ -471,6 +472,7 @@ static const char *ipi_types[NR_IPI] = {
|
||||
S(IPI_CPU_STOP, "CPU stop interrupts"),
|
||||
S(IPI_IRQ_WORK, "IRQ work interrupts"),
|
||||
S(IPI_COMPLETION, "completion interrupts"),
|
||||
S(IPI_CPU_BACKTRACE, "CPU backtrace"),
|
||||
};
|
||||
|
||||
void show_ipi_list(struct seq_file *p, int prec)
|
||||
@@ -543,6 +545,58 @@ static void ipi_complete(unsigned int cpu)
|
||||
complete(per_cpu(cpu_completion, cpu));
|
||||
}
|
||||
|
||||
static cpumask_t backtrace_mask;
|
||||
static DEFINE_RAW_SPINLOCK(backtrace_lock);
|
||||
|
||||
/* "in progress" flag of arch_trigger_all_cpu_backtrace */
|
||||
static unsigned long backtrace_flag;
|
||||
|
||||
void smp_send_all_cpu_backtrace(void)
|
||||
{
|
||||
unsigned int this_cpu = smp_processor_id();
|
||||
int i;
|
||||
|
||||
if (test_and_set_bit(0, &backtrace_flag))
|
||||
/*
|
||||
* If there is already a trigger_all_cpu_backtrace() in progress
|
||||
* (backtrace_flag == 1), don't output double cpu dump infos.
|
||||
*/
|
||||
return;
|
||||
|
||||
cpumask_copy(&backtrace_mask, cpu_online_mask);
|
||||
cpu_clear(this_cpu, backtrace_mask);
|
||||
|
||||
pr_info("Backtrace for cpu %d (current):\n", this_cpu);
|
||||
dump_stack();
|
||||
|
||||
pr_info("\nsending IPI to all other CPUs:\n");
|
||||
smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
|
||||
|
||||
/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
|
||||
for (i = 0; i < 10 * 1000; i++) {
|
||||
if (cpumask_empty(&backtrace_mask))
|
||||
break;
|
||||
mdelay(1);
|
||||
}
|
||||
|
||||
clear_bit(0, &backtrace_flag);
|
||||
smp_mb__after_clear_bit();
|
||||
}
|
||||
|
||||
/*
|
||||
* ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
|
||||
*/
|
||||
static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
|
||||
{
|
||||
if (cpu_isset(cpu, backtrace_mask)) {
|
||||
raw_spin_lock(&backtrace_lock);
|
||||
pr_warning("IPI backtrace for cpu %d\n", cpu);
|
||||
show_regs(regs);
|
||||
raw_spin_unlock(&backtrace_lock);
|
||||
cpu_clear(cpu, backtrace_mask);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Main handler for inter-processor interrupts
|
||||
*/
|
||||
@@ -607,6 +661,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
|
||||
irq_exit();
|
||||
break;
|
||||
|
||||
case IPI_CPU_BACKTRACE:
|
||||
ipi_cpu_backtrace(cpu, regs);
|
||||
break;
|
||||
|
||||
default:
|
||||
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
|
||||
cpu, ipinr);
|
||||
|
||||
@@ -34,6 +34,9 @@ static void __iomem *l2x0_base;
|
||||
static DEFINE_RAW_SPINLOCK(l2x0_lock);
|
||||
static u32 l2x0_way_mask; /* Bitmask of active ways */
|
||||
static u32 l2x0_size;
|
||||
static u32 l2x0_cache_id;
|
||||
static unsigned int l2x0_sets;
|
||||
static unsigned int l2x0_ways;
|
||||
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
|
||||
|
||||
/* Aurora don't have the cache ID register available, so we have to
|
||||
@@ -50,6 +53,13 @@ struct l2x0_of_data {
|
||||
|
||||
static bool of_init = false;
|
||||
|
||||
static inline bool is_pl310_rev(int rev)
|
||||
{
|
||||
return (l2x0_cache_id &
|
||||
(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
|
||||
(L2X0_CACHE_ID_PART_L310 | rev);
|
||||
}
|
||||
|
||||
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
|
||||
{
|
||||
/* wait for cache operation by line or way to complete */
|
||||
@@ -138,6 +148,23 @@ static void l2x0_cache_sync(void)
|
||||
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PL310_ERRATA_727915
|
||||
static void l2x0_for_each_set_way(void __iomem *reg)
|
||||
{
|
||||
int set;
|
||||
int way;
|
||||
unsigned long flags;
|
||||
|
||||
for (way = 0; way < l2x0_ways; way++) {
|
||||
raw_spin_lock_irqsave(&l2x0_lock, flags);
|
||||
for (set = 0; set < l2x0_sets; set++)
|
||||
writel_relaxed((way << 28) | (set << 5), reg);
|
||||
cache_sync();
|
||||
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __l2x0_flush_all(void)
|
||||
{
|
||||
debug_writel(0x03);
|
||||
@@ -151,6 +178,13 @@ static void l2x0_flush_all(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
#ifdef CONFIG_PL310_ERRATA_727915
|
||||
if (is_pl310_rev(REV_PL310_R2P0)) {
|
||||
l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* clean all ways */
|
||||
raw_spin_lock_irqsave(&l2x0_lock, flags);
|
||||
__l2x0_flush_all();
|
||||
@@ -161,11 +195,20 @@ static void l2x0_clean_all(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
#ifdef CONFIG_PL310_ERRATA_727915
|
||||
if (is_pl310_rev(REV_PL310_R2P0)) {
|
||||
l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* clean all ways */
|
||||
raw_spin_lock_irqsave(&l2x0_lock, flags);
|
||||
debug_writel(0x03);
|
||||
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
|
||||
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
|
||||
cache_sync();
|
||||
debug_writel(0x00);
|
||||
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
|
||||
}
|
||||
|
||||
@@ -324,65 +367,64 @@ static void l2x0_unlock(u32 cache_id)
|
||||
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
|
||||
{
|
||||
u32 aux;
|
||||
u32 cache_id;
|
||||
u32 way_size = 0;
|
||||
int ways;
|
||||
int way_size_shift = L2X0_WAY_SIZE_SHIFT;
|
||||
const char *type;
|
||||
|
||||
l2x0_base = base;
|
||||
if (cache_id_part_number_from_dt)
|
||||
cache_id = cache_id_part_number_from_dt;
|
||||
l2x0_cache_id = cache_id_part_number_from_dt;
|
||||
else
|
||||
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
|
||||
l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
|
||||
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
|
||||
|
||||
aux &= aux_mask;
|
||||
aux |= aux_val;
|
||||
|
||||
/* Determine the number of ways */
|
||||
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
|
||||
switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
|
||||
case L2X0_CACHE_ID_PART_L310:
|
||||
if (aux & (1 << 16))
|
||||
ways = 16;
|
||||
l2x0_ways = 16;
|
||||
else
|
||||
ways = 8;
|
||||
l2x0_ways = 8;
|
||||
type = "L310";
|
||||
#ifdef CONFIG_PL310_ERRATA_753970
|
||||
/* Unmapped register. */
|
||||
sync_reg_offset = L2X0_DUMMY_REG;
|
||||
#endif
|
||||
if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
|
||||
if ((l2x0_cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
|
||||
outer_cache.set_debug = pl310_set_debug;
|
||||
break;
|
||||
case L2X0_CACHE_ID_PART_L210:
|
||||
ways = (aux >> 13) & 0xf;
|
||||
l2x0_ways = (aux >> 13) & 0xf;
|
||||
type = "L210";
|
||||
break;
|
||||
|
||||
case AURORA_CACHE_ID:
|
||||
sync_reg_offset = AURORA_SYNC_REG;
|
||||
ways = (aux >> 13) & 0xf;
|
||||
ways = 2 << ((ways + 1) >> 2);
|
||||
l2x0_ways = (aux >> 13) & 0xf;
|
||||
l2x0_ways = 2 << ((l2x0_ways + 1) >> 2);
|
||||
way_size_shift = AURORA_WAY_SIZE_SHIFT;
|
||||
type = "Aurora";
|
||||
break;
|
||||
default:
|
||||
/* Assume unknown chips have 8 ways */
|
||||
ways = 8;
|
||||
l2x0_ways = 8;
|
||||
type = "L2x0 series";
|
||||
break;
|
||||
}
|
||||
|
||||
l2x0_way_mask = (1 << ways) - 1;
|
||||
l2x0_way_mask = (1 << l2x0_ways) - 1;
|
||||
|
||||
/*
|
||||
* L2 cache Size = Way size * Number of ways
|
||||
*/
|
||||
way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
|
||||
way_size = 1 << (way_size + way_size_shift);
|
||||
way_size = SZ_1K << (way_size + way_size_shift);
|
||||
|
||||
l2x0_size = ways * way_size * SZ_1K;
|
||||
l2x0_size = l2x0_ways * way_size;
|
||||
l2x0_sets = way_size / CACHE_LINE_SIZE;
|
||||
|
||||
/*
|
||||
* Check if l2x0 controller is already enabled.
|
||||
@@ -391,7 +433,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
|
||||
*/
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
|
||||
/* Make sure that I&D is not locked down when starting */
|
||||
l2x0_unlock(cache_id);
|
||||
l2x0_unlock(l2x0_cache_id);
|
||||
|
||||
/* l2x0 controller is disabled */
|
||||
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
|
||||
@@ -420,7 +462,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
|
||||
|
||||
pr_info("%s cache controller enabled\n", type);
|
||||
pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
|
||||
ways, cache_id, aux, l2x0_size >> 10);
|
||||
l2x0_ways, l2x0_cache_id, aux, l2x0_size >> 10);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
|
||||
@@ -270,6 +270,11 @@ v6_dma_clean_range:
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
ENTRY(v6_dma_flush_range)
|
||||
#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
|
||||
sub r2, r1, r0
|
||||
cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
|
||||
bhi v6_dma_flush_dcache_all
|
||||
#endif
|
||||
#ifdef CONFIG_DMA_CACHE_RWFO
|
||||
ldrb r2, [r0] @ read for ownership
|
||||
strb r2, [r0] @ write for ownership
|
||||
@@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range)
|
||||
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
|
||||
mov pc, lr
|
||||
|
||||
#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
|
||||
v6_dma_flush_dcache_all:
|
||||
mov r0, #0
|
||||
#ifdef HARVARD_CACHE
|
||||
mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
|
||||
#else
|
||||
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
|
||||
#endif
|
||||
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
|
||||
mov pc, lr
|
||||
#endif
|
||||
|
||||
/*
|
||||
* dma_map_area(start, size, dir)
|
||||
* - start - kernel virtual start address
|
||||
|
||||
@@ -274,10 +274,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
||||
local_irq_enable();
|
||||
|
||||
/*
|
||||
* If we're in an interrupt or have no user
|
||||
* If we're in an interrupt, or have no irqs, or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
if (in_atomic() || !mm)
|
||||
if (in_atomic() || irqs_disabled() || !mm)
|
||||
goto no_context;
|
||||
|
||||
if (user_mode(regs))
|
||||
|
||||
@@ -1,13 +1,6 @@
|
||||
#ifndef _ASM_X86_IDLE_H
|
||||
#define _ASM_X86_IDLE_H
|
||||
|
||||
#define IDLE_START 1
|
||||
#define IDLE_END 2
|
||||
|
||||
struct notifier_block;
|
||||
void idle_notifier_register(struct notifier_block *n);
|
||||
void idle_notifier_unregister(struct notifier_block *n);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
void enter_idle(void);
|
||||
void exit_idle(void);
|
||||
|
||||
@@ -40,19 +40,6 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
static DEFINE_PER_CPU(unsigned char, is_idle);
|
||||
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
|
||||
|
||||
void idle_notifier_register(struct notifier_block *n)
|
||||
{
|
||||
atomic_notifier_chain_register(&idle_notifier, n);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(idle_notifier_register);
|
||||
|
||||
void idle_notifier_unregister(struct notifier_block *n)
|
||||
{
|
||||
atomic_notifier_chain_unregister(&idle_notifier, n);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
|
||||
#endif
|
||||
|
||||
struct kmem_cache *task_xstate_cachep;
|
||||
@@ -257,14 +244,14 @@ static inline void play_dead(void)
|
||||
void enter_idle(void)
|
||||
{
|
||||
this_cpu_write(is_idle, 1);
|
||||
atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
|
||||
idle_notifier_call_chain(IDLE_START);
|
||||
}
|
||||
|
||||
static void __exit_idle(void)
|
||||
{
|
||||
if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
|
||||
return;
|
||||
atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
|
||||
idle_notifier_call_chain(IDLE_END);
|
||||
}
|
||||
|
||||
/* Called from interrupts to signify idle end */
|
||||
|
||||
@@ -1107,6 +1107,22 @@ static void disk_release(struct device *dev)
|
||||
blk_put_queue(disk->queue);
|
||||
kfree(disk);
|
||||
}
|
||||
|
||||
static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
struct disk_part_iter piter;
|
||||
struct hd_struct *part;
|
||||
int cnt = 0;
|
||||
|
||||
disk_part_iter_init(&piter, disk, 0);
|
||||
while((part = disk_part_iter_next(&piter)))
|
||||
cnt++;
|
||||
disk_part_iter_exit(&piter);
|
||||
add_uevent_var(env, "NPARTS=%u", cnt);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct class block_class = {
|
||||
.name = "block",
|
||||
};
|
||||
@@ -1126,6 +1142,7 @@ static struct device_type disk_type = {
|
||||
.groups = disk_attr_groups,
|
||||
.release = disk_release,
|
||||
.devnode = block_devnode,
|
||||
.uevent = disk_uevent,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
||||
@@ -216,10 +216,21 @@ static void part_release(struct device *dev)
|
||||
kfree(p);
|
||||
}
|
||||
|
||||
static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
{
|
||||
struct hd_struct *part = dev_to_part(dev);
|
||||
|
||||
add_uevent_var(env, "PARTN=%u", part->partno);
|
||||
if (part->info && part->info->volname[0])
|
||||
add_uevent_var(env, "PARTNAME=%s", part->info->volname);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct device_type part_type = {
|
||||
.name = "partition",
|
||||
.groups = part_attr_groups,
|
||||
.release = part_release,
|
||||
.uevent = part_uevent,
|
||||
};
|
||||
|
||||
static void delete_partition_rcu_cb(struct rcu_head *head)
|
||||
|
||||
@@ -98,6 +98,8 @@ source "drivers/memstick/Kconfig"
|
||||
|
||||
source "drivers/leds/Kconfig"
|
||||
|
||||
source "drivers/switch/Kconfig"
|
||||
|
||||
source "drivers/accessibility/Kconfig"
|
||||
|
||||
source "drivers/infiniband/Kconfig"
|
||||
|
||||
@@ -113,6 +113,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
|
||||
obj-y += mmc/
|
||||
obj-$(CONFIG_MEMSTICK) += memstick/
|
||||
obj-y += leds/
|
||||
obj-$(CONFIG_SWITCH) += switch/
|
||||
obj-$(CONFIG_INFINIBAND) += infiniband/
|
||||
obj-$(CONFIG_SGI_SN) += sn/
|
||||
obj-y += firmware/
|
||||
|
||||
@@ -57,6 +57,12 @@ struct suspend_stats suspend_stats;
|
||||
static DEFINE_MUTEX(dpm_list_mtx);
|
||||
static pm_message_t pm_transition;
|
||||
|
||||
static void dpm_drv_timeout(unsigned long data);
|
||||
struct dpm_drv_wd_data {
|
||||
struct device *dev;
|
||||
struct task_struct *tsk;
|
||||
};
|
||||
|
||||
static int async_error;
|
||||
|
||||
static char *pm_verb(int event)
|
||||
@@ -738,6 +744,30 @@ static bool is_async(struct device *dev)
|
||||
&& !pm_trace_is_enabled();
|
||||
}
|
||||
|
||||
/**
|
||||
* dpm_drv_timeout - Driver suspend / resume watchdog handler
|
||||
* @data: struct device which timed out
|
||||
*
|
||||
* Called when a driver has timed out suspending or resuming.
|
||||
* There's not much we can do here to recover so
|
||||
* BUG() out for a crash-dump
|
||||
*
|
||||
*/
|
||||
static void dpm_drv_timeout(unsigned long data)
|
||||
{
|
||||
struct dpm_drv_wd_data *wd_data = (void *)data;
|
||||
struct device *dev = wd_data->dev;
|
||||
struct task_struct *tsk = wd_data->tsk;
|
||||
|
||||
printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
|
||||
(dev->driver ? dev->driver->name : "no driver"));
|
||||
|
||||
printk(KERN_EMERG "dpm suspend stack:\n");
|
||||
show_stack(tsk, NULL);
|
||||
|
||||
BUG();
|
||||
}
|
||||
|
||||
/**
|
||||
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
|
||||
* @state: PM transition of the system being carried out.
|
||||
@@ -1130,6 +1160,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
struct timer_list timer;
|
||||
struct dpm_drv_wd_data data;
|
||||
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
|
||||
|
||||
dpm_wait_for_children(dev, async);
|
||||
@@ -1153,6 +1185,14 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||||
|
||||
if (dev->power.syscore)
|
||||
goto Complete;
|
||||
|
||||
data.dev = dev;
|
||||
data.tsk = get_current();
|
||||
init_timer_on_stack(&timer);
|
||||
timer.expires = jiffies + HZ * 12;
|
||||
timer.function = dpm_drv_timeout;
|
||||
timer.data = (unsigned long)&data;
|
||||
add_timer(&timer);
|
||||
|
||||
dpm_watchdog_set(&wd, dev);
|
||||
device_lock(dev);
|
||||
@@ -1213,6 +1253,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||||
device_unlock(dev);
|
||||
dpm_watchdog_clear(&wd);
|
||||
|
||||
del_timer_sync(&timer);
|
||||
destroy_timer_on_stack(&timer);
|
||||
|
||||
Complete:
|
||||
complete_all(&dev->power.completion);
|
||||
if (error)
|
||||
|
||||
@@ -6,6 +6,19 @@ menu "Character devices"
|
||||
|
||||
source "drivers/tty/Kconfig"
|
||||
|
||||
config DEVMEM
|
||||
bool "Memory device driver"
|
||||
default y
|
||||
help
|
||||
The memory driver provides two character devices, mem and kmem, which
|
||||
provide access to the system's memory. The mem device is a view of
|
||||
physical memory, and each byte in the device corresponds to the
|
||||
matching physical address. The kmem device is the same as mem, but
|
||||
the addresses correspond to the kernel's virtual address space rather
|
||||
than physical memory. These devices are standard parts of a Linux
|
||||
system and most users should say Y here. You might say N if very
|
||||
security conscience or memory is tight.
|
||||
|
||||
config DEVKMEM
|
||||
bool "/dev/kmem virtual device support"
|
||||
default y
|
||||
@@ -579,6 +592,10 @@ config DEVPORT
|
||||
depends on ISA || PCI
|
||||
default y
|
||||
|
||||
config DCC_TTY
|
||||
tristate "DCC tty driver"
|
||||
depends on ARM
|
||||
|
||||
source "drivers/s390/char/Kconfig"
|
||||
|
||||
config MSM_SMD_PKT
|
||||
|
||||
@@ -55,6 +55,7 @@ obj-$(CONFIG_PCMCIA) += pcmcia/
|
||||
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
|
||||
obj-$(CONFIG_TCG_TPM) += tpm/
|
||||
|
||||
obj-$(CONFIG_DCC_TTY) += dcc_tty.o
|
||||
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
|
||||
|
||||
obj-$(CONFIG_JS_RTC) += js-rtc.o
|
||||
|
||||
326
drivers/char/dcc_tty.c
Normal file
326
drivers/char/dcc_tty.c
Normal file
@@ -0,0 +1,326 @@
|
||||
/* drivers/char/dcc_tty.c
|
||||
*
|
||||
* Copyright (C) 2007 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/tty_driver.h>
|
||||
#include <linux/tty_flip.h>
|
||||
|
||||
MODULE_DESCRIPTION("DCC TTY Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION("1.0");
|
||||
|
||||
DEFINE_SPINLOCK(g_dcc_tty_lock);
|
||||
static struct hrtimer g_dcc_timer;
|
||||
static char g_dcc_buffer[16];
|
||||
static int g_dcc_buffer_head;
|
||||
static int g_dcc_buffer_count;
|
||||
static unsigned g_dcc_write_delay_usecs = 1;
|
||||
static struct tty_driver *g_dcc_tty_driver;
|
||||
static struct tty_struct *g_dcc_tty;
|
||||
static int g_dcc_tty_open_count;
|
||||
|
||||
static void dcc_poll_locked(void)
|
||||
{
|
||||
char ch;
|
||||
int rch;
|
||||
int written;
|
||||
|
||||
while (g_dcc_buffer_count) {
|
||||
ch = g_dcc_buffer[g_dcc_buffer_head];
|
||||
asm(
|
||||
"mrc 14, 0, r15, c0, c1, 0\n"
|
||||
"mcrcc 14, 0, %1, c0, c5, 0\n"
|
||||
"movcc %0, #1\n"
|
||||
"movcs %0, #0\n"
|
||||
: "=r" (written)
|
||||
: "r" (ch)
|
||||
);
|
||||
if (written) {
|
||||
if (ch == '\n')
|
||||
g_dcc_buffer[g_dcc_buffer_head] = '\r';
|
||||
else {
|
||||
g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
|
||||
g_dcc_buffer_count--;
|
||||
if (g_dcc_tty)
|
||||
tty_wakeup(g_dcc_tty);
|
||||
}
|
||||
g_dcc_write_delay_usecs = 1;
|
||||
} else {
|
||||
if (g_dcc_write_delay_usecs > 0x100)
|
||||
break;
|
||||
g_dcc_write_delay_usecs <<= 1;
|
||||
udelay(g_dcc_write_delay_usecs);
|
||||
}
|
||||
}
|
||||
|
||||
if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
|
||||
asm(
|
||||
"mrc 14, 0, %0, c0, c1, 0\n"
|
||||
"tst %0, #(1 << 30)\n"
|
||||
"moveq %0, #-1\n"
|
||||
"mrcne 14, 0, %0, c0, c5, 0\n"
|
||||
: "=r" (rch)
|
||||
);
|
||||
if (rch >= 0) {
|
||||
ch = rch;
|
||||
tty_insert_flip_string(g_dcc_tty->port, &ch, 1);
|
||||
tty_flip_buffer_push(g_dcc_tty->port);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (g_dcc_buffer_count)
|
||||
hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
|
||||
else
|
||||
hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
|
||||
}
|
||||
|
||||
static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
|
||||
{
|
||||
int ret;
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
|
||||
if (g_dcc_tty == NULL || g_dcc_tty == tty) {
|
||||
g_dcc_tty = tty;
|
||||
g_dcc_tty_open_count++;
|
||||
ret = 0;
|
||||
} else
|
||||
ret = -EBUSY;
|
||||
spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
|
||||
|
||||
printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
|
||||
{
|
||||
printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
|
||||
if (g_dcc_tty == tty) {
|
||||
if (--g_dcc_tty_open_count == 0)
|
||||
g_dcc_tty = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int dcc_write(const unsigned char *buf_start, int count)
|
||||
{
|
||||
const unsigned char *buf = buf_start;
|
||||
unsigned long irq_flags;
|
||||
int copy_len;
|
||||
int space_left;
|
||||
int tail;
|
||||
|
||||
if (count < 1)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
|
||||
do {
|
||||
tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
|
||||
copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
|
||||
space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
|
||||
if (copy_len > space_left)
|
||||
copy_len = space_left;
|
||||
if (copy_len > count)
|
||||
copy_len = count;
|
||||
memcpy(&g_dcc_buffer[tail], buf, copy_len);
|
||||
g_dcc_buffer_count += copy_len;
|
||||
buf += copy_len;
|
||||
count -= copy_len;
|
||||
if (copy_len < count && copy_len < space_left) {
|
||||
space_left -= copy_len;
|
||||
copy_len = count;
|
||||
if (copy_len > space_left) {
|
||||
copy_len = space_left;
|
||||
}
|
||||
memcpy(g_dcc_buffer, buf, copy_len);
|
||||
buf += copy_len;
|
||||
count -= copy_len;
|
||||
g_dcc_buffer_count += copy_len;
|
||||
}
|
||||
dcc_poll_locked();
|
||||
space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
|
||||
} while(count && space_left);
|
||||
spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
|
||||
return buf - buf_start;
|
||||
}
|
||||
|
||||
static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
|
||||
{
|
||||
int ret;
|
||||
/* printk("dcc_tty_write %p, %d\n", buf, count); */
|
||||
ret = dcc_write(buf, count);
|
||||
if (ret != count)
|
||||
printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dcc_tty_write_room(struct tty_struct *tty)
|
||||
{
|
||||
int space_left;
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
|
||||
space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
|
||||
spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
|
||||
return space_left;
|
||||
}
|
||||
|
||||
static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
|
||||
{
|
||||
int ret;
|
||||
asm(
|
||||
"mrc 14, 0, %0, c0, c1, 0\n"
|
||||
"mov %0, %0, LSR #30\n"
|
||||
"and %0, %0, #1\n"
|
||||
: "=r" (ret)
|
||||
);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void dcc_tty_unthrottle(struct tty_struct * tty)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
|
||||
dcc_poll_locked();
|
||||
spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
|
||||
}
|
||||
|
||||
static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
|
||||
dcc_poll_locked();
|
||||
spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
void dcc_console_write(struct console *co, const char *b, unsigned count)
|
||||
{
|
||||
#if 1
|
||||
dcc_write(b, count);
|
||||
#else
|
||||
/* blocking printk */
|
||||
while (count > 0) {
|
||||
int written;
|
||||
written = dcc_write(b, count);
|
||||
if (written) {
|
||||
b += written;
|
||||
count -= written;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static struct tty_driver *dcc_console_device(struct console *c, int *index)
|
||||
{
|
||||
*index = 0;
|
||||
return g_dcc_tty_driver;
|
||||
}
|
||||
|
||||
static int __init dcc_console_setup(struct console *co, char *options)
|
||||
{
|
||||
if (co->index != 0)
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static struct console dcc_console =
|
||||
{
|
||||
.name = "ttyDCC",
|
||||
.write = dcc_console_write,
|
||||
.device = dcc_console_device,
|
||||
.setup = dcc_console_setup,
|
||||
.flags = CON_PRINTBUFFER,
|
||||
.index = -1,
|
||||
};
|
||||
|
||||
static struct tty_operations dcc_tty_ops = {
|
||||
.open = dcc_tty_open,
|
||||
.close = dcc_tty_close,
|
||||
.write = dcc_tty_write,
|
||||
.write_room = dcc_tty_write_room,
|
||||
.chars_in_buffer = dcc_tty_chars_in_buffer,
|
||||
.unthrottle = dcc_tty_unthrottle,
|
||||
};
|
||||
|
||||
static int __init dcc_tty_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
g_dcc_timer.function = dcc_tty_timer_func;
|
||||
|
||||
g_dcc_tty_driver = alloc_tty_driver(1);
|
||||
if (!g_dcc_tty_driver) {
|
||||
printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc_tty_driver_failed;
|
||||
}
|
||||
g_dcc_tty_driver->owner = THIS_MODULE;
|
||||
g_dcc_tty_driver->driver_name = "dcc";
|
||||
g_dcc_tty_driver->name = "ttyDCC";
|
||||
g_dcc_tty_driver->major = 0; // auto assign
|
||||
g_dcc_tty_driver->minor_start = 0;
|
||||
g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
|
||||
g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
|
||||
g_dcc_tty_driver->init_termios = tty_std_termios;
|
||||
g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
|
||||
tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
|
||||
ret = tty_register_driver(g_dcc_tty_driver);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
|
||||
goto err_tty_register_driver_failed;
|
||||
}
|
||||
tty_register_device(g_dcc_tty_driver, 0, NULL);
|
||||
|
||||
register_console(&dcc_console);
|
||||
hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
|
||||
|
||||
return 0;
|
||||
|
||||
err_tty_register_driver_failed:
|
||||
put_tty_driver(g_dcc_tty_driver);
|
||||
g_dcc_tty_driver = NULL;
|
||||
err_alloc_tty_driver_failed:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit dcc_tty_exit(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
tty_unregister_device(g_dcc_tty_driver, 0);
|
||||
ret = tty_unregister_driver(g_dcc_tty_driver);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
|
||||
} else {
|
||||
put_tty_driver(g_dcc_tty_driver);
|
||||
}
|
||||
g_dcc_tty_driver = NULL;
|
||||
}
|
||||
|
||||
module_init(dcc_tty_init);
|
||||
module_exit(dcc_tty_exit);
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
|
||||
#ifdef CONFIG_STRICT_DEVMEM
|
||||
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
|
||||
{
|
||||
@@ -83,7 +84,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEVMEM
|
||||
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
|
||||
{
|
||||
}
|
||||
@@ -210,6 +213,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
|
||||
*ppos += written;
|
||||
return written;
|
||||
}
|
||||
#endif /* CONFIG_DEVMEM */
|
||||
|
||||
#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
|
||||
|
||||
int __weak phys_mem_access_prot_allowed(struct file *file,
|
||||
unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
|
||||
@@ -331,6 +337,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_DEVMEM */
|
||||
|
||||
#ifdef CONFIG_DEVKMEM
|
||||
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
|
||||
@@ -691,6 +698,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
|
||||
return file->f_pos = 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
|
||||
|
||||
/*
|
||||
* The memory devices use the full 32/64 bits of the offset, and so we cannot
|
||||
* check against negative addresses: they are ok. The return value is weird,
|
||||
@@ -724,10 +733,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
|
||||
static int open_port(struct inode *inode, struct file *filp)
|
||||
{
|
||||
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define zero_lseek null_lseek
|
||||
#define full_lseek null_lseek
|
||||
@@ -737,6 +750,7 @@ static int open_port(struct inode *inode, struct file *filp)
|
||||
#define open_mem open_port
|
||||
#define open_kmem open_mem
|
||||
|
||||
#ifdef CONFIG_DEVMEM
|
||||
static const struct file_operations mem_fops = {
|
||||
.llseek = memory_lseek,
|
||||
.read = read_mem,
|
||||
@@ -745,6 +759,7 @@ static const struct file_operations mem_fops = {
|
||||
.open = open_mem,
|
||||
.get_unmapped_area = get_unmapped_area_mem,
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEVKMEM
|
||||
static const struct file_operations kmem_fops = {
|
||||
@@ -806,7 +821,9 @@ static const struct memdev {
|
||||
const struct file_operations *fops;
|
||||
struct backing_dev_info *dev_info;
|
||||
} devlist[] = {
|
||||
#ifdef CONFIG_DEVMEM
|
||||
[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
|
||||
#endif
|
||||
#ifdef CONFIG_DEVKMEM
|
||||
[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
|
||||
#endif
|
||||
|
||||
@@ -102,6 +102,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
|
||||
Be aware that not all cpufreq drivers support the conservative
|
||||
governor. If unsure have a look at the help section of the
|
||||
driver. Fallback governor will be the performance governor.
|
||||
|
||||
config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
|
||||
bool "interactive"
|
||||
select CPU_FREQ_GOV_INTERACTIVE
|
||||
help
|
||||
Use the CPUFreq governor 'interactive' as default. This allows
|
||||
you to get a full dynamic cpu frequency capable system by simply
|
||||
loading your cpufreq low-level hardware driver, using the
|
||||
'interactive' governor for latency-sensitive workloads.
|
||||
|
||||
endchoice
|
||||
|
||||
config CPU_FREQ_GOV_PERFORMANCE
|
||||
@@ -159,6 +169,24 @@ config CPU_FREQ_GOV_ONDEMAND
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config CPU_FREQ_GOV_INTERACTIVE
|
||||
tristate "'interactive' cpufreq policy governor"
|
||||
default n
|
||||
help
|
||||
'interactive' - This driver adds a dynamic cpufreq policy governor
|
||||
designed for latency-sensitive workloads.
|
||||
|
||||
This governor attempts to reduce the latency of clock
|
||||
increases so that the system is more responsive to
|
||||
interactive workloads.
|
||||
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called cpufreq_interactive.
|
||||
|
||||
For details, take a look at linux/Documentation/cpu-freq.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config CPU_FREQ_GOV_CONSERVATIVE
|
||||
tristate "'conservative' cpufreq governor"
|
||||
depends on CPU_FREQ
|
||||
|
||||
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
|
||||
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
|
||||
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
|
||||
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
|
||||
obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
|
||||
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
|
||||
|
||||
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
|
||||
|
||||
1343
drivers/cpufreq/cpufreq_interactive.c
Normal file
1343
drivers/cpufreq/cpufreq_interactive.c
Normal file
File diff suppressed because it is too large
Load Diff
@@ -182,7 +182,12 @@ static inline int performance_multiplier(void)
|
||||
|
||||
/* for higher loadavg, we are more reluctant */
|
||||
|
||||
mult += 2 * get_loadavg();
|
||||
/*
|
||||
* this doesn't work as intended - it is almost always 0, but can
|
||||
* sometimes, depending on workload, spike very high into the hundreds
|
||||
* even when the average cpu load is under 10%.
|
||||
*/
|
||||
/* mult += 2 * get_loadavg(); */
|
||||
|
||||
/* for IO wait tasks (per cpu!) we add 5x each */
|
||||
mult += 10 * nr_iowait_cpu(smp_processor_id());
|
||||
|
||||
@@ -1410,8 +1410,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
|
||||
* UGCI) cram a lot of unrelated inputs into the
|
||||
* same interface. */
|
||||
hidinput->report = report;
|
||||
if (drv->input_configured)
|
||||
drv->input_configured(hid, hidinput);
|
||||
if (drv->input_configured &&
|
||||
drv->input_configured(hid, hidinput))
|
||||
goto out_cleanup;
|
||||
if (input_register_device(hidinput->input))
|
||||
goto out_cleanup;
|
||||
hidinput = NULL;
|
||||
@@ -1432,8 +1433,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
|
||||
}
|
||||
|
||||
if (hidinput) {
|
||||
if (drv->input_configured)
|
||||
drv->input_configured(hid, hidinput);
|
||||
if (drv->input_configured &&
|
||||
drv->input_configured(hid, hidinput))
|
||||
goto out_cleanup;
|
||||
if (input_register_device(hidinput->input))
|
||||
goto out_cleanup;
|
||||
}
|
||||
|
||||
@@ -418,6 +418,16 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
|
||||
(usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
|
||||
td->mt_flags |= INPUT_MT_POINTER;
|
||||
|
||||
/* Only map fields from TouchScreen or TouchPad collections.
|
||||
* We need to ignore fields that belong to other collections
|
||||
* such as Mouse that might have the same GenericDesktop usages. */
|
||||
if (field->application == HID_DG_TOUCHSCREEN)
|
||||
set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
|
||||
else if (field->application == HID_DG_TOUCHPAD)
|
||||
set_bit(INPUT_PROP_POINTER, hi->input->propbit);
|
||||
else
|
||||
return 0;
|
||||
|
||||
if (usage->usage_index)
|
||||
prev_usage = &field->usage[usage->usage_index - 1];
|
||||
|
||||
@@ -747,12 +757,13 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
|
||||
mt_sync_frame(td, report->field[0]->hidinput->input);
|
||||
}
|
||||
|
||||
static void mt_touch_input_configured(struct hid_device *hdev,
|
||||
static int mt_touch_input_configured(struct hid_device *hdev,
|
||||
struct hid_input *hi)
|
||||
{
|
||||
struct mt_device *td = hid_get_drvdata(hdev);
|
||||
struct mt_class *cls = &td->mtclass;
|
||||
struct input_dev *input = hi->input;
|
||||
int ret;
|
||||
|
||||
if (!td->maxcontacts)
|
||||
td->maxcontacts = MT_DEFAULT_MAXCONTACT;
|
||||
@@ -767,9 +778,12 @@ static void mt_touch_input_configured(struct hid_device *hdev,
|
||||
if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
|
||||
td->mt_flags |= INPUT_MT_DROP_UNUSED;
|
||||
|
||||
input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
|
||||
ret = input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
td->mt_flags = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
|
||||
@@ -902,14 +916,15 @@ static void mt_post_parse(struct mt_device *td)
|
||||
cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
|
||||
}
|
||||
|
||||
static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
|
||||
static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
|
||||
{
|
||||
struct mt_device *td = hid_get_drvdata(hdev);
|
||||
char *name;
|
||||
const char *suffix = NULL;
|
||||
int ret = 0;
|
||||
|
||||
if (hi->report->id == td->mt_report_id)
|
||||
mt_touch_input_configured(hdev, hi);
|
||||
ret = mt_touch_input_configured(hdev, hi);
|
||||
|
||||
if (hi->report->field[0]->physical == HID_DG_STYLUS) {
|
||||
suffix = "Pen";
|
||||
@@ -925,6 +940,7 @@ static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
|
||||
hi->input->name = name;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
|
||||
@@ -35,6 +35,7 @@
|
||||
*/
|
||||
struct iio_event_interface {
|
||||
wait_queue_head_t wait;
|
||||
struct mutex read_lock;
|
||||
DECLARE_KFIFO(det_events, struct iio_event_data, 16);
|
||||
|
||||
struct list_head dev_attr_list;
|
||||
@@ -111,14 +112,16 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
|
||||
if (count < sizeof(struct iio_event_data))
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irq(&ev_int->wait.lock);
|
||||
if (mutex_lock_interruptible(&ev_int->read_lock))
|
||||
return -ERESTARTSYS;
|
||||
|
||||
if (kfifo_is_empty(&ev_int->det_events)) {
|
||||
if (filep->f_flags & O_NONBLOCK) {
|
||||
ret = -EAGAIN;
|
||||
goto error_unlock;
|
||||
}
|
||||
/* Blocking on device; waiting for something to be there */
|
||||
ret = wait_event_interruptible_locked_irq(ev_int->wait,
|
||||
ret = wait_event_interruptible(ev_int->wait,
|
||||
!kfifo_is_empty(&ev_int->det_events) ||
|
||||
indio_dev->info == NULL);
|
||||
if (ret)
|
||||
@@ -133,7 +136,7 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
|
||||
ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
|
||||
|
||||
error_unlock:
|
||||
spin_unlock_irq(&ev_int->wait.lock);
|
||||
mutex_unlock(&ev_int->read_lock);
|
||||
|
||||
return ret ? ret : copied;
|
||||
}
|
||||
@@ -425,6 +428,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
|
||||
{
|
||||
INIT_KFIFO(ev_int->det_events);
|
||||
init_waitqueue_head(&ev_int->wait);
|
||||
mutex_init(&ev_int->read_lock);
|
||||
}
|
||||
|
||||
static const char *iio_event_group_name = "events";
|
||||
@@ -488,6 +492,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
|
||||
|
||||
error_free_setup_event_lines:
|
||||
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
|
||||
mutex_destroy(&indio_dev->event_interface->read_lock);
|
||||
kfree(indio_dev->event_interface);
|
||||
error_ret:
|
||||
|
||||
@@ -514,5 +519,6 @@ void iio_device_unregister_eventset(struct iio_dev *indio_dev)
|
||||
return;
|
||||
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
|
||||
kfree(indio_dev->event_interface->group.attrs);
|
||||
mutex_destroy(&indio_dev->event_interface->read_lock);
|
||||
kfree(indio_dev->event_interface);
|
||||
}
|
||||
|
||||
@@ -174,6 +174,15 @@ config INPUT_APMPOWER
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called apm-power.
|
||||
|
||||
config INPUT_KEYRESET
|
||||
tristate "Reset key"
|
||||
depends on INPUT
|
||||
---help---
|
||||
Say Y here if you want to reboot when some keys are pressed;
|
||||
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called keyreset.
|
||||
|
||||
comment "Input Device Drivers"
|
||||
|
||||
source "drivers/input/keyboard/Kconfig"
|
||||
|
||||
@@ -25,3 +25,4 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
|
||||
obj-$(CONFIG_INPUT_MISC) += misc/
|
||||
|
||||
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
|
||||
obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <linux/major.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/wakelock.h>
|
||||
#include "input-compat.h"
|
||||
|
||||
struct evdev {
|
||||
@@ -46,6 +47,9 @@ struct evdev_client {
|
||||
unsigned int tail;
|
||||
unsigned int packet_head; /* [future] position of the first element of next packet */
|
||||
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
|
||||
struct wake_lock wake_lock;
|
||||
bool use_wake_lock;
|
||||
char name[28];
|
||||
struct fasync_struct *fasync;
|
||||
struct evdev *evdev;
|
||||
struct list_head node;
|
||||
@@ -150,10 +154,14 @@ static void __pass_event(struct evdev_client *client,
|
||||
client->buffer[client->tail].value = 0;
|
||||
|
||||
client->packet_head = client->tail;
|
||||
if (client->use_wake_lock)
|
||||
wake_unlock(&client->wake_lock);
|
||||
}
|
||||
|
||||
if (event->type == EV_SYN && event->code == SYN_REPORT) {
|
||||
client->packet_head = client->head;
|
||||
if (client->use_wake_lock)
|
||||
wake_lock(&client->wake_lock);
|
||||
kill_fasync(&client->fasync, SIGIO, POLL_IN);
|
||||
}
|
||||
}
|
||||
@@ -372,10 +380,14 @@ static int evdev_release(struct inode *inode, struct file *file)
|
||||
|
||||
evdev_detach_client(evdev, client);
|
||||
|
||||
if (client->use_wake_lock)
|
||||
wake_lock_destroy(&client->wake_lock);
|
||||
|
||||
if (is_vmalloc_addr(client))
|
||||
vfree(client);
|
||||
else
|
||||
kfree(client);
|
||||
kfree(client);
|
||||
|
||||
evdev_close_device(evdev);
|
||||
|
||||
@@ -408,6 +420,8 @@ static int evdev_open(struct inode *inode, struct file *file)
|
||||
|
||||
client->bufsize = bufsize;
|
||||
spin_lock_init(&client->buffer_lock);
|
||||
snprintf(client->name, sizeof(client->name), "%s-%d",
|
||||
dev_name(&evdev->dev), task_tgid_vnr(current));
|
||||
client->evdev = evdev;
|
||||
evdev_attach_client(evdev, client);
|
||||
|
||||
@@ -474,6 +488,9 @@ static int evdev_fetch_next_event(struct evdev_client *client,
|
||||
if (have_event) {
|
||||
*event = client->buffer[client->tail++];
|
||||
client->tail &= client->bufsize - 1;
|
||||
if (client->use_wake_lock &&
|
||||
client->packet_head == client->tail)
|
||||
wake_unlock(&client->wake_lock);
|
||||
}
|
||||
|
||||
spin_unlock_irq(&client->buffer_lock);
|
||||
@@ -811,6 +828,11 @@ static int evdev_handle_mt_request(struct input_dev *dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* HACK: disable conflicting EVIOCREVOKE until Android userspace stops using
|
||||
* EVIOCSSUSPENDBLOCK
|
||||
*/
|
||||
/*
|
||||
static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
|
||||
struct file *file)
|
||||
{
|
||||
@@ -821,6 +843,36 @@ static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
|
||||
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
static int evdev_enable_suspend_block(struct evdev *evdev,
|
||||
struct evdev_client *client)
|
||||
{
|
||||
if (client->use_wake_lock)
|
||||
return 0;
|
||||
|
||||
spin_lock_irq(&client->buffer_lock);
|
||||
wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
|
||||
client->use_wake_lock = true;
|
||||
if (client->packet_head != client->tail)
|
||||
wake_lock(&client->wake_lock);
|
||||
spin_unlock_irq(&client->buffer_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int evdev_disable_suspend_block(struct evdev *evdev,
|
||||
struct evdev_client *client)
|
||||
{
|
||||
if (!client->use_wake_lock)
|
||||
return 0;
|
||||
|
||||
spin_lock_irq(&client->buffer_lock);
|
||||
client->use_wake_lock = false;
|
||||
wake_lock_destroy(&client->wake_lock);
|
||||
spin_unlock_irq(&client->buffer_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
|
||||
void __user *p, int compat_mode)
|
||||
@@ -884,12 +936,17 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
|
||||
else
|
||||
return evdev_ungrab(evdev, client);
|
||||
|
||||
/*
|
||||
* HACK: disable conflicting EVIOCREVOKE until Android userspace stops
|
||||
* using EVIOCSSUSPENDBLOCK
|
||||
*/
|
||||
/*
|
||||
case EVIOCREVOKE:
|
||||
if (p)
|
||||
return -EINVAL;
|
||||
else
|
||||
return evdev_revoke(evdev, client, file);
|
||||
|
||||
*/
|
||||
case EVIOCSCLOCKID:
|
||||
if (copy_from_user(&i, p, sizeof(unsigned int)))
|
||||
return -EFAULT;
|
||||
@@ -909,6 +966,15 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
|
||||
|
||||
case EVIOCSKEYCODE_V2:
|
||||
return evdev_handle_set_keycode_v2(dev, p);
|
||||
|
||||
case EVIOCGSUSPENDBLOCK:
|
||||
return put_user(client->use_wake_lock, ip);
|
||||
|
||||
case EVIOCSSUSPENDBLOCK:
|
||||
if (p)
|
||||
return evdev_enable_suspend_block(evdev, client);
|
||||
else
|
||||
return evdev_disable_suspend_block(evdev, client);
|
||||
}
|
||||
|
||||
size = _IOC_SIZE(cmd);
|
||||
|
||||
239
drivers/input/keyreset.c
Normal file
239
drivers/input/keyreset.c
Normal file
@@ -0,0 +1,239 @@
|
||||
/* drivers/input/keyreset.c
|
||||
*
|
||||
* Copyright (C) 2008 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/input.h>
|
||||
#include <linux/keyreset.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/syscalls.h>
|
||||
|
||||
|
||||
struct keyreset_state {
|
||||
struct input_handler input_handler;
|
||||
unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
|
||||
unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
|
||||
unsigned long key[BITS_TO_LONGS(KEY_CNT)];
|
||||
spinlock_t lock;
|
||||
int key_down_target;
|
||||
int key_down;
|
||||
int key_up;
|
||||
int restart_disabled;
|
||||
int (*reset_fn)(void);
|
||||
};
|
||||
|
||||
int restart_requested;
|
||||
static void deferred_restart(struct work_struct *dummy)
|
||||
{
|
||||
restart_requested = 2;
|
||||
sys_sync();
|
||||
restart_requested = 3;
|
||||
kernel_restart(NULL);
|
||||
}
|
||||
static DECLARE_WORK(restart_work, deferred_restart);
|
||||
|
||||
static void keyreset_event(struct input_handle *handle, unsigned int type,
|
||||
unsigned int code, int value)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct keyreset_state *state = handle->private;
|
||||
|
||||
if (type != EV_KEY)
|
||||
return;
|
||||
|
||||
if (code >= KEY_MAX)
|
||||
return;
|
||||
|
||||
if (!test_bit(code, state->keybit))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&state->lock, flags);
|
||||
if (!test_bit(code, state->key) == !value)
|
||||
goto done;
|
||||
__change_bit(code, state->key);
|
||||
if (test_bit(code, state->upbit)) {
|
||||
if (value) {
|
||||
state->restart_disabled = 1;
|
||||
state->key_up++;
|
||||
} else
|
||||
state->key_up--;
|
||||
} else {
|
||||
if (value)
|
||||
state->key_down++;
|
||||
else
|
||||
state->key_down--;
|
||||
}
|
||||
if (state->key_down == 0 && state->key_up == 0)
|
||||
state->restart_disabled = 0;
|
||||
|
||||
pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value,
|
||||
state->key_down, state->key_up, state->restart_disabled);
|
||||
|
||||
if (value && !state->restart_disabled &&
|
||||
state->key_down == state->key_down_target) {
|
||||
state->restart_disabled = 1;
|
||||
if (restart_requested)
|
||||
panic("keyboard reset failed, %d", restart_requested);
|
||||
if (state->reset_fn) {
|
||||
restart_requested = state->reset_fn();
|
||||
} else {
|
||||
pr_info("keyboard reset\n");
|
||||
schedule_work(&restart_work);
|
||||
restart_requested = 1;
|
||||
}
|
||||
}
|
||||
done:
|
||||
spin_unlock_irqrestore(&state->lock, flags);
|
||||
}
|
||||
|
||||
static int keyreset_connect(struct input_handler *handler,
|
||||
struct input_dev *dev,
|
||||
const struct input_device_id *id)
|
||||
{
|
||||
int i;
|
||||
int ret;
|
||||
struct input_handle *handle;
|
||||
struct keyreset_state *state =
|
||||
container_of(handler, struct keyreset_state, input_handler);
|
||||
|
||||
for (i = 0; i < KEY_MAX; i++) {
|
||||
if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
|
||||
break;
|
||||
}
|
||||
if (i == KEY_MAX)
|
||||
return -ENODEV;
|
||||
|
||||
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
|
||||
if (!handle)
|
||||
return -ENOMEM;
|
||||
|
||||
handle->dev = dev;
|
||||
handle->handler = handler;
|
||||
handle->name = "keyreset";
|
||||
handle->private = state;
|
||||
|
||||
ret = input_register_handle(handle);
|
||||
if (ret)
|
||||
goto err_input_register_handle;
|
||||
|
||||
ret = input_open_device(handle);
|
||||
if (ret)
|
||||
goto err_input_open_device;
|
||||
|
||||
pr_info("using input dev %s for key reset\n", dev->name);
|
||||
|
||||
return 0;
|
||||
|
||||
err_input_open_device:
|
||||
input_unregister_handle(handle);
|
||||
err_input_register_handle:
|
||||
kfree(handle);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void keyreset_disconnect(struct input_handle *handle)
|
||||
{
|
||||
input_close_device(handle);
|
||||
input_unregister_handle(handle);
|
||||
kfree(handle);
|
||||
}
|
||||
|
||||
static const struct input_device_id keyreset_ids[] = {
|
||||
{
|
||||
.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
|
||||
.evbit = { BIT_MASK(EV_KEY) },
|
||||
},
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(input, keyreset_ids);
|
||||
|
||||
static int keyreset_probe(struct platform_device *pdev)
|
||||
{
|
||||
int ret;
|
||||
int key, *keyp;
|
||||
struct keyreset_state *state;
|
||||
struct keyreset_platform_data *pdata = pdev->dev.platform_data;
|
||||
|
||||
if (!pdata)
|
||||
return -EINVAL;
|
||||
|
||||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||||
if (!state)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&state->lock);
|
||||
keyp = pdata->keys_down;
|
||||
while ((key = *keyp++)) {
|
||||
if (key >= KEY_MAX)
|
||||
continue;
|
||||
state->key_down_target++;
|
||||
__set_bit(key, state->keybit);
|
||||
}
|
||||
if (pdata->keys_up) {
|
||||
keyp = pdata->keys_up;
|
||||
while ((key = *keyp++)) {
|
||||
if (key >= KEY_MAX)
|
||||
continue;
|
||||
__set_bit(key, state->keybit);
|
||||
__set_bit(key, state->upbit);
|
||||
}
|
||||
}
|
||||
|
||||
if (pdata->reset_fn)
|
||||
state->reset_fn = pdata->reset_fn;
|
||||
|
||||
state->input_handler.event = keyreset_event;
|
||||
state->input_handler.connect = keyreset_connect;
|
||||
state->input_handler.disconnect = keyreset_disconnect;
|
||||
state->input_handler.name = KEYRESET_NAME;
|
||||
state->input_handler.id_table = keyreset_ids;
|
||||
ret = input_register_handler(&state->input_handler);
|
||||
if (ret) {
|
||||
kfree(state);
|
||||
return ret;
|
||||
}
|
||||
platform_set_drvdata(pdev, state);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int keyreset_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct keyreset_state *state = platform_get_drvdata(pdev);
|
||||
input_unregister_handler(&state->input_handler);
|
||||
kfree(state);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
struct platform_driver keyreset_driver = {
|
||||
.driver.name = KEYRESET_NAME,
|
||||
.probe = keyreset_probe,
|
||||
.remove = keyreset_remove,
|
||||
};
|
||||
|
||||
static int __init keyreset_init(void)
|
||||
{
|
||||
return platform_driver_register(&keyreset_driver);
|
||||
}
|
||||
|
||||
static void __exit keyreset_exit(void)
|
||||
{
|
||||
return platform_driver_unregister(&keyreset_driver);
|
||||
}
|
||||
|
||||
module_init(keyreset_init);
|
||||
module_exit(keyreset_exit);
|
||||
@@ -308,6 +308,17 @@ config INPUT_ATI_REMOTE2
|
||||
To compile this driver as a module, choose M here: the module will be
|
||||
called ati_remote2.
|
||||
|
||||
config INPUT_KEYCHORD
|
||||
tristate "Key chord input driver support"
|
||||
help
|
||||
Say Y here if you want to enable the key chord driver
|
||||
accessible at /dev/keychord. This driver can be used
|
||||
for receiving notifications when client specified key
|
||||
combinations are pressed.
|
||||
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called keychord.
|
||||
|
||||
config INPUT_KEYSPAN_REMOTE
|
||||
tristate "Keyspan DMR USB remote control"
|
||||
depends on USB_ARCH_HAS_HCD
|
||||
@@ -443,6 +454,11 @@ config INPUT_SGI_BTNS
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called sgi_btns.
|
||||
|
||||
config INPUT_GPIO
|
||||
tristate "GPIO driver support"
|
||||
help
|
||||
Say Y here if you want to support gpio based keys, wheels etc...
|
||||
|
||||
config HP_SDC_RTC
|
||||
tristate "HP SDC Real Time Clock"
|
||||
depends on (GSC || HP300) && SERIO
|
||||
|
||||
@@ -29,9 +29,11 @@ obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
|
||||
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
|
||||
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
|
||||
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
|
||||
obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
|
||||
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
|
||||
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
|
||||
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
|
||||
obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o
|
||||
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
|
||||
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
|
||||
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
|
||||
|
||||
192
drivers/input/misc/gpio_axis.c
Normal file
192
drivers/input/misc/gpio_axis.c
Normal file
@@ -0,0 +1,192 @@
|
||||
/* drivers/input/misc/gpio_axis.c
|
||||
*
|
||||
* Copyright (C) 2007 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio_event.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct gpio_axis_state {
|
||||
struct gpio_event_input_devs *input_devs;
|
||||
struct gpio_event_axis_info *info;
|
||||
uint32_t pos;
|
||||
};
|
||||
|
||||
uint16_t gpio_axis_4bit_gray_map_table[] = {
|
||||
[0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
|
||||
[0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
|
||||
[0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
|
||||
[0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
|
||||
[0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
|
||||
[0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
|
||||
[0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
|
||||
[0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
|
||||
};
|
||||
uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
|
||||
{
|
||||
return gpio_axis_4bit_gray_map_table[in];
|
||||
}
|
||||
|
||||
uint16_t gpio_axis_5bit_singletrack_map_table[] = {
|
||||
[0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */
|
||||
[0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */
|
||||
[0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */
|
||||
[0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */
|
||||
[0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */
|
||||
[0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */
|
||||
[0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */
|
||||
[0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */
|
||||
[0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */
|
||||
[0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */
|
||||
};
|
||||
uint16_t gpio_axis_5bit_singletrack_map(
|
||||
struct gpio_event_axis_info *info, uint16_t in)
|
||||
{
|
||||
return gpio_axis_5bit_singletrack_map_table[in];
|
||||
}
|
||||
|
||||
static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
|
||||
{
|
||||
struct gpio_event_axis_info *ai = as->info;
|
||||
int i;
|
||||
int change;
|
||||
uint16_t state = 0;
|
||||
uint16_t pos;
|
||||
uint16_t old_pos = as->pos;
|
||||
for (i = ai->count - 1; i >= 0; i--)
|
||||
state = (state << 1) | gpio_get_value(ai->gpio[i]);
|
||||
pos = ai->map(ai, state);
|
||||
if (ai->flags & GPIOEAF_PRINT_RAW)
|
||||
pr_info("axis %d-%d raw %x, pos %d -> %d\n",
|
||||
ai->type, ai->code, state, old_pos, pos);
|
||||
if (report && pos != old_pos) {
|
||||
if (ai->type == EV_REL) {
|
||||
change = (ai->decoded_size + pos - old_pos) %
|
||||
ai->decoded_size;
|
||||
if (change > ai->decoded_size / 2)
|
||||
change -= ai->decoded_size;
|
||||
if (change == ai->decoded_size / 2) {
|
||||
if (ai->flags & GPIOEAF_PRINT_EVENT)
|
||||
pr_info("axis %d-%d unknown direction, "
|
||||
"pos %d -> %d\n", ai->type,
|
||||
ai->code, old_pos, pos);
|
||||
change = 0; /* no closest direction */
|
||||
}
|
||||
if (ai->flags & GPIOEAF_PRINT_EVENT)
|
||||
pr_info("axis %d-%d change %d\n",
|
||||
ai->type, ai->code, change);
|
||||
input_report_rel(as->input_devs->dev[ai->dev],
|
||||
ai->code, change);
|
||||
} else {
|
||||
if (ai->flags & GPIOEAF_PRINT_EVENT)
|
||||
pr_info("axis %d-%d now %d\n",
|
||||
ai->type, ai->code, pos);
|
||||
input_event(as->input_devs->dev[ai->dev],
|
||||
ai->type, ai->code, pos);
|
||||
}
|
||||
input_sync(as->input_devs->dev[ai->dev]);
|
||||
}
|
||||
as->pos = pos;
|
||||
}
|
||||
|
||||
static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct gpio_axis_state *as = dev_id;
|
||||
gpio_event_update_axis(as, 1);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
|
||||
struct gpio_event_info *info, void **data, int func)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
int irq;
|
||||
struct gpio_event_axis_info *ai;
|
||||
struct gpio_axis_state *as;
|
||||
|
||||
ai = container_of(info, struct gpio_event_axis_info, info);
|
||||
if (func == GPIO_EVENT_FUNC_SUSPEND) {
|
||||
for (i = 0; i < ai->count; i++)
|
||||
disable_irq(gpio_to_irq(ai->gpio[i]));
|
||||
return 0;
|
||||
}
|
||||
if (func == GPIO_EVENT_FUNC_RESUME) {
|
||||
for (i = 0; i < ai->count; i++)
|
||||
enable_irq(gpio_to_irq(ai->gpio[i]));
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (func == GPIO_EVENT_FUNC_INIT) {
|
||||
*data = as = kmalloc(sizeof(*as), GFP_KERNEL);
|
||||
if (as == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc_axis_state_failed;
|
||||
}
|
||||
as->input_devs = input_devs;
|
||||
as->info = ai;
|
||||
if (ai->dev >= input_devs->count) {
|
||||
pr_err("gpio_event_axis: bad device index %d >= %d "
|
||||
"for %d:%d\n", ai->dev, input_devs->count,
|
||||
ai->type, ai->code);
|
||||
ret = -EINVAL;
|
||||
goto err_bad_device_index;
|
||||
}
|
||||
|
||||
input_set_capability(input_devs->dev[ai->dev],
|
||||
ai->type, ai->code);
|
||||
if (ai->type == EV_ABS) {
|
||||
input_set_abs_params(input_devs->dev[ai->dev], ai->code,
|
||||
0, ai->decoded_size - 1, 0, 0);
|
||||
}
|
||||
for (i = 0; i < ai->count; i++) {
|
||||
ret = gpio_request(ai->gpio[i], "gpio_event_axis");
|
||||
if (ret < 0)
|
||||
goto err_request_gpio_failed;
|
||||
ret = gpio_direction_input(ai->gpio[i]);
|
||||
if (ret < 0)
|
||||
goto err_gpio_direction_input_failed;
|
||||
ret = irq = gpio_to_irq(ai->gpio[i]);
|
||||
if (ret < 0)
|
||||
goto err_get_irq_num_failed;
|
||||
ret = request_irq(irq, gpio_axis_irq_handler,
|
||||
IRQF_TRIGGER_RISING |
|
||||
IRQF_TRIGGER_FALLING,
|
||||
"gpio_event_axis", as);
|
||||
if (ret < 0)
|
||||
goto err_request_irq_failed;
|
||||
}
|
||||
gpio_event_update_axis(as, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
as = *data;
|
||||
for (i = ai->count - 1; i >= 0; i--) {
|
||||
free_irq(gpio_to_irq(ai->gpio[i]), as);
|
||||
err_request_irq_failed:
|
||||
err_get_irq_num_failed:
|
||||
err_gpio_direction_input_failed:
|
||||
gpio_free(ai->gpio[i]);
|
||||
err_request_gpio_failed:
|
||||
;
|
||||
}
|
||||
err_bad_device_index:
|
||||
kfree(as);
|
||||
*data = NULL;
|
||||
err_alloc_axis_state_failed:
|
||||
return ret;
|
||||
}
|
||||
228
drivers/input/misc/gpio_event.c
Normal file
228
drivers/input/misc/gpio_event.c
Normal file
@@ -0,0 +1,228 @@
|
||||
/* drivers/input/misc/gpio_event.c
|
||||
*
|
||||
* Copyright (C) 2007 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/gpio_event.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct gpio_event {
|
||||
struct gpio_event_input_devs *input_devs;
|
||||
const struct gpio_event_platform_data *info;
|
||||
void *state[0];
|
||||
};
|
||||
|
||||
static int gpio_input_event(
|
||||
struct input_dev *dev, unsigned int type, unsigned int code, int value)
|
||||
{
|
||||
int i;
|
||||
int devnr;
|
||||
int ret = 0;
|
||||
int tmp_ret;
|
||||
struct gpio_event_info **ii;
|
||||
struct gpio_event *ip = input_get_drvdata(dev);
|
||||
|
||||
for (devnr = 0; devnr < ip->input_devs->count; devnr++)
|
||||
if (ip->input_devs->dev[devnr] == dev)
|
||||
break;
|
||||
if (devnr == ip->input_devs->count) {
|
||||
pr_err("gpio_input_event: unknown device %p\n", dev);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
|
||||
if ((*ii)->event) {
|
||||
tmp_ret = (*ii)->event(ip->input_devs, *ii,
|
||||
&ip->state[i],
|
||||
devnr, type, code, value);
|
||||
if (tmp_ret)
|
||||
ret = tmp_ret;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int gpio_event_call_all_func(struct gpio_event *ip, int func)
|
||||
{
|
||||
int i;
|
||||
int ret;
|
||||
struct gpio_event_info **ii;
|
||||
|
||||
if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
|
||||
ii = ip->info->info;
|
||||
for (i = 0; i < ip->info->info_count; i++, ii++) {
|
||||
if ((*ii)->func == NULL) {
|
||||
ret = -ENODEV;
|
||||
pr_err("gpio_event_probe: Incomplete pdata, "
|
||||
"no function\n");
|
||||
goto err_no_func;
|
||||
}
|
||||
if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
|
||||
continue;
|
||||
ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
|
||||
func);
|
||||
if (ret) {
|
||||
pr_err("gpio_event_probe: function failed\n");
|
||||
goto err_func_failed;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
i = ip->info->info_count;
|
||||
ii = ip->info->info + i;
|
||||
while (i > 0) {
|
||||
i--;
|
||||
ii--;
|
||||
if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
|
||||
continue;
|
||||
(*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
|
||||
err_func_failed:
|
||||
err_no_func:
|
||||
;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
|
||||
{
|
||||
gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
|
||||
if (ip->info->power)
|
||||
ip->info->power(ip->info, 0);
|
||||
}
|
||||
|
||||
static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
|
||||
{
|
||||
if (ip->info->power)
|
||||
ip->info->power(ip->info, 1);
|
||||
gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
|
||||
}
|
||||
|
||||
static int gpio_event_probe(struct platform_device *pdev)
|
||||
{
|
||||
int err;
|
||||
struct gpio_event *ip;
|
||||
struct gpio_event_platform_data *event_info;
|
||||
int dev_count = 1;
|
||||
int i;
|
||||
int registered = 0;
|
||||
|
||||
event_info = pdev->dev.platform_data;
|
||||
if (event_info == NULL) {
|
||||
pr_err("gpio_event_probe: No pdata\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
if ((!event_info->name && !event_info->names[0]) ||
|
||||
!event_info->info || !event_info->info_count) {
|
||||
pr_err("gpio_event_probe: Incomplete pdata\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
if (!event_info->name)
|
||||
while (event_info->names[dev_count])
|
||||
dev_count++;
|
||||
ip = kzalloc(sizeof(*ip) +
|
||||
sizeof(ip->state[0]) * event_info->info_count +
|
||||
sizeof(*ip->input_devs) +
|
||||
sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
|
||||
if (ip == NULL) {
|
||||
err = -ENOMEM;
|
||||
pr_err("gpio_event_probe: Failed to allocate private data\n");
|
||||
goto err_kp_alloc_failed;
|
||||
}
|
||||
ip->input_devs = (void*)&ip->state[event_info->info_count];
|
||||
platform_set_drvdata(pdev, ip);
|
||||
|
||||
for (i = 0; i < dev_count; i++) {
|
||||
struct input_dev *input_dev = input_allocate_device();
|
||||
if (input_dev == NULL) {
|
||||
err = -ENOMEM;
|
||||
pr_err("gpio_event_probe: "
|
||||
"Failed to allocate input device\n");
|
||||
goto err_input_dev_alloc_failed;
|
||||
}
|
||||
input_set_drvdata(input_dev, ip);
|
||||
input_dev->name = event_info->name ?
|
||||
event_info->name : event_info->names[i];
|
||||
input_dev->event = gpio_input_event;
|
||||
ip->input_devs->dev[i] = input_dev;
|
||||
}
|
||||
ip->input_devs->count = dev_count;
|
||||
ip->info = event_info;
|
||||
if (event_info->power)
|
||||
ip->info->power(ip->info, 1);
|
||||
|
||||
err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
|
||||
if (err)
|
||||
goto err_call_all_func_failed;
|
||||
|
||||
for (i = 0; i < dev_count; i++) {
|
||||
err = input_register_device(ip->input_devs->dev[i]);
|
||||
if (err) {
|
||||
pr_err("gpio_event_probe: Unable to register %s "
|
||||
"input device\n", ip->input_devs->dev[i]->name);
|
||||
goto err_input_register_device_failed;
|
||||
}
|
||||
registered++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_input_register_device_failed:
|
||||
gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
|
||||
err_call_all_func_failed:
|
||||
if (event_info->power)
|
||||
ip->info->power(ip->info, 0);
|
||||
for (i = 0; i < registered; i++)
|
||||
input_unregister_device(ip->input_devs->dev[i]);
|
||||
for (i = dev_count - 1; i >= registered; i--) {
|
||||
input_free_device(ip->input_devs->dev[i]);
|
||||
err_input_dev_alloc_failed:
|
||||
;
|
||||
}
|
||||
kfree(ip);
|
||||
err_kp_alloc_failed:
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Tear down a gpio_event platform device: run every sub-driver's UNINIT
 * hook, power the hardware down, unregister all input devices, and free
 * the private state allocated by gpio_event_probe().
 */
static int gpio_event_remove(struct platform_device *pdev)
{
	struct gpio_event *ip = platform_get_drvdata(pdev);
	int i;

	/* Sub-drivers release their irqs/gpios in their UNINIT path. */
	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
	if (ip->info->power)
		ip->info->power(ip->info, 0);
	for (i = 0; i < ip->input_devs->count; i++)
		input_unregister_device(ip->input_devs->dev[i]);
	/* ip->input_devs points into the same allocation as ip (see probe). */
	kfree(ip);
	return 0;
}
|
||||
|
||||
/* Platform driver glue; the device is named by the board file. */
static struct platform_driver gpio_event_driver = {
	.probe		= gpio_event_probe,
	.remove		= gpio_event_remove,
	.driver		= {
		.name	= GPIO_EVENT_DEV_NAME,
	},
};
|
||||
|
||||
module_platform_driver(gpio_event_driver);
|
||||
|
||||
MODULE_DESCRIPTION("GPIO Event Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
390
drivers/input/misc/gpio_input.c
Normal file
390
drivers/input/misc/gpio_input.c
Normal file
@@ -0,0 +1,390 @@
|
||||
/* drivers/input/misc/gpio_input.c
|
||||
*
|
||||
* Copyright (C) 2007 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio_event.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pm_wakeup.h>
|
||||
|
||||
/*
 * Per-key debounce state bits.  A key cycles UNKNOWN -> PRESSED or
 * NOTPRESSED (seen stable once) -> WAIT_IRQ/POLL (stable, resting).
 * UNSTABLE marks an irq that arrived while the key was still debouncing.
 */
enum {
	DEBOUNCE_UNSTABLE = BIT(0),	/* Got irq, while debouncing */
	DEBOUNCE_PRESSED = BIT(1),
	DEBOUNCE_NOTPRESSED = BIT(2),
	DEBOUNCE_WAIT_IRQ = BIT(3),	/* Stable irq state */
	DEBOUNCE_POLL = BIT(4),		/* Stable polling state */

	DEBOUNCE_UNKNOWN =
		DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
};
|
||||
|
||||
/*
 * Per-key bookkeeping; one instance per keymap entry.  Passed as dev_id
 * to the per-gpio irq handler, which recovers the owning state via ds.
 */
struct gpio_key_state {
	struct gpio_input_state *ds;	/* back-pointer to owning state */
	uint8_t debounce;		/* DEBOUNCE_* flag bits */
};
|
||||
|
||||
/*
 * Driver-private state for one gpio_event_input_info instance, allocated
 * in gpio_event_input_func(GPIO_EVENT_FUNC_INIT) with keymap_size
 * trailing gpio_key_state entries.
 */
struct gpio_input_state {
	struct gpio_event_input_devs *input_devs;
	const struct gpio_event_input_info *info;
	struct hrtimer timer;		/* debounce/poll scan timer */
	int use_irq;			/* nonzero if all irqs were acquired */
	int debounce_count;		/* number of keys currently debouncing */
	spinlock_t irq_lock;		/* protects debounce state + timer */
	struct wakeup_source *ws;	/* held while a debounce is in flight */
	/* NOTE(review): [0] is the old GNU zero-length-array idiom; modern
	 * kernel style would use a C99 flexible array member. */
	struct gpio_key_state key_state[0];
};
|
||||
|
||||
/*
 * Debounce/poll timer callback.  Scans every key that is not resting in
 * DEBOUNCE_WAIT_IRQ, advances its debounce state machine, and reports a
 * key event once two consecutive reads agree.  Re-arms itself with
 * debounce_time while any key is still settling, with poll_time in pure
 * polling mode, and otherwise releases the wakeup source.
 * Runs with irq_lock held for the whole scan.
 */
static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
{
	int i;
	int pressed;
	struct gpio_input_state *ds =
		container_of(timer, struct gpio_input_state, timer);
	unsigned gpio_flags = ds->info->flags;
	unsigned npolarity;
	int nkeys = ds->info->keymap_size;
	const struct gpio_event_direct_entry *key_entry;
	struct gpio_key_state *key_state;
	unsigned long irqflags;
	uint8_t debounce;
	bool sync_needed;

#if 0
	key_entry = kp->keys_info->keymap;
	key_state = kp->key_state;
	for (i = 0; i < nkeys; i++, key_entry++, key_state++)
		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
			gpio_read_detect_status(key_entry->gpio));
#endif
	key_entry = ds->info->keymap;
	key_state = ds->key_state;
	sync_needed = false;
	spin_lock_irqsave(&ds->irq_lock, irqflags);
	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
		debounce = key_state->debounce;
		/* Stable key waiting for its irq: nothing to poll. */
		if (debounce & DEBOUNCE_WAIT_IRQ)
			continue;
		if (key_state->debounce & DEBOUNCE_UNSTABLE) {
			/* irq fired mid-debounce; restart debounce and
			 * re-enable the irq the handler disabled. */
			debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
			enable_irq(gpio_to_irq(key_entry->gpio));
			if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
					"(%d) continue debounce\n",
					ds->info->type, key_entry->code,
					i, key_entry->gpio);
		}
		npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
		pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
		if (debounce & DEBOUNCE_POLL) {
			/* Resting state in polling mode: only react if the
			 * level changed, then start a fresh debounce. */
			if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
				ds->debounce_count++;
				key_state->debounce = DEBOUNCE_UNKNOWN;
				if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
					pr_info("gpio_keys_scan_keys: key %x-"
						"%x, %d (%d) start debounce\n",
						ds->info->type, key_entry->code,
						i, key_entry->gpio);
			}
			continue;
		}
		if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
			/* First consistent "pressed" read. */
			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
					"(%d) debounce pressed 1\n",
					ds->info->type, key_entry->code,
					i, key_entry->gpio);
			key_state->debounce = DEBOUNCE_PRESSED;
			continue;
		}
		if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
			/* First consistent "released" read. */
			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
					"(%d) debounce pressed 0\n",
					ds->info->type, key_entry->code,
					i, key_entry->gpio);
			key_state->debounce = DEBOUNCE_NOTPRESSED;
			continue;
		}
		/* key is stable */
		ds->debounce_count--;
		if (ds->use_irq)
			key_state->debounce |= DEBOUNCE_WAIT_IRQ;
		else
			key_state->debounce |= DEBOUNCE_POLL;
		if (gpio_flags & GPIOEDF_PRINT_KEYS)
			pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
				"changed to %d\n", ds->info->type,
				key_entry->code, i, key_entry->gpio, pressed);
		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
			    key_entry->code, pressed);
		sync_needed = true;
	}
	if (sync_needed) {
		for (i = 0; i < ds->input_devs->count; i++)
			input_sync(ds->input_devs->dev[i]);
	}

#if 0
	key_entry = kp->keys_info->keymap;
	key_state = kp->key_state;
	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
			gpio_read_detect_status(key_entry->gpio));
	}
#endif

	/* Re-arm: fast while debouncing, slow in poll mode, idle otherwise. */
	if (ds->debounce_count)
		hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
	else if (!ds->use_irq)
		hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
	else
		__pm_relax(ds->ws);

	spin_unlock_irqrestore(&ds->irq_lock, irqflags);

	return HRTIMER_NORESTART;
}
|
||||
|
||||
/*
 * Per-key irq handler.  With debouncing enabled it only kicks the
 * debounce timer (holding the wakeup source while any key settles);
 * an irq that arrives mid-debounce disables itself and marks the key
 * UNSTABLE for the timer to re-enable.  Without debouncing it reports
 * the key state directly from irq context.
 */
static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
{
	struct gpio_key_state *ks = dev_id;
	struct gpio_input_state *ds = ks->ds;
	int keymap_index = ks - ds->key_state;	/* index via pointer offset */
	const struct gpio_event_direct_entry *key_entry;
	unsigned long irqflags;
	int pressed;

	/* Irqs can fire while gpio_event_input_request_irqs() is still
	 * running; ignore them until use_irq is set. */
	if (!ds->use_irq)
		return IRQ_HANDLED;

	key_entry = &ds->info->keymap[keymap_index];

	if (ds->info->debounce_time.tv64) {
		spin_lock_irqsave(&ds->irq_lock, irqflags);
		if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
			ks->debounce = DEBOUNCE_UNKNOWN;
			if (ds->debounce_count++ == 0) {
				/* First key to start debouncing: keep the
				 * system awake and start the scan timer. */
				__pm_stay_awake(ds->ws);
				hrtimer_start(
					&ds->timer, ds->info->debounce_time,
					HRTIMER_MODE_REL);
			}
			if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
				pr_info("gpio_event_input_irq_handler: "
					"key %x-%x, %d (%d) start debounce\n",
					ds->info->type, key_entry->code,
					keymap_index, key_entry->gpio);
		} else {
			/* irq during an active debounce: mask it until the
			 * timer declares the key stable again. */
			disable_irq_nosync(irq);
			ks->debounce = DEBOUNCE_UNSTABLE;
		}
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
	} else {
		pressed = gpio_get_value(key_entry->gpio) ^
			!(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
		if (ds->info->flags & GPIOEDF_PRINT_KEYS)
			pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
				"(%d) changed to %d\n",
				ds->info->type, key_entry->code, keymap_index,
				key_entry->gpio, pressed);
		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
			    key_entry->code, pressed);
		input_sync(ds->input_devs->dev[key_entry->dev]);
	}
	return IRQ_HANDLED;
}
|
||||
|
||||
/*
 * Request an edge-triggered irq for every keymap gpio, optionally making
 * each a wakeup source.  Returns 0 on success; on failure it unwinds the
 * irqs already acquired and returns the error (the caller then falls back
 * to polling).
 *
 * The cleanup uses the classic goto-into-loop unwind idiom: the labels
 * live INSIDE the loop after `return 0`, so the failing iteration jumps
 * past the work already done for index i and the loop frees indices
 * i-1..0 on its way down.  The loop initializer is only reached via goto,
 * never normally.
 */
static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
{
	int i;
	int err;
	unsigned int irq;
	unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;

	for (i = 0; i < ds->info->keymap_size; i++) {
		err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
		if (err < 0)
			goto err_gpio_get_irq_num_failed;
		err = request_irq(irq, gpio_event_input_irq_handler,
				  req_flags, "gpio_keys", &ds->key_state[i]);
		if (err) {
			pr_err("gpio_event_input_request_irqs: request_irq "
				"failed for input %d, irq %d\n",
				ds->info->keymap[i].gpio, irq);
			goto err_request_irq_failed;
		}
		if (ds->info->info.no_suspend) {
			err = enable_irq_wake(irq);
			if (err) {
				pr_err("gpio_event_input_request_irqs: "
					"enable_irq_wake failed for input %d, "
					"irq %d\n",
					ds->info->keymap[i].gpio, irq);
				goto err_enable_irq_wake_failed;
			}
		}
	}
	return 0;

	/* Error unwind only; entered via the gotos above. */
	for (i = ds->info->keymap_size - 1; i >= 0; i--) {
		irq = gpio_to_irq(ds->info->keymap[i].gpio);
		if (ds->info->info.no_suspend)
			disable_irq_wake(irq);
err_enable_irq_wake_failed:
		free_irq(irq, &ds->key_state[i]);
err_request_irq_failed:
err_gpio_get_irq_num_failed:
		;
	}
	return err;
}
|
||||
|
||||
/*
 * Entry point for the direct-input (gpio-per-key) sub-driver, called by
 * the gpio_event core with one of GPIO_EVENT_FUNC_{INIT,UNINIT,SUSPEND,
 * RESUME}.  INIT allocates *data, registers key capabilities, claims the
 * gpios, and tries irq mode (falling back to polling); any other func
 * value besides SUSPEND/RESUME is treated as UNINIT and tears everything
 * down.  The UNINIT path and the INIT error path share the unwind code
 * at the bottom via goto-into-loop labels, so that code must be read as
 * both a normal path and an error path.
 */
int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
			struct gpio_event_info *info, void **data, int func)
{
	int ret;
	int i;
	unsigned long irqflags;
	struct gpio_event_input_info *di;
	struct gpio_input_state *ds = *data;	/* NULL until INIT runs */
	char *wlname;

	di = container_of(info, struct gpio_event_input_info, info);

	if (func == GPIO_EVENT_FUNC_SUSPEND) {
		if (ds->use_irq)
			for (i = 0; i < di->keymap_size; i++)
				disable_irq(gpio_to_irq(di->keymap[i].gpio));
		hrtimer_cancel(&ds->timer);
		return 0;
	}
	if (func == GPIO_EVENT_FUNC_RESUME) {
		spin_lock_irqsave(&ds->irq_lock, irqflags);
		if (ds->use_irq)
			for (i = 0; i < di->keymap_size; i++)
				enable_irq(gpio_to_irq(di->keymap[i].gpio));
		/* Rescan immediately to pick up state changed while asleep. */
		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
		return 0;
	}

	if (func == GPIO_EVENT_FUNC_INIT) {
		if (ktime_to_ns(di->poll_time) <= 0)
			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);

		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
					di->keymap_size, GFP_KERNEL);
		if (ds == NULL) {
			ret = -ENOMEM;
			pr_err("gpio_event_input_func: "
				"Failed to allocate private data\n");
			goto err_ds_alloc_failed;
		}
		ds->debounce_count = di->keymap_size;
		ds->input_devs = input_devs;
		ds->info = di;
		wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s",
				   input_devs->dev[0]->name,
				   (input_devs->count > 1) ? "..." : "");

		ds->ws = wakeup_source_register(wlname);
		kfree(wlname);	/* wakeup_source_register copies the name */
		if (!ds->ws) {
			ret = -ENOMEM;
			pr_err("gpio_event_input_func: "
				"Failed to allocate wakeup source\n");
			goto err_ws_failed;
		}

		spin_lock_init(&ds->irq_lock);

		/* Validate the keymap and declare capabilities. */
		for (i = 0; i < di->keymap_size; i++) {
			int dev = di->keymap[i].dev;
			if (dev >= input_devs->count) {
				pr_err("gpio_event_input_func: bad device "
					"index %d >= %d for key code %d\n",
					dev, input_devs->count,
					di->keymap[i].code);
				ret = -EINVAL;
				goto err_bad_keymap;
			}
			input_set_capability(input_devs->dev[dev], di->type,
					     di->keymap[i].code);
			ds->key_state[i].ds = ds;
			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
		}

		/* Claim and configure each key gpio as an input. */
		for (i = 0; i < di->keymap_size; i++) {
			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
			if (ret) {
				pr_err("gpio_event_input_func: gpio_request "
					"failed for %d\n", di->keymap[i].gpio);
				goto err_gpio_request_failed;
			}
			ret = gpio_direction_input(di->keymap[i].gpio);
			if (ret) {
				pr_err("gpio_event_input_func: "
					"gpio_direction_input failed for %d\n",
					di->keymap[i].gpio);
				goto err_gpio_configure_failed;
			}
		}

		ret = gpio_event_input_request_irqs(ds);

		spin_lock_irqsave(&ds->irq_lock, irqflags);
		ds->use_irq = ret == 0;	/* irq failure => polling mode */

		pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
			"mode\n", input_devs->dev[0]->name,
			(input_devs->count > 1) ? "..." : "",
			ret == 0 ? "interrupt" : "polling");

		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ds->timer.function = gpio_event_input_timer_func;
		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
		return 0;
	}

	/* UNINIT path (also reached by the INIT error gotos below). */
	ret = 0;
	spin_lock_irqsave(&ds->irq_lock, irqflags);
	/* NOTE(review): hrtimer_cancel() waits for the callback, which takes
	 * irq_lock itself — calling it under the lock looks deadlock-prone;
	 * confirm against the hrtimer semantics of this kernel version. */
	hrtimer_cancel(&ds->timer);
	if (ds->use_irq) {
		for (i = di->keymap_size - 1; i >= 0; i--) {
			int irq = gpio_to_irq(di->keymap[i].gpio);
			if (ds->info->info.no_suspend)
				disable_irq_wake(irq);
			free_irq(irq, &ds->key_state[i]);
		}
	}
	spin_unlock_irqrestore(&ds->irq_lock, irqflags);

	for (i = di->keymap_size - 1; i >= 0; i--) {
err_gpio_configure_failed:
		gpio_free(di->keymap[i].gpio);
err_gpio_request_failed:
		;
	}
err_bad_keymap:
	wakeup_source_unregister(ds->ws);
err_ws_failed:
	kfree(ds);
err_ds_alloc_failed:
	return ret;
}
|
||||
441
drivers/input/misc/gpio_matrix.c
Normal file
441
drivers/input/misc/gpio_matrix.c
Normal file
@@ -0,0 +1,441 @@
|
||||
/* drivers/input/misc/gpio_matrix.c
|
||||
*
|
||||
* Copyright (C) 2007 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio_event.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/wakelock.h>
|
||||
|
||||
/*
 * Matrix-keypad scan state, allocated in gpio_event_matrix_func(INIT)
 * with a trailing bitmap of ninputs*noutputs key-pressed bits.
 */
struct gpio_kp {
	struct gpio_event_input_devs *input_devs;
	struct gpio_event_matrix_info *keypad_info;
	struct hrtimer timer;			/* drives the column scan */
	struct wake_lock wake_lock;		/* held while keys are down */
	int current_output;			/* column being scanned */
	unsigned int use_irq:1;
	unsigned int key_state_changed:1;
	unsigned int last_key_state_changed:1;
	unsigned int some_keys_pressed:2;	/* saturates at 3 */
	unsigned int disabled_irq:1;	/* irq fired before use_irq was set */
	/* NOTE(review): [0] is the old GNU zero-length-array idiom; modern
	 * kernel style would use a C99 flexible array member. */
	unsigned long keys_pressed[0];
};
|
||||
|
||||
/*
 * Drop a suspected phantom (ghost) key at matrix position (out, in) from
 * the pending-keys bitmap — but only if that keycode has not already been
 * reported as down on its input device, in which case clearing it would
 * generate a spurious release.
 */
static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
{
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	int key_index = out * mi->ninputs + in;
	unsigned short keyentry = mi->keymap[key_index];
	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
	unsigned short dev = keyentry >> MATRIX_CODE_BITS;

	if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
				"cleared\n", keycode, out, in,
				mi->output_gpios[out], mi->input_gpios[in]);
		__clear_bit(key_index, kp->keys_pressed);
	} else {
		/* Key is already reported down; keep it to avoid a bogus
		 * release event. */
		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
				"not cleared\n", keycode, out, in,
				mi->output_gpios[out], mi->input_gpios[in]);
	}
}
|
||||
|
||||
/*
 * Walk column `out` and all later columns on input line `in`, treating
 * every pressed key found as a phantom candidate (clear_phantom_key
 * decides whether it is actually dropped).  Returns 1 if any pressed key
 * was found on that input line, 0 otherwise.
 */
static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
{
	int rv = 0;
	int key_index;

	key_index = out * kp->keypad_info->ninputs + in;
	while (out < kp->keypad_info->noutputs) {
		if (test_bit(key_index, kp->keys_pressed)) {
			rv = 1;
			clear_phantom_key(kp, out, in);
		}
		key_index += kp->keypad_info->ninputs;	/* next column, same row */
		out++;
	}
	return rv;
}
|
||||
|
||||
/*
 * Ghost-key suppression: with three or more keys down, a keypad matrix
 * without diodes can electrically assert a fourth, unpressed key.  For
 * each column with more than one pressed input, check whether any of
 * those inputs also shows pressed keys in a later column; if so, the
 * rectangle of keys is ambiguous and the candidates are cleared via
 * clear_phantom_key().  Only runs when some_keys_pressed saturated at 3.
 */
static void remove_phantom_keys(struct gpio_kp *kp)
{
	int out, in, inp;
	int key_index;

	if (kp->some_keys_pressed < 3)
		return;

	for (out = 0; out < kp->keypad_info->noutputs; out++) {
		/* inp: -1 = no pressed input seen yet in this column,
		 * >= 0 = first pressed input (deferred), -2 = already
		 * handled the first input. */
		inp = -1;
		key_index = out * kp->keypad_info->ninputs;
		for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
			if (test_bit(key_index, kp->keys_pressed)) {
				if (inp == -1) {
					inp = in;	/* defer the first hit */
					continue;
				}
				if (inp >= 0) {
					/* Second pressed input in this column:
					 * now re-examine the deferred first. */
					if (!restore_keys_for_input(kp, out + 1,
									inp))
						break;
					clear_phantom_key(kp, out, inp);
					inp = -2;
				}
				restore_keys_for_input(kp, out, in);
			}
		}
	}
}
|
||||
|
||||
/*
 * Report a single matrix position to its input device if its scanned
 * state differs from what that device last reported.  KEY_RESERVED
 * entries are unmapped and only (optionally) logged.
 */
static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
{
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	int pressed = test_bit(key_index, kp->keys_pressed);
	unsigned short keyentry = mi->keymap[key_index];
	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
	unsigned short dev = keyentry >> MATRIX_CODE_BITS;

	/* dev->key is the input core's own pressed-state bitmap, so this
	 * compares scanned state against last reported state. */
	if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
		if (keycode == KEY_RESERVED) {
			if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
				pr_info("gpiomatrix: unmapped key, %d-%d "
					"(%d-%d) changed to %d\n",
					out, in, mi->output_gpios[out],
					mi->input_gpios[in], pressed);
		} else {
			if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
				pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
					"changed to %d\n", keycode,
					out, in, mi->output_gpios[out],
					mi->input_gpios[in], pressed);
			input_report_key(kp->input_devs->dev[dev], keycode, pressed);
		}
	}
}
|
||||
|
||||
static void report_sync(struct gpio_kp *kp)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < kp->input_devs->count; i++)
|
||||
input_sync(kp->input_devs->dev[i]);
|
||||
}
|
||||
|
||||
/*
 * Matrix scan state machine, one timer tick per column.  Each tick reads
 * the input lines for the previously-driven column, de-asserts it, then
 * drives the next column and re-arms with settle_time.  When all columns
 * have been scanned (current_output == noutputs) it applies debounce,
 * phantom-key removal, and event reporting, then either re-arms with
 * poll_time or — in irq mode with no keys down — re-enables the input
 * irqs and releases the wake lock.
 */
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);

	out = kp->current_output;
	if (out == mi->noutputs) {
		/* Full pass done; start a new pass. */
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		/* Sample the inputs for the column driven last tick. */
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		/* De-assert the scanned column. */
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	if (out < mi->noutputs) {
		/* Drive the next column and wait for it to settle. */
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		if (kp->key_state_changed) {
			/* Unstable pass: rescan after the debounce delay. */
			hrtimer_start(&kp->timer, mi->debounce_delay,
				      HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}
	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
		report_sync(kp);
	}
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}

	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	wake_unlock(&kp->wake_lock);
	return HRTIMER_NORESTART;
}
|
||||
|
||||
/*
 * Keypad input-line irq: any activity masks all input irqs, de-asserts
 * every column, grabs the wake lock, and hands off to the scan timer.
 * Irqs arriving before registration finishes (use_irq not yet set) are
 * masked and remembered via disabled_irq so gpio_keypad_request_irqs()
 * can rebalance the enable count.
 */
static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
{
	int i;
	struct gpio_kp *kp = dev_id;
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;

	if (!kp->use_irq) {
		/* ignore interrupt while registering the handler */
		kp->disabled_irq = 1;
		disable_irq_nosync(irq_in);
		return IRQ_HANDLED;
	}

	for (i = 0; i < mi->ninputs; i++)
		disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
	for (i = 0; i < mi->noutputs; i++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[i],
				!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
		else
			gpio_direction_input(mi->output_gpios[i]);
	}
	wake_lock(&kp->wake_lock);
	hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
	return IRQ_HANDLED;
}
|
||||
|
||||
static int gpio_keypad_request_irqs(struct gpio_kp *kp)
|
||||
{
|
||||
int i;
|
||||
int err;
|
||||
unsigned int irq;
|
||||
unsigned long request_flags;
|
||||
struct gpio_event_matrix_info *mi = kp->keypad_info;
|
||||
|
||||
switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
|
||||
default:
|
||||
request_flags = IRQF_TRIGGER_FALLING;
|
||||
break;
|
||||
case GPIOKPF_ACTIVE_HIGH:
|
||||
request_flags = IRQF_TRIGGER_RISING;
|
||||
break;
|
||||
case GPIOKPF_LEVEL_TRIGGERED_IRQ:
|
||||
request_flags = IRQF_TRIGGER_LOW;
|
||||
break;
|
||||
case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
|
||||
request_flags = IRQF_TRIGGER_HIGH;
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < mi->ninputs; i++) {
|
||||
err = irq = gpio_to_irq(mi->input_gpios[i]);
|
||||
if (err < 0)
|
||||
goto err_gpio_get_irq_num_failed;
|
||||
err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
|
||||
"gpio_kp", kp);
|
||||
if (err) {
|
||||
pr_err("gpiomatrix: request_irq failed for input %d, "
|
||||
"irq %d\n", mi->input_gpios[i], irq);
|
||||
goto err_request_irq_failed;
|
||||
}
|
||||
err = enable_irq_wake(irq);
|
||||
if (err) {
|
||||
pr_err("gpiomatrix: set_irq_wake failed for input %d, "
|
||||
"irq %d\n", mi->input_gpios[i], irq);
|
||||
}
|
||||
disable_irq(irq);
|
||||
if (kp->disabled_irq) {
|
||||
kp->disabled_irq = 0;
|
||||
enable_irq(irq);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
for (i = mi->noutputs - 1; i >= 0; i--) {
|
||||
free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
|
||||
err_request_irq_failed:
|
||||
err_gpio_get_irq_num_failed:
|
||||
;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
|
||||
struct gpio_event_info *info, void **data, int func)
|
||||
{
|
||||
int i;
|
||||
int err;
|
||||
int key_count;
|
||||
struct gpio_kp *kp;
|
||||
struct gpio_event_matrix_info *mi;
|
||||
|
||||
mi = container_of(info, struct gpio_event_matrix_info, info);
|
||||
if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
|
||||
/* TODO: disable scanning */
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (func == GPIO_EVENT_FUNC_INIT) {
|
||||
if (mi->keymap == NULL ||
|
||||
mi->input_gpios == NULL ||
|
||||
mi->output_gpios == NULL) {
|
||||
err = -ENODEV;
|
||||
pr_err("gpiomatrix: Incomplete pdata\n");
|
||||
goto err_invalid_platform_data;
|
||||
}
|
||||
key_count = mi->ninputs * mi->noutputs;
|
||||
|
||||
*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
|
||||
BITS_TO_LONGS(key_count), GFP_KERNEL);
|
||||
if (kp == NULL) {
|
||||
err = -ENOMEM;
|
||||
pr_err("gpiomatrix: Failed to allocate private data\n");
|
||||
goto err_kp_alloc_failed;
|
||||
}
|
||||
kp->input_devs = input_devs;
|
||||
kp->keypad_info = mi;
|
||||
for (i = 0; i < key_count; i++) {
|
||||
unsigned short keyentry = mi->keymap[i];
|
||||
unsigned short keycode = keyentry & MATRIX_KEY_MASK;
|
||||
unsigned short dev = keyentry >> MATRIX_CODE_BITS;
|
||||
if (dev >= input_devs->count) {
|
||||
pr_err("gpiomatrix: bad device index %d >= "
|
||||
"%d for key code %d\n",
|
||||
dev, input_devs->count, keycode);
|
||||
err = -EINVAL;
|
||||
goto err_bad_keymap;
|
||||
}
|
||||
if (keycode && keycode <= KEY_MAX)
|
||||
input_set_capability(input_devs->dev[dev],
|
||||
EV_KEY, keycode);
|
||||
}
|
||||
|
||||
for (i = 0; i < mi->noutputs; i++) {
|
||||
err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
|
||||
if (err) {
|
||||
pr_err("gpiomatrix: gpio_request failed for "
|
||||
"output %d\n", mi->output_gpios[i]);
|
||||
goto err_request_output_gpio_failed;
|
||||
}
|
||||
if (gpio_cansleep(mi->output_gpios[i])) {
|
||||
pr_err("gpiomatrix: unsupported output gpio %d,"
|
||||
" can sleep\n", mi->output_gpios[i]);
|
||||
err = -EINVAL;
|
||||
goto err_output_gpio_configure_failed;
|
||||
}
|
||||
if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
|
||||
err = gpio_direction_output(mi->output_gpios[i],
|
||||
!(mi->flags & GPIOKPF_ACTIVE_HIGH));
|
||||
else
|
||||
err = gpio_direction_input(mi->output_gpios[i]);
|
||||
if (err) {
|
||||
pr_err("gpiomatrix: gpio_configure failed for "
|
||||
"output %d\n", mi->output_gpios[i]);
|
||||
goto err_output_gpio_configure_failed;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < mi->ninputs; i++) {
|
||||
err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
|
||||
if (err) {
|
||||
pr_err("gpiomatrix: gpio_request failed for "
|
||||
"input %d\n", mi->input_gpios[i]);
|
||||
goto err_request_input_gpio_failed;
|
||||
}
|
||||
err = gpio_direction_input(mi->input_gpios[i]);
|
||||
if (err) {
|
||||
pr_err("gpiomatrix: gpio_direction_input failed"
|
||||
" for input %d\n", mi->input_gpios[i]);
|
||||
goto err_gpio_direction_input_failed;
|
||||
}
|
||||
}
|
||||
kp->current_output = mi->noutputs;
|
||||
kp->key_state_changed = 1;
|
||||
|
||||
hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
kp->timer.function = gpio_keypad_timer_func;
|
||||
wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
|
||||
err = gpio_keypad_request_irqs(kp);
|
||||
kp->use_irq = err == 0;
|
||||
|
||||
pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
|
||||
"%s%s in %s mode\n", input_devs->dev[0]->name,
|
||||
(input_devs->count > 1) ? "..." : "",
|
||||
kp->use_irq ? "interrupt" : "polling");
|
||||
|
||||
if (kp->use_irq)
|
||||
wake_lock(&kp->wake_lock);
|
||||
hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
kp = *data;
|
||||
|
||||
if (kp->use_irq)
|
||||
for (i = mi->noutputs - 1; i >= 0; i--)
|
||||
free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
|
||||
|
||||
hrtimer_cancel(&kp->timer);
|
||||
wake_lock_destroy(&kp->wake_lock);
|
||||
for (i = mi->noutputs - 1; i >= 0; i--) {
|
||||
err_gpio_direction_input_failed:
|
||||
gpio_free(mi->input_gpios[i]);
|
||||
err_request_input_gpio_failed:
|
||||
;
|
||||
}
|
||||
for (i = mi->noutputs - 1; i >= 0; i--) {
|
||||
err_output_gpio_configure_failed:
|
||||
gpio_free(mi->output_gpios[i]);
|
||||
err_request_output_gpio_failed:
|
||||
;
|
||||
}
|
||||
err_bad_keymap:
|
||||
kfree(kp);
|
||||
err_kp_alloc_failed:
|
||||
err_invalid_platform_data:
|
||||
return err;
|
||||
}
|
||||
97
drivers/input/misc/gpio_output.c
Normal file
97
drivers/input/misc/gpio_output.c
Normal file
@@ -0,0 +1,97 @@
|
||||
/* drivers/input/misc/gpio_output.c
|
||||
*
|
||||
* Copyright (C) 2007 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio_event.h>
|
||||
|
||||
int gpio_event_output_event(
|
||||
struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
|
||||
void **data, unsigned int dev, unsigned int type,
|
||||
unsigned int code, int value)
|
||||
{
|
||||
int i;
|
||||
struct gpio_event_output_info *oi;
|
||||
oi = container_of(info, struct gpio_event_output_info, info);
|
||||
if (type != oi->type)
|
||||
return 0;
|
||||
if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
|
||||
value = !value;
|
||||
for (i = 0; i < oi->keymap_size; i++)
|
||||
if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
|
||||
gpio_set_value(oi->keymap[i].gpio, value);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Lifecycle entry point for the gpio-output sub-driver.  INIT validates
 * the keymap, declares capabilities, and claims each gpio as an output
 * driven to its inactive level; any other func besides SUSPEND/RESUME
 * frees the gpios.  The UNINIT path and the INIT error path share the
 * goto-into-loop unwind at the bottom.
 */
int gpio_event_output_func(
	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
	void **data, int func)
{
	int ret;
	int i;
	struct gpio_event_output_info *oi;
	oi = container_of(info, struct gpio_event_output_info, info);

	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
		return 0;

	if (func == GPIO_EVENT_FUNC_INIT) {
		/* Inactive level: low for active-high outputs, high otherwise. */
		int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);

		for (i = 0; i < oi->keymap_size; i++) {
			int dev = oi->keymap[i].dev;
			if (dev >= input_devs->count) {
				pr_err("gpio_event_output_func: bad device "
					"index %d >= %d for key code %d\n",
					dev, input_devs->count,
					oi->keymap[i].code);
				ret = -EINVAL;
				goto err_bad_keymap;
			}
			input_set_capability(input_devs->dev[dev], oi->type,
					     oi->keymap[i].code);
		}

		for (i = 0; i < oi->keymap_size; i++) {
			ret = gpio_request(oi->keymap[i].gpio,
					   "gpio_event_output");
			if (ret) {
				pr_err("gpio_event_output_func: gpio_request "
					"failed for %d\n", oi->keymap[i].gpio);
				goto err_gpio_request_failed;
			}
			ret = gpio_direction_output(oi->keymap[i].gpio,
						    output_level);
			if (ret) {
				pr_err("gpio_event_output_func: "
					"gpio_direction_output failed for %d\n",
					oi->keymap[i].gpio);
				goto err_gpio_direction_output_failed;
			}
		}
		return 0;
	}

	/* UNINIT path (also reached by the INIT error gotos below). */
	ret = 0;
	for (i = oi->keymap_size - 1; i >= 0; i--) {
err_gpio_direction_output_failed:
		gpio_free(oi->keymap[i].gpio);
err_gpio_request_failed:
		;
	}
err_bad_keymap:
	return ret;
}
|
||||
|
||||
391
drivers/input/misc/keychord.c
Normal file
391
drivers/input/misc/keychord.c
Normal file
@@ -0,0 +1,391 @@
|
||||
/*
|
||||
* drivers/input/misc/keychord.c
|
||||
*
|
||||
* Copyright (C) 2008 Google, Inc.
|
||||
* Author: Mike Lockwood <lockwood@android.com>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/poll.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/keychord.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#define KEYCHORD_NAME "keychord"
|
||||
#define BUFFER_SIZE 16
|
||||
|
||||
MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
|
||||
MODULE_DESCRIPTION("Key chord input driver");
|
||||
MODULE_SUPPORTED_DEVICE("keychord");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
|
||||
((char *)kc + sizeof(struct input_keychord) + \
|
||||
kc->count * sizeof(kc->keycodes[0])))
|
||||
|
||||
struct keychord_device {
|
||||
struct input_handler input_handler;
|
||||
int registered;
|
||||
|
||||
/* list of keychords to monitor */
|
||||
struct input_keychord *keychords;
|
||||
int keychord_count;
|
||||
|
||||
/* bitmask of keys contained in our keychords */
|
||||
unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
|
||||
/* current state of the keys */
|
||||
unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
|
||||
/* number of keys that are currently pressed */
|
||||
int key_down;
|
||||
|
||||
/* second input_device_id is needed for null termination */
|
||||
struct input_device_id device_ids[2];
|
||||
|
||||
spinlock_t lock;
|
||||
wait_queue_head_t waitq;
|
||||
unsigned char head;
|
||||
unsigned char tail;
|
||||
__u16 buff[BUFFER_SIZE];
|
||||
};
|
||||
|
||||
static int check_keychord(struct keychord_device *kdev,
|
||||
struct input_keychord *keychord)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (keychord->count != kdev->key_down)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < keychord->count; i++) {
|
||||
if (!test_bit(keychord->keycodes[i], kdev->keystate))
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* we have a match */
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void keychord_event(struct input_handle *handle, unsigned int type,
|
||||
unsigned int code, int value)
|
||||
{
|
||||
struct keychord_device *kdev = handle->private;
|
||||
struct input_keychord *keychord;
|
||||
unsigned long flags;
|
||||
int i, got_chord = 0;
|
||||
|
||||
if (type != EV_KEY || code >= KEY_MAX)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&kdev->lock, flags);
|
||||
/* do nothing if key state did not change */
|
||||
if (!test_bit(code, kdev->keystate) == !value)
|
||||
goto done;
|
||||
__change_bit(code, kdev->keystate);
|
||||
if (value)
|
||||
kdev->key_down++;
|
||||
else
|
||||
kdev->key_down--;
|
||||
|
||||
/* don't notify on key up */
|
||||
if (!value)
|
||||
goto done;
|
||||
/* ignore this event if it is not one of the keys we are monitoring */
|
||||
if (!test_bit(code, kdev->keybit))
|
||||
goto done;
|
||||
|
||||
keychord = kdev->keychords;
|
||||
if (!keychord)
|
||||
goto done;
|
||||
|
||||
/* check to see if the keyboard state matches any keychords */
|
||||
for (i = 0; i < kdev->keychord_count; i++) {
|
||||
if (check_keychord(kdev, keychord)) {
|
||||
kdev->buff[kdev->head] = keychord->id;
|
||||
kdev->head = (kdev->head + 1) % BUFFER_SIZE;
|
||||
got_chord = 1;
|
||||
break;
|
||||
}
|
||||
/* skip to next keychord */
|
||||
keychord = NEXT_KEYCHORD(keychord);
|
||||
}
|
||||
|
||||
done:
|
||||
spin_unlock_irqrestore(&kdev->lock, flags);
|
||||
|
||||
if (got_chord) {
|
||||
pr_info("keychord: got keychord id %d. Any tasks: %d\n",
|
||||
keychord->id,
|
||||
!list_empty_careful(&kdev->waitq.task_list));
|
||||
wake_up_interruptible(&kdev->waitq);
|
||||
}
|
||||
}
|
||||
|
||||
static int keychord_connect(struct input_handler *handler,
|
||||
struct input_dev *dev,
|
||||
const struct input_device_id *id)
|
||||
{
|
||||
int i, ret;
|
||||
struct input_handle *handle;
|
||||
struct keychord_device *kdev =
|
||||
container_of(handler, struct keychord_device, input_handler);
|
||||
|
||||
/*
|
||||
* ignore this input device if it does not contain any keycodes
|
||||
* that we are monitoring
|
||||
*/
|
||||
for (i = 0; i < KEY_MAX; i++) {
|
||||
if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
|
||||
break;
|
||||
}
|
||||
if (i == KEY_MAX)
|
||||
return -ENODEV;
|
||||
|
||||
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
|
||||
if (!handle)
|
||||
return -ENOMEM;
|
||||
|
||||
handle->dev = dev;
|
||||
handle->handler = handler;
|
||||
handle->name = KEYCHORD_NAME;
|
||||
handle->private = kdev;
|
||||
|
||||
ret = input_register_handle(handle);
|
||||
if (ret)
|
||||
goto err_input_register_handle;
|
||||
|
||||
ret = input_open_device(handle);
|
||||
if (ret)
|
||||
goto err_input_open_device;
|
||||
|
||||
pr_info("keychord: using input dev %s for fevent\n", dev->name);
|
||||
|
||||
return 0;
|
||||
|
||||
err_input_open_device:
|
||||
input_unregister_handle(handle);
|
||||
err_input_register_handle:
|
||||
kfree(handle);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void keychord_disconnect(struct input_handle *handle)
|
||||
{
|
||||
input_close_device(handle);
|
||||
input_unregister_handle(handle);
|
||||
kfree(handle);
|
||||
}
|
||||
|
||||
/*
|
||||
* keychord_read is used to read keychord events from the driver
|
||||
*/
|
||||
static ssize_t keychord_read(struct file *file, char __user *buffer,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct keychord_device *kdev = file->private_data;
|
||||
__u16 id;
|
||||
int retval;
|
||||
unsigned long flags;
|
||||
|
||||
if (count < sizeof(id))
|
||||
return -EINVAL;
|
||||
count = sizeof(id);
|
||||
|
||||
if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
|
||||
return -EAGAIN;
|
||||
|
||||
retval = wait_event_interruptible(kdev->waitq,
|
||||
kdev->head != kdev->tail);
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
spin_lock_irqsave(&kdev->lock, flags);
|
||||
/* pop a keychord ID off the queue */
|
||||
id = kdev->buff[kdev->tail];
|
||||
kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
|
||||
spin_unlock_irqrestore(&kdev->lock, flags);
|
||||
|
||||
if (copy_to_user(buffer, &id, count))
|
||||
return -EFAULT;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
/*
|
||||
* keychord_write is used to configure the driver
|
||||
*/
|
||||
static ssize_t keychord_write(struct file *file, const char __user *buffer,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct keychord_device *kdev = file->private_data;
|
||||
struct input_keychord *keychords = 0;
|
||||
struct input_keychord *keychord, *next, *end;
|
||||
int ret, i, key;
|
||||
unsigned long flags;
|
||||
|
||||
if (count < sizeof(struct input_keychord))
|
||||
return -EINVAL;
|
||||
keychords = kzalloc(count, GFP_KERNEL);
|
||||
if (!keychords)
|
||||
return -ENOMEM;
|
||||
|
||||
/* read list of keychords from userspace */
|
||||
if (copy_from_user(keychords, buffer, count)) {
|
||||
kfree(keychords);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* unregister handler before changing configuration */
|
||||
if (kdev->registered) {
|
||||
input_unregister_handler(&kdev->input_handler);
|
||||
kdev->registered = 0;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&kdev->lock, flags);
|
||||
/* clear any existing configuration */
|
||||
kfree(kdev->keychords);
|
||||
kdev->keychords = 0;
|
||||
kdev->keychord_count = 0;
|
||||
kdev->key_down = 0;
|
||||
memset(kdev->keybit, 0, sizeof(kdev->keybit));
|
||||
memset(kdev->keystate, 0, sizeof(kdev->keystate));
|
||||
kdev->head = kdev->tail = 0;
|
||||
|
||||
keychord = keychords;
|
||||
end = (struct input_keychord *)((char *)keychord + count);
|
||||
|
||||
while (keychord < end) {
|
||||
next = NEXT_KEYCHORD(keychord);
|
||||
if (keychord->count <= 0 || next > end) {
|
||||
pr_err("keychord: invalid keycode count %d\n",
|
||||
keychord->count);
|
||||
goto err_unlock_return;
|
||||
}
|
||||
if (keychord->version != KEYCHORD_VERSION) {
|
||||
pr_err("keychord: unsupported version %d\n",
|
||||
keychord->version);
|
||||
goto err_unlock_return;
|
||||
}
|
||||
|
||||
/* keep track of the keys we are monitoring in keybit */
|
||||
for (i = 0; i < keychord->count; i++) {
|
||||
key = keychord->keycodes[i];
|
||||
if (key < 0 || key >= KEY_CNT) {
|
||||
pr_err("keychord: keycode %d out of range\n",
|
||||
key);
|
||||
goto err_unlock_return;
|
||||
}
|
||||
__set_bit(key, kdev->keybit);
|
||||
}
|
||||
|
||||
kdev->keychord_count++;
|
||||
keychord = next;
|
||||
}
|
||||
|
||||
kdev->keychords = keychords;
|
||||
spin_unlock_irqrestore(&kdev->lock, flags);
|
||||
|
||||
ret = input_register_handler(&kdev->input_handler);
|
||||
if (ret) {
|
||||
kfree(keychords);
|
||||
kdev->keychords = 0;
|
||||
return ret;
|
||||
}
|
||||
kdev->registered = 1;
|
||||
|
||||
return count;
|
||||
|
||||
err_unlock_return:
|
||||
spin_unlock_irqrestore(&kdev->lock, flags);
|
||||
kfree(keychords);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static unsigned int keychord_poll(struct file *file, poll_table *wait)
|
||||
{
|
||||
struct keychord_device *kdev = file->private_data;
|
||||
|
||||
poll_wait(file, &kdev->waitq, wait);
|
||||
|
||||
if (kdev->head != kdev->tail)
|
||||
return POLLIN | POLLRDNORM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int keychord_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct keychord_device *kdev;
|
||||
|
||||
kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
|
||||
if (!kdev)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&kdev->lock);
|
||||
init_waitqueue_head(&kdev->waitq);
|
||||
|
||||
kdev->input_handler.event = keychord_event;
|
||||
kdev->input_handler.connect = keychord_connect;
|
||||
kdev->input_handler.disconnect = keychord_disconnect;
|
||||
kdev->input_handler.name = KEYCHORD_NAME;
|
||||
kdev->input_handler.id_table = kdev->device_ids;
|
||||
|
||||
kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
|
||||
__set_bit(EV_KEY, kdev->device_ids[0].evbit);
|
||||
|
||||
file->private_data = kdev;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int keychord_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct keychord_device *kdev = file->private_data;
|
||||
|
||||
if (kdev->registered)
|
||||
input_unregister_handler(&kdev->input_handler);
|
||||
kfree(kdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations keychord_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = keychord_open,
|
||||
.release = keychord_release,
|
||||
.read = keychord_read,
|
||||
.write = keychord_write,
|
||||
.poll = keychord_poll,
|
||||
};
|
||||
|
||||
static struct miscdevice keychord_misc = {
|
||||
.fops = &keychord_fops,
|
||||
.name = KEYCHORD_NAME,
|
||||
.minor = MISC_DYNAMIC_MINOR,
|
||||
};
|
||||
|
||||
static int __init keychord_init(void)
|
||||
{
|
||||
return misc_register(&keychord_misc);
|
||||
}
|
||||
|
||||
static void __exit keychord_exit(void)
|
||||
{
|
||||
misc_deregister(&keychord_misc);
|
||||
}
|
||||
|
||||
module_init(keychord_init);
|
||||
module_exit(keychord_exit);
|
||||
@@ -411,6 +411,10 @@ config TI_DAC7512
|
||||
This driver can also be built as a module. If so, the module
|
||||
will be called ti_dac7512.
|
||||
|
||||
config UID_STAT
|
||||
bool "UID based statistics tracking exported to /proc/uid_stat"
|
||||
default n
|
||||
|
||||
config VMWARE_BALLOON
|
||||
tristate "VMware Balloon Driver"
|
||||
depends on X86 && HYPERVISOR_GUEST
|
||||
|
||||
@@ -35,6 +35,7 @@ obj-$(CONFIG_ISL29020) += isl29020.o
|
||||
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
|
||||
obj-$(CONFIG_DS1682) += ds1682.o
|
||||
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
|
||||
obj-$(CONFIG_UID_STAT) += uid_stat.o
|
||||
obj-$(CONFIG_C2PORT) += c2port/
|
||||
obj-$(CONFIG_HMC6352) += hmc6352.o
|
||||
obj-y += eeprom/
|
||||
|
||||
152
drivers/misc/uid_stat.c
Normal file
152
drivers/misc/uid_stat.c
Normal file
@@ -0,0 +1,152 @@
|
||||
/* drivers/misc/uid_stat.c
|
||||
*
|
||||
* Copyright (C) 2008 - 2009 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/atomic.h>
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/uid_stat.h>
|
||||
#include <net/activity_stats.h>
|
||||
|
||||
static DEFINE_SPINLOCK(uid_lock);
|
||||
static LIST_HEAD(uid_list);
|
||||
static struct proc_dir_entry *parent;
|
||||
|
||||
struct uid_stat {
|
||||
struct list_head link;
|
||||
uid_t uid;
|
||||
atomic_t tcp_rcv;
|
||||
atomic_t tcp_snd;
|
||||
};
|
||||
|
||||
static struct uid_stat *find_uid_stat(uid_t uid) {
|
||||
struct uid_stat *entry;
|
||||
|
||||
list_for_each_entry(entry, &uid_list, link) {
|
||||
if (entry->uid == uid) {
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int uid_stat_atomic_int_show(struct seq_file *m, void *v)
|
||||
{
|
||||
unsigned int bytes;
|
||||
atomic_t *counter = m->private;
|
||||
|
||||
bytes = (unsigned int) (atomic_read(counter) + INT_MIN);
|
||||
return seq_printf(m, "%u\n", bytes);
|
||||
}
|
||||
|
||||
static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static const struct file_operations uid_stat_read_atomic_int_fops = {
|
||||
.open = uid_stat_read_atomic_int_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
/* Create a new entry for tracking the specified uid. */
|
||||
static struct uid_stat *create_stat(uid_t uid) {
|
||||
struct uid_stat *new_uid;
|
||||
/* Create the uid stat struct and append it to the list. */
|
||||
new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
|
||||
if (!new_uid)
|
||||
return NULL;
|
||||
|
||||
new_uid->uid = uid;
|
||||
/* Counters start at INT_MIN, so we can track 4GB of network traffic. */
|
||||
atomic_set(&new_uid->tcp_rcv, INT_MIN);
|
||||
atomic_set(&new_uid->tcp_snd, INT_MIN);
|
||||
|
||||
list_add_tail(&new_uid->link, &uid_list);
|
||||
return new_uid;
|
||||
}
|
||||
|
||||
static void create_stat_proc(struct uid_stat *new_uid)
|
||||
{
|
||||
char uid_s[32];
|
||||
struct proc_dir_entry *entry;
|
||||
sprintf(uid_s, "%d", new_uid->uid);
|
||||
entry = proc_mkdir(uid_s, parent);
|
||||
|
||||
/* Keep reference to uid_stat so we know what uid to read stats from. */
|
||||
proc_create_data("tcp_snd", S_IRUGO, entry,
|
||||
&uid_stat_read_atomic_int_fops, &new_uid->tcp_snd);
|
||||
|
||||
proc_create_data("tcp_rcv", S_IRUGO, entry,
|
||||
&uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv);
|
||||
}
|
||||
|
||||
static struct uid_stat *find_or_create_uid_stat(uid_t uid)
|
||||
{
|
||||
struct uid_stat *entry;
|
||||
unsigned long flags;
|
||||
spin_lock_irqsave(&uid_lock, flags);
|
||||
entry = find_uid_stat(uid);
|
||||
if (entry) {
|
||||
spin_unlock_irqrestore(&uid_lock, flags);
|
||||
return entry;
|
||||
}
|
||||
entry = create_stat(uid);
|
||||
spin_unlock_irqrestore(&uid_lock, flags);
|
||||
if (entry)
|
||||
create_stat_proc(entry);
|
||||
return entry;
|
||||
}
|
||||
|
||||
int uid_stat_tcp_snd(uid_t uid, int size) {
|
||||
struct uid_stat *entry;
|
||||
activity_stats_update();
|
||||
entry = find_or_create_uid_stat(uid);
|
||||
if (!entry)
|
||||
return -1;
|
||||
atomic_add(size, &entry->tcp_snd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int uid_stat_tcp_rcv(uid_t uid, int size) {
|
||||
struct uid_stat *entry;
|
||||
activity_stats_update();
|
||||
entry = find_or_create_uid_stat(uid);
|
||||
if (!entry)
|
||||
return -1;
|
||||
atomic_add(size, &entry->tcp_rcv);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init uid_stat_init(void)
|
||||
{
|
||||
parent = proc_mkdir("uid_stat", NULL);
|
||||
if (!parent) {
|
||||
pr_err("uid_stat: failed to create proc entry\n");
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
__initcall(uid_stat_init);
|
||||
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
|
||||
|
||||
If unsure, say Y here.
|
||||
|
||||
config MMC_BLOCK_DEFERRED_RESUME
|
||||
bool "Deferr MMC layer resume until I/O is requested"
|
||||
depends on MMC_BLOCK
|
||||
default n
|
||||
help
|
||||
Say Y here to enable deferred MMC resume until I/O
|
||||
is requested. This will reduce overall resume latency and
|
||||
save power when theres an SD card inserted but not being used.
|
||||
|
||||
config SDIO_UART
|
||||
tristate "SDIO UART/GPS class support"
|
||||
depends on TTY
|
||||
|
||||
@@ -36,6 +36,9 @@
|
||||
#include <linux/compat.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/mmc.h>
|
||||
|
||||
#include <linux/mmc/ioctl.h>
|
||||
#include <linux/mmc/card.h>
|
||||
#include <linux/mmc/host.h>
|
||||
@@ -166,11 +169,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
|
||||
|
||||
static inline int mmc_get_devidx(struct gendisk *disk)
|
||||
{
|
||||
int devmaj = MAJOR(disk_devt(disk));
|
||||
int devidx = MINOR(disk_devt(disk)) / perdev_minors;
|
||||
|
||||
if (!devmaj)
|
||||
devidx = disk->first_minor / perdev_minors;
|
||||
int devidx = disk->first_minor / perdev_minors;
|
||||
return devidx;
|
||||
}
|
||||
|
||||
@@ -426,9 +425,11 @@ static int ioctl_do_sanitize(struct mmc_card *card)
|
||||
pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
|
||||
mmc_hostname(card->host), __func__);
|
||||
|
||||
trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
|
||||
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_SANITIZE_START, 1,
|
||||
MMC_SANITIZE_REQ_TIMEOUT);
|
||||
trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
|
||||
|
||||
if (err)
|
||||
pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
|
||||
@@ -771,18 +772,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
|
||||
req->rq_disk->disk_name, "timed out", name, status);
|
||||
|
||||
/* If the status cmd initially failed, retry the r/w cmd */
|
||||
if (!status_valid)
|
||||
if (!status_valid) {
|
||||
pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
|
||||
return ERR_RETRY;
|
||||
|
||||
}
|
||||
/*
|
||||
* If it was a r/w cmd crc error, or illegal command
|
||||
* (eg, issued in wrong state) then retry - we should
|
||||
* have corrected the state problem above.
|
||||
*/
|
||||
if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
|
||||
if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
|
||||
pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
|
||||
return ERR_RETRY;
|
||||
}
|
||||
|
||||
/* Otherwise abort the command */
|
||||
pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
|
||||
return ERR_ABORT;
|
||||
|
||||
default:
|
||||
@@ -1961,6 +1966,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
|
||||
unsigned long flags;
|
||||
unsigned int cmd_flags = req ? req->cmd_flags : 0;
|
||||
|
||||
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
|
||||
if (mmc_bus_needs_resume(card->host))
|
||||
mmc_resume_bus(card->host);
|
||||
#endif
|
||||
|
||||
if (req && !mq->mqrq_prev->req)
|
||||
/* claim host only for the first request */
|
||||
mmc_get_card(card);
|
||||
@@ -2083,6 +2093,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
|
||||
md->disk->queue = md->queue.queue;
|
||||
md->disk->driverfs_dev = parent;
|
||||
set_disk_ro(md->disk, md->read_only || default_ro);
|
||||
md->disk->flags = GENHD_FL_EXT_DEVT;
|
||||
if (area_type & MMC_BLK_DATA_AREA_RPMB)
|
||||
md->disk->flags |= GENHD_FL_NO_PART_SCAN;
|
||||
|
||||
@@ -2399,6 +2410,9 @@ static int mmc_blk_probe(struct mmc_card *card)
|
||||
mmc_set_drvdata(card, md);
|
||||
mmc_fixup_device(card, blk_fixups);
|
||||
|
||||
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
|
||||
mmc_set_bus_resume_policy(card->host, 1);
|
||||
#endif
|
||||
if (mmc_add_disk(md))
|
||||
goto out;
|
||||
|
||||
@@ -2441,6 +2455,9 @@ static void mmc_blk_remove(struct mmc_card *card)
|
||||
pm_runtime_put_noidle(&card->dev);
|
||||
mmc_blk_remove_req(md);
|
||||
mmc_set_drvdata(card, NULL);
|
||||
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
|
||||
mmc_set_bus_resume_policy(card->host, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int _mmc_blk_suspend(struct mmc_card *card)
|
||||
|
||||
@@ -26,3 +26,18 @@ config MMC_CLKGATE
|
||||
support handling this in order for it to be of any use.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config MMC_EMBEDDED_SDIO
|
||||
boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
|
||||
help
|
||||
If you say Y here, support will be added for embedded SDIO
|
||||
devices which do not contain the necessary enumeration
|
||||
support in hardware to be properly detected.
|
||||
|
||||
config MMC_PARANOID_SD_INIT
|
||||
bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
|
||||
help
|
||||
If you say Y here, the MMC layer will be extra paranoid
|
||||
about re-trying SD init requests. This can be a useful
|
||||
work-around for buggy controllers and hardware. Enable
|
||||
if you are experiencing issues with SD detection.
|
||||
|
||||
@@ -29,6 +29,9 @@
|
||||
#include <linux/random.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/wakelock.h>
|
||||
|
||||
#include <trace/events/mmc.h>
|
||||
|
||||
#include <linux/mmc/card.h>
|
||||
#include <linux/mmc/host.h>
|
||||
@@ -54,6 +57,7 @@
|
||||
#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
|
||||
|
||||
static struct workqueue_struct *workqueue;
|
||||
static struct wake_lock mmc_delayed_work_wake_lock;
|
||||
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
|
||||
|
||||
/*
|
||||
@@ -87,6 +91,7 @@ MODULE_PARM_DESC(
|
||||
static int mmc_schedule_delayed_work(struct delayed_work *work,
|
||||
unsigned long delay)
|
||||
{
|
||||
wake_lock(&mmc_delayed_work_wake_lock);
|
||||
return queue_delayed_work(workqueue, work, delay);
|
||||
}
|
||||
|
||||
@@ -174,6 +179,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
|
||||
pr_debug("%s: %d bytes transferred: %d\n",
|
||||
mmc_hostname(host),
|
||||
mrq->data->bytes_xfered, mrq->data->error);
|
||||
trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
|
||||
}
|
||||
|
||||
if (mrq->stop) {
|
||||
@@ -557,8 +563,12 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
|
||||
mmc_start_bkops(host->card, true);
|
||||
}
|
||||
|
||||
if (!err && areq)
|
||||
if (!err && areq) {
|
||||
trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
|
||||
areq->mrq->cmd->arg,
|
||||
areq->mrq->data);
|
||||
start_err = __mmc_start_data_req(host, areq->mrq);
|
||||
}
|
||||
|
||||
if (host->areq)
|
||||
mmc_post_req(host, host->areq->mrq, 0);
|
||||
@@ -1885,8 +1895,13 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
|
||||
struct mmc_command cmd = {0};
|
||||
unsigned int qty = 0;
|
||||
unsigned long timeout;
|
||||
unsigned int fr, nr;
|
||||
int err;
|
||||
|
||||
fr = from;
|
||||
nr = to - from + 1;
|
||||
trace_mmc_blk_erase_start(arg, fr, nr);
|
||||
|
||||
/*
|
||||
* qty is used to calculate the erase timeout which depends on how many
|
||||
* erase groups (or allocation units in SD terminology) are affected.
|
||||
@@ -1990,6 +2005,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
|
||||
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
|
||||
(R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
|
||||
out:
|
||||
|
||||
trace_mmc_blk_erase_end(arg, fr, nr);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -2420,6 +2437,7 @@ void mmc_rescan(struct work_struct *work)
|
||||
struct mmc_host *host =
|
||||
container_of(work, struct mmc_host, detect.work);
|
||||
int i;
|
||||
bool extend_wakelock = false;
|
||||
|
||||
if (host->rescan_disable)
|
||||
return;
|
||||
@@ -2441,6 +2459,12 @@ void mmc_rescan(struct work_struct *work)
|
||||
|
||||
host->detect_change = 0;
|
||||
|
||||
/* If the card was removed the bus will be marked
|
||||
* as dead - extend the wakelock so userspace
|
||||
* can respond */
|
||||
if (host->bus_dead)
|
||||
extend_wakelock = 1;
|
||||
|
||||
/*
|
||||
* Let mmc_bus_put() free the bus/bus_ops if we've found that
|
||||
* the card is no longer present.
|
||||
@@ -2470,14 +2494,20 @@ void mmc_rescan(struct work_struct *work)
|
||||
|
||||
mmc_claim_host(host);
|
||||
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
|
||||
if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
|
||||
if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
|
||||
extend_wakelock = true;
|
||||
break;
|
||||
}
|
||||
if (freqs[i] <= host->f_min)
|
||||
break;
|
||||
}
|
||||
mmc_release_host(host);
|
||||
|
||||
out:
|
||||
if (extend_wakelock)
|
||||
wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
|
||||
else
|
||||
wake_unlock(&mmc_delayed_work_wake_lock);
|
||||
if (host->caps & MMC_CAP_NEEDS_POLL)
|
||||
mmc_schedule_delayed_work(&host->detect, HZ);
|
||||
}
|
||||
@@ -2712,6 +2742,22 @@ void mmc_init_context_info(struct mmc_host *host)
|
||||
init_waitqueue_head(&host->context_info.wait);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
void mmc_set_embedded_sdio_data(struct mmc_host *host,
|
||||
struct sdio_cis *cis,
|
||||
struct sdio_cccr *cccr,
|
||||
struct sdio_embedded_func *funcs,
|
||||
int num_funcs)
|
||||
{
|
||||
host->embedded_sdio_data.cis = cis;
|
||||
host->embedded_sdio_data.cccr = cccr;
|
||||
host->embedded_sdio_data.funcs = funcs;
|
||||
host->embedded_sdio_data.num_funcs = num_funcs;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
|
||||
#endif
|
||||
|
||||
static int __init mmc_init(void)
|
||||
{
|
||||
int ret;
|
||||
@@ -2720,6 +2766,9 @@ static int __init mmc_init(void)
|
||||
if (!workqueue)
|
||||
return -ENOMEM;
|
||||
|
||||
wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND,
|
||||
"mmc_delayed_work");
|
||||
|
||||
ret = mmc_register_bus();
|
||||
if (ret)
|
||||
goto destroy_workqueue;
|
||||
@@ -2740,6 +2789,7 @@ unregister_bus:
|
||||
mmc_unregister_bus();
|
||||
destroy_workqueue:
|
||||
destroy_workqueue(workqueue);
|
||||
wake_lock_destroy(&mmc_delayed_work_wake_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -2750,6 +2800,7 @@ static void __exit mmc_exit(void)
|
||||
mmc_unregister_host_class();
|
||||
mmc_unregister_bus();
|
||||
destroy_workqueue(workqueue);
|
||||
wake_lock_destroy(&mmc_delayed_work_wake_lock);
|
||||
}
|
||||
|
||||
subsys_initcall(mmc_init);
|
||||
|
||||
@@ -533,7 +533,8 @@ int mmc_add_host(struct mmc_host *host)
|
||||
mmc_host_clk_sysfs_init(host);
|
||||
|
||||
mmc_start_host(host);
|
||||
register_pm_notifier(&host->pm_notify);
|
||||
if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
|
||||
register_pm_notifier(&host->pm_notify);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -550,7 +551,9 @@ EXPORT_SYMBOL(mmc_add_host);
|
||||
*/
|
||||
void mmc_remove_host(struct mmc_host *host)
|
||||
{
|
||||
unregister_pm_notifier(&host->pm_notify);
|
||||
if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
|
||||
unregister_pm_notifier(&host->pm_notify);
|
||||
|
||||
mmc_stop_host(host);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
@@ -823,6 +823,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
|
||||
bool reinit)
|
||||
{
|
||||
int err;
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
int retries;
|
||||
#endif
|
||||
|
||||
if (!reinit) {
|
||||
/*
|
||||
@@ -849,7 +852,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
|
||||
/*
|
||||
* Fetch switch information from card.
|
||||
*/
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
for (retries = 1; retries <= 3; retries++) {
|
||||
err = mmc_read_switch(card);
|
||||
if (!err) {
|
||||
if (retries > 1) {
|
||||
printk(KERN_WARNING
|
||||
"%s: recovered\n",
|
||||
mmc_hostname(host));
|
||||
}
|
||||
break;
|
||||
} else {
|
||||
printk(KERN_WARNING
|
||||
"%s: read switch failed (attempt %d)\n",
|
||||
mmc_hostname(host), retries);
|
||||
}
|
||||
}
|
||||
#else
|
||||
err = mmc_read_switch(card);
|
||||
#endif
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
@@ -1051,7 +1073,10 @@ static int mmc_sd_alive(struct mmc_host *host)
|
||||
*/
|
||||
static void mmc_sd_detect(struct mmc_host *host)
|
||||
{
|
||||
int err;
|
||||
int err = 0;
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
int retries = 5;
|
||||
#endif
|
||||
|
||||
BUG_ON(!host);
|
||||
BUG_ON(!host->card);
|
||||
@@ -1061,7 +1086,23 @@ static void mmc_sd_detect(struct mmc_host *host)
|
||||
/*
|
||||
* Just check if our card has been removed.
|
||||
*/
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
while(retries) {
|
||||
err = mmc_send_status(host->card, NULL);
|
||||
if (err) {
|
||||
retries--;
|
||||
udelay(5);
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (!retries) {
|
||||
printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
|
||||
__func__, mmc_hostname(host), err);
|
||||
}
|
||||
#else
|
||||
err = _mmc_detect_card_removed(host);
|
||||
#endif
|
||||
|
||||
mmc_put_card(host->card);
|
||||
|
||||
@@ -1123,6 +1164,9 @@ static int mmc_sd_suspend(struct mmc_host *host)
|
||||
static int _mmc_sd_resume(struct mmc_host *host)
|
||||
{
|
||||
int err = 0;
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
int retries;
|
||||
#endif
|
||||
|
||||
BUG_ON(!host);
|
||||
BUG_ON(!host->card);
|
||||
@@ -1133,7 +1177,23 @@ static int _mmc_sd_resume(struct mmc_host *host)
|
||||
goto out;
|
||||
|
||||
mmc_power_up(host, host->card->ocr);
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
retries = 5;
|
||||
while (retries) {
|
||||
err = mmc_sd_init_card(host, host->card->ocr, host->card);
|
||||
|
||||
if (err) {
|
||||
printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
|
||||
mmc_hostname(host), err, retries);
|
||||
mdelay(5);
|
||||
retries--;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
#else
|
||||
err = mmc_sd_init_card(host, host->card->ocr, host->card);
|
||||
#endif
|
||||
mmc_card_clr_suspended(host->card);
|
||||
|
||||
out:
|
||||
@@ -1246,6 +1306,9 @@ int mmc_attach_sd(struct mmc_host *host)
|
||||
{
|
||||
int err;
|
||||
u32 ocr, rocr;
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
int retries;
|
||||
#endif
|
||||
|
||||
BUG_ON(!host);
|
||||
WARN_ON(!host->claimed);
|
||||
@@ -1282,9 +1345,27 @@ int mmc_attach_sd(struct mmc_host *host)
|
||||
/*
|
||||
* Detect and init the card.
|
||||
*/
|
||||
#ifdef CONFIG_MMC_PARANOID_SD_INIT
|
||||
retries = 5;
|
||||
while (retries) {
|
||||
err = mmc_sd_init_card(host, rocr, NULL);
|
||||
if (err) {
|
||||
retries--;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (!retries) {
|
||||
printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
|
||||
mmc_hostname(host), err);
|
||||
goto err;
|
||||
}
|
||||
#else
|
||||
err = mmc_sd_init_card(host, rocr, NULL);
|
||||
if (err)
|
||||
goto err;
|
||||
#endif
|
||||
|
||||
mmc_release_host(host);
|
||||
err = mmc_add_card(host->card);
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include <linux/mmc/host.h>
|
||||
@@ -28,6 +29,10 @@
|
||||
#include "sdio_ops.h"
|
||||
#include "sdio_cis.h"
|
||||
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
#include <linux/mmc/sdio_ids.h>
|
||||
#endif
|
||||
|
||||
static int sdio_read_fbr(struct sdio_func *func)
|
||||
{
|
||||
int ret;
|
||||
@@ -740,19 +745,35 @@ try_again:
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read the common registers.
|
||||
*/
|
||||
err = sdio_read_cccr(card, ocr);
|
||||
if (err)
|
||||
goto remove;
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
if (host->embedded_sdio_data.cccr)
|
||||
memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
|
||||
else {
|
||||
#endif
|
||||
/*
|
||||
* Read the common registers.
|
||||
*/
|
||||
err = sdio_read_cccr(card, ocr);
|
||||
if (err)
|
||||
goto remove;
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Read the common CIS tuples.
|
||||
*/
|
||||
err = sdio_read_common_cis(card);
|
||||
if (err)
|
||||
goto remove;
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
if (host->embedded_sdio_data.cis)
|
||||
memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
|
||||
else {
|
||||
#endif
|
||||
/*
|
||||
* Read the common CIS tuples.
|
||||
*/
|
||||
err = sdio_read_common_cis(card);
|
||||
if (err)
|
||||
goto remove;
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
}
|
||||
#endif
|
||||
|
||||
if (oldcard) {
|
||||
int same = (card->cis.vendor == oldcard->cis.vendor &&
|
||||
@@ -1177,14 +1198,36 @@ int mmc_attach_sdio(struct mmc_host *host)
|
||||
funcs = (ocr & 0x70000000) >> 28;
|
||||
card->sdio_funcs = 0;
|
||||
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
if (host->embedded_sdio_data.funcs)
|
||||
card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Initialize (but don't add) all present functions.
|
||||
*/
|
||||
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
|
||||
err = sdio_init_func(host->card, i + 1);
|
||||
if (err)
|
||||
goto remove;
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
if (host->embedded_sdio_data.funcs) {
|
||||
struct sdio_func *tmp;
|
||||
|
||||
tmp = sdio_alloc_func(host->card);
|
||||
if (IS_ERR(tmp))
|
||||
goto remove;
|
||||
tmp->num = (i + 1);
|
||||
card->sdio_func[i] = tmp;
|
||||
tmp->class = host->embedded_sdio_data.funcs[i].f_class;
|
||||
tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
|
||||
tmp->vendor = card->cis.vendor;
|
||||
tmp->device = card->cis.device;
|
||||
} else {
|
||||
#endif
|
||||
err = sdio_init_func(host->card, i + 1);
|
||||
if (err)
|
||||
goto remove;
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
* Enable Runtime PM for this func (if supported)
|
||||
*/
|
||||
@@ -1232,3 +1275,40 @@ err:
|
||||
return err;
|
||||
}
|
||||
|
||||
int sdio_reset_comm(struct mmc_card *card)
|
||||
{
|
||||
struct mmc_host *host = card->host;
|
||||
u32 ocr;
|
||||
u32 rocr;
|
||||
int err;
|
||||
|
||||
printk("%s():\n", __func__);
|
||||
mmc_claim_host(host);
|
||||
|
||||
mmc_go_idle(host);
|
||||
|
||||
mmc_set_clock(host, host->f_min);
|
||||
|
||||
err = mmc_send_io_op_cond(host, 0, &ocr);
|
||||
if (err)
|
||||
goto err;
|
||||
|
||||
rocr = mmc_select_voltage(host, ocr);
|
||||
if (!rocr) {
|
||||
err = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
err = mmc_sdio_init_card(host, rocr, card, 0);
|
||||
if (err)
|
||||
goto err;
|
||||
|
||||
mmc_release_host(host);
|
||||
return 0;
|
||||
err:
|
||||
printk("%s: Error resetting SDIO communications (%d)\n",
|
||||
mmc_hostname(host), err);
|
||||
mmc_release_host(host);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(sdio_reset_comm);
|
||||
|
||||
@@ -25,6 +25,10 @@
|
||||
#include "sdio_cis.h"
|
||||
#include "sdio_bus.h"
|
||||
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
#include <linux/mmc/host.h>
|
||||
#endif
|
||||
|
||||
/* show configuration fields */
|
||||
#define sdio_config_attr(field, format_string) \
|
||||
static ssize_t \
|
||||
@@ -273,7 +277,14 @@ static void sdio_release_func(struct device *dev)
|
||||
{
|
||||
struct sdio_func *func = dev_to_sdio_func(dev);
|
||||
|
||||
sdio_free_func_cis(func);
|
||||
#ifdef CONFIG_MMC_EMBEDDED_SDIO
|
||||
/*
|
||||
* If this device is embedded then we never allocated
|
||||
* cis tables for this func
|
||||
*/
|
||||
if (!func->card->host->embedded_sdio_data.funcs)
|
||||
#endif
|
||||
sdio_free_func_cis(func);
|
||||
|
||||
kfree(func->info);
|
||||
|
||||
|
||||
33
drivers/mmc/core/sdio_io.c
Normal file → Executable file
33
drivers/mmc/core/sdio_io.c
Normal file → Executable file
@@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sdio_readb);
|
||||
|
||||
/**
|
||||
* sdio_readb_ext - read a single byte from a SDIO function
|
||||
* @func: SDIO function to access
|
||||
* @addr: address to read
|
||||
* @err_ret: optional status value from transfer
|
||||
* @in: value to add to argument
|
||||
*
|
||||
* Reads a single byte from the address space of a given SDIO
|
||||
* function. If there is a problem reading the address, 0xff
|
||||
* is returned and @err_ret will contain the error code.
|
||||
*/
|
||||
unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
|
||||
int *err_ret, unsigned in)
|
||||
{
|
||||
int ret;
|
||||
unsigned char val;
|
||||
|
||||
BUG_ON(!func);
|
||||
|
||||
if (err_ret)
|
||||
*err_ret = 0;
|
||||
|
||||
ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
|
||||
if (ret) {
|
||||
if (err_ret)
|
||||
*err_ret = ret;
|
||||
return 0xFF;
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sdio_readb_ext);
|
||||
|
||||
/**
|
||||
* sdio_writeb - write a single byte to a SDIO function
|
||||
* @func: SDIO function to access
|
||||
|
||||
@@ -1,3 +1,10 @@
|
||||
config MTD_NAND_IDS
|
||||
tristate "Include chip ids for known NAND devices."
|
||||
depends on MTD
|
||||
help
|
||||
Useful for NAND drivers that do not use the NAND subsystem but
|
||||
still like to take advantage of the known chip information.
|
||||
|
||||
config MTD_NAND_ECC
|
||||
tristate
|
||||
|
||||
@@ -106,9 +113,6 @@ config MTD_NAND_OMAP_BCH
|
||||
legacy OMAP families like OMAP2xxx, OMAP3xxx do not have ELM engine
|
||||
so they should not enable this config symbol.
|
||||
|
||||
config MTD_NAND_IDS
|
||||
tristate
|
||||
|
||||
config MTD_NAND_RICOH
|
||||
tristate "Ricoh xD card reader"
|
||||
default n
|
||||
|
||||
@@ -149,6 +149,23 @@ config PPPOL2TP
|
||||
tunnels. L2TP is replacing PPTP for VPN uses.
|
||||
if TTY
|
||||
|
||||
config PPPOLAC
|
||||
tristate "PPP on L2TP Access Concentrator"
|
||||
depends on PPP && INET
|
||||
help
|
||||
L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
|
||||
networks. This driver handles L2TP data packets between a UDP socket
|
||||
and a PPP channel, but only permits one session per socket. Thus it is
|
||||
fairly simple and suited for clients.
|
||||
|
||||
config PPPOPNS
|
||||
tristate "PPP on PPTP Network Server"
|
||||
depends on PPP && INET
|
||||
help
|
||||
PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
|
||||
networks. This driver handles PPTP data packets between a RAW socket
|
||||
and a PPP channel. It is fairly simple and easy to use.
|
||||
|
||||
config PPP_ASYNC
|
||||
tristate "PPP support for async serial ports"
|
||||
depends on PPP
|
||||
|
||||
@@ -11,3 +11,5 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
|
||||
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
|
||||
obj-$(CONFIG_PPPOL2TP) += pppox.o
|
||||
obj-$(CONFIG_PPTP) += pppox.o pptp.o
|
||||
obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
|
||||
obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
|
||||
|
||||
449
drivers/net/ppp/pppolac.c
Normal file
449
drivers/net/ppp/pppolac.c
Normal file
@@ -0,0 +1,449 @@
|
||||
/* drivers/net/pppolac.c
|
||||
*
|
||||
* Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
|
||||
*
|
||||
* Copyright (C) 2009 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
|
||||
* The socket must keep connected, and only one session per socket is permitted.
|
||||
* Sequencing of outgoing packets is controlled by LNS. Incoming packets with
|
||||
* sequences are reordered within a sliding window of one second. Currently
|
||||
* reordering only happens when a packet is received. It is done for simplicity
|
||||
* since no additional locks or threads are required. This driver only works on
|
||||
* IPv4 due to the lack of UDP encapsulation support in IPv6. */
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/ppp_defs.h>
|
||||
#include <linux/if_ppp.h>
|
||||
#include <linux/if_pppox.h>
|
||||
#include <linux/ppp_channel.h>
|
||||
#include <net/tcp_states.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#define L2TP_CONTROL_BIT 0x80
|
||||
#define L2TP_LENGTH_BIT 0x40
|
||||
#define L2TP_SEQUENCE_BIT 0x08
|
||||
#define L2TP_OFFSET_BIT 0x02
|
||||
#define L2TP_VERSION 0x02
|
||||
#define L2TP_VERSION_MASK 0x0F
|
||||
|
||||
#define PPP_ADDR 0xFF
|
||||
#define PPP_CTRL 0x03
|
||||
|
||||
union unaligned {
|
||||
__u32 u32;
|
||||
} __attribute__((packed));
|
||||
|
||||
static inline union unaligned *unaligned(void *ptr)
|
||||
{
|
||||
return (union unaligned *)ptr;
|
||||
}
|
||||
|
||||
struct meta {
|
||||
__u32 sequence;
|
||||
__u32 timestamp;
|
||||
};
|
||||
|
||||
static inline struct meta *skb_meta(struct sk_buff *skb)
|
||||
{
|
||||
return (struct meta *)skb->cb;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
|
||||
{
|
||||
struct sock *sk = (struct sock *)sk_udp->sk_user_data;
|
||||
struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
|
||||
struct meta *meta = skb_meta(skb);
|
||||
__u32 now = jiffies;
|
||||
__u8 bits;
|
||||
__u8 *ptr;
|
||||
|
||||
/* Drop the packet if L2TP header is missing. */
|
||||
if (skb->len < sizeof(struct udphdr) + 6)
|
||||
goto drop;
|
||||
|
||||
/* Put it back if it is a control packet. */
|
||||
if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
|
||||
return opt->backlog_rcv(sk_udp, skb);
|
||||
|
||||
/* Skip UDP header. */
|
||||
skb_pull(skb, sizeof(struct udphdr));
|
||||
|
||||
/* Check the version. */
|
||||
if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
|
||||
goto drop;
|
||||
bits = skb->data[0];
|
||||
ptr = &skb->data[2];
|
||||
|
||||
/* Check the length if it is present. */
|
||||
if (bits & L2TP_LENGTH_BIT) {
|
||||
if ((ptr[0] << 8 | ptr[1]) != skb->len)
|
||||
goto drop;
|
||||
ptr += 2;
|
||||
}
|
||||
|
||||
/* Skip all fields including optional ones. */
|
||||
if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
|
||||
(bits & L2TP_LENGTH_BIT ? 2 : 0) +
|
||||
(bits & L2TP_OFFSET_BIT ? 2 : 0)))
|
||||
goto drop;
|
||||
|
||||
/* Skip the offset padding if it is present. */
|
||||
if (bits & L2TP_OFFSET_BIT &&
|
||||
!skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
|
||||
goto drop;
|
||||
|
||||
/* Check the tunnel and the session. */
|
||||
if (unaligned(ptr)->u32 != opt->local)
|
||||
goto drop;
|
||||
|
||||
/* Check the sequence if it is present. */
|
||||
if (bits & L2TP_SEQUENCE_BIT) {
|
||||
meta->sequence = ptr[4] << 8 | ptr[5];
|
||||
if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
|
||||
goto drop;
|
||||
}
|
||||
|
||||
/* Skip PPP address and control if they are present. */
|
||||
if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
|
||||
skb->data[1] == PPP_CTRL)
|
||||
skb_pull(skb, 2);
|
||||
|
||||
/* Fix PPP protocol if it is compressed. */
|
||||
if (skb->len >= 1 && skb->data[0] & 1)
|
||||
skb_push(skb, 1)[0] = 0;
|
||||
|
||||
/* Drop the packet if PPP protocol is missing. */
|
||||
if (skb->len < 2)
|
||||
goto drop;
|
||||
|
||||
/* Perform reordering if sequencing is enabled. */
|
||||
atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
|
||||
if (bits & L2TP_SEQUENCE_BIT) {
|
||||
struct sk_buff *skb1;
|
||||
|
||||
/* Insert the packet into receive queue in order. */
|
||||
skb_set_owner_r(skb, sk);
|
||||
skb_queue_walk(&sk->sk_receive_queue, skb1) {
|
||||
struct meta *meta1 = skb_meta(skb1);
|
||||
__s16 order = meta->sequence - meta1->sequence;
|
||||
if (order == 0)
|
||||
goto drop;
|
||||
if (order < 0) {
|
||||
meta->timestamp = meta1->timestamp;
|
||||
skb_insert(skb1, skb, &sk->sk_receive_queue);
|
||||
skb = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (skb) {
|
||||
meta->timestamp = now;
|
||||
skb_queue_tail(&sk->sk_receive_queue, skb);
|
||||
}
|
||||
|
||||
/* Remove packets from receive queue as long as
|
||||
* 1. the receive buffer is full,
|
||||
* 2. they are queued longer than one second, or
|
||||
* 3. there are no missing packets before them. */
|
||||
skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
|
||||
meta = skb_meta(skb);
|
||||
if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
|
||||
now - meta->timestamp < HZ &&
|
||||
meta->sequence != opt->recv_sequence)
|
||||
break;
|
||||
skb_unlink(skb, &sk->sk_receive_queue);
|
||||
opt->recv_sequence = (__u16)(meta->sequence + 1);
|
||||
skb_orphan(skb);
|
||||
ppp_input(&pppox_sk(sk)->chan, skb);
|
||||
}
|
||||
return NET_RX_SUCCESS;
|
||||
}
|
||||
|
||||
/* Flush receive queue if sequencing is disabled. */
|
||||
skb_queue_purge(&sk->sk_receive_queue);
|
||||
skb_orphan(skb);
|
||||
ppp_input(&pppox_sk(sk)->chan, skb);
|
||||
return NET_RX_SUCCESS;
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
||||
static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
|
||||
{
|
||||
sock_hold(sk_udp);
|
||||
sk_receive_skb(sk_udp, skb, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct sk_buff_head delivery_queue;
|
||||
|
||||
static void pppolac_xmit_core(struct work_struct *delivery_work)
|
||||
{
|
||||
mm_segment_t old_fs = get_fs();
|
||||
struct sk_buff *skb;
|
||||
|
||||
set_fs(KERNEL_DS);
|
||||
while ((skb = skb_dequeue(&delivery_queue))) {
|
||||
struct sock *sk_udp = skb->sk;
|
||||
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
|
||||
struct msghdr msg = {
|
||||
.msg_iov = (struct iovec *)&iov,
|
||||
.msg_iovlen = 1,
|
||||
.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
|
||||
};
|
||||
sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
set_fs(old_fs);
|
||||
}
|
||||
|
||||
static DECLARE_WORK(delivery_work, pppolac_xmit_core);
|
||||
|
||||
static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
|
||||
{
|
||||
struct sock *sk_udp = (struct sock *)chan->private;
|
||||
struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
|
||||
|
||||
/* Install PPP address and control. */
|
||||
skb_push(skb, 2);
|
||||
skb->data[0] = PPP_ADDR;
|
||||
skb->data[1] = PPP_CTRL;
|
||||
|
||||
/* Install L2TP header. */
|
||||
if (atomic_read(&opt->sequencing)) {
|
||||
skb_push(skb, 10);
|
||||
skb->data[0] = L2TP_SEQUENCE_BIT;
|
||||
skb->data[6] = opt->xmit_sequence >> 8;
|
||||
skb->data[7] = opt->xmit_sequence;
|
||||
skb->data[8] = 0;
|
||||
skb->data[9] = 0;
|
||||
opt->xmit_sequence++;
|
||||
} else {
|
||||
skb_push(skb, 6);
|
||||
skb->data[0] = 0;
|
||||
}
|
||||
skb->data[1] = L2TP_VERSION;
|
||||
unaligned(&skb->data[2])->u32 = opt->remote;
|
||||
|
||||
/* Now send the packet via the delivery queue. */
|
||||
skb_set_owner_w(skb, sk_udp);
|
||||
skb_queue_tail(&delivery_queue, skb);
|
||||
schedule_work(&delivery_work);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static struct ppp_channel_ops pppolac_channel_ops = {
|
||||
.start_xmit = pppolac_xmit,
|
||||
};
|
||||
|
||||
static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
|
||||
int addrlen, int flags)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct pppox_sock *po = pppox_sk(sk);
|
||||
struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
|
||||
struct socket *sock_udp = NULL;
|
||||
struct sock *sk_udp;
|
||||
int error;
|
||||
|
||||
if (addrlen != sizeof(struct sockaddr_pppolac) ||
|
||||
!addr->local.tunnel || !addr->local.session ||
|
||||
!addr->remote.tunnel || !addr->remote.session) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
lock_sock(sk);
|
||||
error = -EALREADY;
|
||||
if (sk->sk_state != PPPOX_NONE)
|
||||
goto out;
|
||||
|
||||
sock_udp = sockfd_lookup(addr->udp_socket, &error);
|
||||
if (!sock_udp)
|
||||
goto out;
|
||||
sk_udp = sock_udp->sk;
|
||||
lock_sock(sk_udp);
|
||||
|
||||
/* Remove this check when IPv6 supports UDP encapsulation. */
|
||||
error = -EAFNOSUPPORT;
|
||||
if (sk_udp->sk_family != AF_INET)
|
||||
goto out;
|
||||
error = -EPROTONOSUPPORT;
|
||||
if (sk_udp->sk_protocol != IPPROTO_UDP)
|
||||
goto out;
|
||||
error = -EDESTADDRREQ;
|
||||
if (sk_udp->sk_state != TCP_ESTABLISHED)
|
||||
goto out;
|
||||
error = -EBUSY;
|
||||
if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
|
||||
goto out;
|
||||
if (!sk_udp->sk_bound_dev_if) {
|
||||
struct dst_entry *dst = sk_dst_get(sk_udp);
|
||||
error = -ENODEV;
|
||||
if (!dst)
|
||||
goto out;
|
||||
sk_udp->sk_bound_dev_if = dst->dev->ifindex;
|
||||
dst_release(dst);
|
||||
}
|
||||
|
||||
po->chan.hdrlen = 12;
|
||||
po->chan.private = sk_udp;
|
||||
po->chan.ops = &pppolac_channel_ops;
|
||||
po->chan.mtu = PPP_MRU - 80;
|
||||
po->proto.lac.local = unaligned(&addr->local)->u32;
|
||||
po->proto.lac.remote = unaligned(&addr->remote)->u32;
|
||||
atomic_set(&po->proto.lac.sequencing, 1);
|
||||
po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
|
||||
|
||||
error = ppp_register_channel(&po->chan);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
sk->sk_state = PPPOX_CONNECTED;
|
||||
udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
|
||||
udp_sk(sk_udp)->encap_rcv = pppolac_recv;
|
||||
sk_udp->sk_backlog_rcv = pppolac_recv_core;
|
||||
sk_udp->sk_user_data = sk;
|
||||
out:
|
||||
if (sock_udp) {
|
||||
release_sock(sk_udp);
|
||||
if (error)
|
||||
sockfd_put(sock_udp);
|
||||
}
|
||||
release_sock(sk);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int pppolac_release(struct socket *sock)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
|
||||
if (!sk)
|
||||
return 0;
|
||||
|
||||
lock_sock(sk);
|
||||
if (sock_flag(sk, SOCK_DEAD)) {
|
||||
release_sock(sk);
|
||||
return -EBADF;
|
||||
}
|
||||
|
||||
if (sk->sk_state != PPPOX_NONE) {
|
||||
struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
|
||||
lock_sock(sk_udp);
|
||||
skb_queue_purge(&sk->sk_receive_queue);
|
||||
pppox_unbind_sock(sk);
|
||||
udp_sk(sk_udp)->encap_type = 0;
|
||||
udp_sk(sk_udp)->encap_rcv = NULL;
|
||||
sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
|
||||
sk_udp->sk_user_data = NULL;
|
||||
release_sock(sk_udp);
|
||||
sockfd_put(sk_udp->sk_socket);
|
||||
}
|
||||
|
||||
sock_orphan(sk);
|
||||
sock->sk = NULL;
|
||||
release_sock(sk);
|
||||
sock_put(sk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static struct proto pppolac_proto = {
|
||||
.name = "PPPOLAC",
|
||||
.owner = THIS_MODULE,
|
||||
.obj_size = sizeof(struct pppox_sock),
|
||||
};
|
||||
|
||||
static struct proto_ops pppolac_proto_ops = {
|
||||
.family = PF_PPPOX,
|
||||
.owner = THIS_MODULE,
|
||||
.release = pppolac_release,
|
||||
.bind = sock_no_bind,
|
||||
.connect = pppolac_connect,
|
||||
.socketpair = sock_no_socketpair,
|
||||
.accept = sock_no_accept,
|
||||
.getname = sock_no_getname,
|
||||
.poll = sock_no_poll,
|
||||
.ioctl = pppox_ioctl,
|
||||
.listen = sock_no_listen,
|
||||
.shutdown = sock_no_shutdown,
|
||||
.setsockopt = sock_no_setsockopt,
|
||||
.getsockopt = sock_no_getsockopt,
|
||||
.sendmsg = sock_no_sendmsg,
|
||||
.recvmsg = sock_no_recvmsg,
|
||||
.mmap = sock_no_mmap,
|
||||
};
|
||||
|
||||
static int pppolac_create(struct net *net, struct socket *sock)
|
||||
{
|
||||
struct sock *sk;
|
||||
|
||||
sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
|
||||
if (!sk)
|
||||
return -ENOMEM;
|
||||
|
||||
sock_init_data(sock, sk);
|
||||
sock->state = SS_UNCONNECTED;
|
||||
sock->ops = &pppolac_proto_ops;
|
||||
sk->sk_protocol = PX_PROTO_OLAC;
|
||||
sk->sk_state = PPPOX_NONE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static struct pppox_proto pppolac_pppox_proto = {
|
||||
.create = pppolac_create,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init pppolac_init(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = proto_register(&pppolac_proto, 0);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
|
||||
if (error)
|
||||
proto_unregister(&pppolac_proto);
|
||||
else
|
||||
skb_queue_head_init(&delivery_queue);
|
||||
return error;
|
||||
}
|
||||
|
||||
static void __exit pppolac_exit(void)
|
||||
{
|
||||
unregister_pppox_proto(PX_PROTO_OLAC);
|
||||
proto_unregister(&pppolac_proto);
|
||||
}
|
||||
|
||||
module_init(pppolac_init);
|
||||
module_exit(pppolac_exit);
|
||||
|
||||
MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
|
||||
MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
428
drivers/net/ppp/pppopns.c
Normal file
428
drivers/net/ppp/pppopns.c
Normal file
@@ -0,0 +1,428 @@
|
||||
/* drivers/net/pppopns.c
|
||||
*
|
||||
* Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
|
||||
*
|
||||
* Copyright (C) 2009 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
|
||||
* The socket is created in the kernel space and connected to the same address
|
||||
* of the control socket. Outgoing packets are always sent with sequences but
|
||||
* without acknowledgements. Incoming packets with sequences are reordered
|
||||
* within a sliding window of one second. Currently reordering only happens when
|
||||
* a packet is received. It is done for simplicity since no additional locks or
|
||||
* threads are required. This driver should work on both IPv4 and IPv6. */
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/ppp_defs.h>
|
||||
#include <linux/if.h>
|
||||
#include <linux/if_ppp.h>
|
||||
#include <linux/if_pppox.h>
|
||||
#include <linux/ppp_channel.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#define GRE_HEADER_SIZE 8
|
||||
|
||||
#define PPTP_GRE_BITS htons(0x2001)
|
||||
#define PPTP_GRE_BITS_MASK htons(0xEF7F)
|
||||
#define PPTP_GRE_SEQ_BIT htons(0x1000)
|
||||
#define PPTP_GRE_ACK_BIT htons(0x0080)
|
||||
#define PPTP_GRE_TYPE htons(0x880B)
|
||||
|
||||
#define PPP_ADDR 0xFF
|
||||
#define PPP_CTRL 0x03
|
||||
|
||||
struct header {
|
||||
__u16 bits;
|
||||
__u16 type;
|
||||
__u16 length;
|
||||
__u16 call;
|
||||
__u32 sequence;
|
||||
} __attribute__((packed));
|
||||
|
||||
struct meta {
|
||||
__u32 sequence;
|
||||
__u32 timestamp;
|
||||
};
|
||||
|
||||
static inline struct meta *skb_meta(struct sk_buff *skb)
|
||||
{
|
||||
return (struct meta *)skb->cb;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
|
||||
{
|
||||
struct sock *sk = (struct sock *)sk_raw->sk_user_data;
|
||||
struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
|
||||
struct meta *meta = skb_meta(skb);
|
||||
__u32 now = jiffies;
|
||||
struct header *hdr;
|
||||
|
||||
/* Skip transport header */
|
||||
skb_pull(skb, skb_transport_header(skb) - skb->data);
|
||||
|
||||
/* Drop the packet if GRE header is missing. */
|
||||
if (skb->len < GRE_HEADER_SIZE)
|
||||
goto drop;
|
||||
hdr = (struct header *)skb->data;
|
||||
|
||||
/* Check the header. */
|
||||
if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
|
||||
(hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
|
||||
goto drop;
|
||||
|
||||
/* Skip all fields including optional ones. */
|
||||
if (!skb_pull(skb, GRE_HEADER_SIZE +
|
||||
(hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
|
||||
(hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
|
||||
goto drop;
|
||||
|
||||
/* Check the length. */
|
||||
if (skb->len != ntohs(hdr->length))
|
||||
goto drop;
|
||||
|
||||
/* Check the sequence if it is present. */
|
||||
if (hdr->bits & PPTP_GRE_SEQ_BIT) {
|
||||
meta->sequence = ntohl(hdr->sequence);
|
||||
if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
|
||||
goto drop;
|
||||
}
|
||||
|
||||
/* Skip PPP address and control if they are present. */
|
||||
if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
|
||||
skb->data[1] == PPP_CTRL)
|
||||
skb_pull(skb, 2);
|
||||
|
||||
/* Fix PPP protocol if it is compressed. */
|
||||
if (skb->len >= 1 && skb->data[0] & 1)
|
||||
skb_push(skb, 1)[0] = 0;
|
||||
|
||||
/* Drop the packet if PPP protocol is missing. */
|
||||
if (skb->len < 2)
|
||||
goto drop;
|
||||
|
||||
/* Perform reordering if sequencing is enabled. */
|
||||
if (hdr->bits & PPTP_GRE_SEQ_BIT) {
|
||||
struct sk_buff *skb1;
|
||||
|
||||
/* Insert the packet into receive queue in order. */
|
||||
skb_set_owner_r(skb, sk);
|
||||
skb_queue_walk(&sk->sk_receive_queue, skb1) {
|
||||
struct meta *meta1 = skb_meta(skb1);
|
||||
__s32 order = meta->sequence - meta1->sequence;
|
||||
if (order == 0)
|
||||
goto drop;
|
||||
if (order < 0) {
|
||||
meta->timestamp = meta1->timestamp;
|
||||
skb_insert(skb1, skb, &sk->sk_receive_queue);
|
||||
skb = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (skb) {
|
||||
meta->timestamp = now;
|
||||
skb_queue_tail(&sk->sk_receive_queue, skb);
|
||||
}
|
||||
|
||||
/* Remove packets from receive queue as long as
|
||||
* 1. the receive buffer is full,
|
||||
* 2. they are queued longer than one second, or
|
||||
* 3. there are no missing packets before them. */
|
||||
skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
|
||||
meta = skb_meta(skb);
|
||||
if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
|
||||
now - meta->timestamp < HZ &&
|
||||
meta->sequence != opt->recv_sequence)
|
||||
break;
|
||||
skb_unlink(skb, &sk->sk_receive_queue);
|
||||
opt->recv_sequence = meta->sequence + 1;
|
||||
skb_orphan(skb);
|
||||
ppp_input(&pppox_sk(sk)->chan, skb);
|
||||
}
|
||||
return NET_RX_SUCCESS;
|
||||
}
|
||||
|
||||
/* Flush receive queue if sequencing is disabled. */
|
||||
skb_queue_purge(&sk->sk_receive_queue);
|
||||
skb_orphan(skb);
|
||||
ppp_input(&pppox_sk(sk)->chan, skb);
|
||||
return NET_RX_SUCCESS;
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
||||
static void pppopns_recv(struct sock *sk_raw, int length)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
|
||||
sock_hold(sk_raw);
|
||||
sk_receive_skb(sk_raw, skb, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static struct sk_buff_head delivery_queue;
|
||||
|
||||
static void pppopns_xmit_core(struct work_struct *delivery_work)
|
||||
{
|
||||
mm_segment_t old_fs = get_fs();
|
||||
struct sk_buff *skb;
|
||||
|
||||
set_fs(KERNEL_DS);
|
||||
while ((skb = skb_dequeue(&delivery_queue))) {
|
||||
struct sock *sk_raw = skb->sk;
|
||||
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
|
||||
struct msghdr msg = {
|
||||
.msg_iov = (struct iovec *)&iov,
|
||||
.msg_iovlen = 1,
|
||||
.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
|
||||
};
|
||||
sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
set_fs(old_fs);
|
||||
}
|
||||
|
||||
static DECLARE_WORK(delivery_work, pppopns_xmit_core);
|
||||
|
||||
static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
|
||||
{
|
||||
struct sock *sk_raw = (struct sock *)chan->private;
|
||||
struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
|
||||
struct header *hdr;
|
||||
__u16 length;
|
||||
|
||||
/* Install PPP address and control. */
|
||||
skb_push(skb, 2);
|
||||
skb->data[0] = PPP_ADDR;
|
||||
skb->data[1] = PPP_CTRL;
|
||||
length = skb->len;
|
||||
|
||||
/* Install PPTP GRE header. */
|
||||
hdr = (struct header *)skb_push(skb, 12);
|
||||
hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
|
||||
hdr->type = PPTP_GRE_TYPE;
|
||||
hdr->length = htons(length);
|
||||
hdr->call = opt->remote;
|
||||
hdr->sequence = htonl(opt->xmit_sequence);
|
||||
opt->xmit_sequence++;
|
||||
|
||||
/* Now send the packet via the delivery queue. */
|
||||
skb_set_owner_w(skb, sk_raw);
|
||||
skb_queue_tail(&delivery_queue, skb);
|
||||
schedule_work(&delivery_work);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static struct ppp_channel_ops pppopns_channel_ops = {
|
||||
.start_xmit = pppopns_xmit,
|
||||
};
|
||||
|
||||
static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
|
||||
int addrlen, int flags)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct pppox_sock *po = pppox_sk(sk);
|
||||
struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
|
||||
struct sockaddr_storage ss;
|
||||
struct socket *sock_tcp = NULL;
|
||||
struct socket *sock_raw = NULL;
|
||||
struct sock *sk_tcp;
|
||||
struct sock *sk_raw;
|
||||
int error;
|
||||
|
||||
if (addrlen != sizeof(struct sockaddr_pppopns))
|
||||
return -EINVAL;
|
||||
|
||||
lock_sock(sk);
|
||||
error = -EALREADY;
|
||||
if (sk->sk_state != PPPOX_NONE)
|
||||
goto out;
|
||||
|
||||
sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
|
||||
if (!sock_tcp)
|
||||
goto out;
|
||||
sk_tcp = sock_tcp->sk;
|
||||
error = -EPROTONOSUPPORT;
|
||||
if (sk_tcp->sk_protocol != IPPROTO_TCP)
|
||||
goto out;
|
||||
addrlen = sizeof(struct sockaddr_storage);
|
||||
error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
|
||||
if (error)
|
||||
goto out;
|
||||
if (!sk_tcp->sk_bound_dev_if) {
|
||||
struct dst_entry *dst = sk_dst_get(sk_tcp);
|
||||
error = -ENODEV;
|
||||
if (!dst)
|
||||
goto out;
|
||||
sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
|
||||
dst_release(dst);
|
||||
}
|
||||
|
||||
error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
|
||||
if (error)
|
||||
goto out;
|
||||
sk_raw = sock_raw->sk;
|
||||
sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
|
||||
error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
po->chan.hdrlen = 14;
|
||||
po->chan.private = sk_raw;
|
||||
po->chan.ops = &pppopns_channel_ops;
|
||||
po->chan.mtu = PPP_MRU - 80;
|
||||
po->proto.pns.local = addr->local;
|
||||
po->proto.pns.remote = addr->remote;
|
||||
po->proto.pns.data_ready = sk_raw->sk_data_ready;
|
||||
po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
|
||||
|
||||
error = ppp_register_channel(&po->chan);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
sk->sk_state = PPPOX_CONNECTED;
|
||||
lock_sock(sk_raw);
|
||||
sk_raw->sk_data_ready = pppopns_recv;
|
||||
sk_raw->sk_backlog_rcv = pppopns_recv_core;
|
||||
sk_raw->sk_user_data = sk;
|
||||
release_sock(sk_raw);
|
||||
out:
|
||||
if (sock_tcp)
|
||||
sockfd_put(sock_tcp);
|
||||
if (error && sock_raw)
|
||||
sock_release(sock_raw);
|
||||
release_sock(sk);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int pppopns_release(struct socket *sock)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
|
||||
if (!sk)
|
||||
return 0;
|
||||
|
||||
lock_sock(sk);
|
||||
if (sock_flag(sk, SOCK_DEAD)) {
|
||||
release_sock(sk);
|
||||
return -EBADF;
|
||||
}
|
||||
|
||||
if (sk->sk_state != PPPOX_NONE) {
|
||||
struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
|
||||
lock_sock(sk_raw);
|
||||
skb_queue_purge(&sk->sk_receive_queue);
|
||||
pppox_unbind_sock(sk);
|
||||
sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
|
||||
sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
|
||||
sk_raw->sk_user_data = NULL;
|
||||
release_sock(sk_raw);
|
||||
sock_release(sk_raw->sk_socket);
|
||||
}
|
||||
|
||||
sock_orphan(sk);
|
||||
sock->sk = NULL;
|
||||
release_sock(sk);
|
||||
sock_put(sk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static struct proto pppopns_proto = {
|
||||
.name = "PPPOPNS",
|
||||
.owner = THIS_MODULE,
|
||||
.obj_size = sizeof(struct pppox_sock),
|
||||
};
|
||||
|
||||
static struct proto_ops pppopns_proto_ops = {
|
||||
.family = PF_PPPOX,
|
||||
.owner = THIS_MODULE,
|
||||
.release = pppopns_release,
|
||||
.bind = sock_no_bind,
|
||||
.connect = pppopns_connect,
|
||||
.socketpair = sock_no_socketpair,
|
||||
.accept = sock_no_accept,
|
||||
.getname = sock_no_getname,
|
||||
.poll = sock_no_poll,
|
||||
.ioctl = pppox_ioctl,
|
||||
.listen = sock_no_listen,
|
||||
.shutdown = sock_no_shutdown,
|
||||
.setsockopt = sock_no_setsockopt,
|
||||
.getsockopt = sock_no_getsockopt,
|
||||
.sendmsg = sock_no_sendmsg,
|
||||
.recvmsg = sock_no_recvmsg,
|
||||
.mmap = sock_no_mmap,
|
||||
};
|
||||
|
||||
static int pppopns_create(struct net *net, struct socket *sock)
|
||||
{
|
||||
struct sock *sk;
|
||||
|
||||
sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
|
||||
if (!sk)
|
||||
return -ENOMEM;
|
||||
|
||||
sock_init_data(sock, sk);
|
||||
sock->state = SS_UNCONNECTED;
|
||||
sock->ops = &pppopns_proto_ops;
|
||||
sk->sk_protocol = PX_PROTO_OPNS;
|
||||
sk->sk_state = PPPOX_NONE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static struct pppox_proto pppopns_pppox_proto = {
|
||||
.create = pppopns_create,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init pppopns_init(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = proto_register(&pppopns_proto, 0);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
|
||||
if (error)
|
||||
proto_unregister(&pppopns_proto);
|
||||
else
|
||||
skb_queue_head_init(&delivery_queue);
|
||||
return error;
|
||||
}
|
||||
|
||||
static void __exit pppopns_exit(void)
|
||||
{
|
||||
unregister_pppox_proto(PX_PROTO_OPNS);
|
||||
proto_unregister(&pppopns_proto);
|
||||
}
|
||||
|
||||
module_init(pppopns_init);
|
||||
module_exit(pppopns_exit);
|
||||
|
||||
MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
|
||||
MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
@@ -1889,6 +1889,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
|
||||
unsigned int ifindex;
|
||||
int ret;
|
||||
|
||||
#ifdef CONFIG_ANDROID_PARANOID_NETWORK
|
||||
if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
|
||||
return -EPERM;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
|
||||
if (copy_from_user(&ifr, argp, ifreq_len))
|
||||
return -EFAULT;
|
||||
|
||||
@@ -264,6 +264,11 @@ config MWL8K
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called mwl8k. If unsure, say N.
|
||||
|
||||
config WIFI_CONTROL_FUNC
|
||||
bool "Enable WiFi control function abstraction"
|
||||
help
|
||||
Enables Power/Reset/Carddetect function abstraction
|
||||
|
||||
source "drivers/net/wireless/ath/Kconfig"
|
||||
source "drivers/net/wireless/b43/Kconfig"
|
||||
source "drivers/net/wireless/b43legacy/Kconfig"
|
||||
|
||||
@@ -189,6 +189,10 @@ static struct device_attribute power_supply_attrs[] = {
|
||||
POWER_SUPPLY_ATTR(time_to_full_avg),
|
||||
POWER_SUPPLY_ATTR(type),
|
||||
POWER_SUPPLY_ATTR(scope),
|
||||
/* Local extensions */
|
||||
POWER_SUPPLY_ATTR(usb_hc),
|
||||
POWER_SUPPLY_ATTR(usb_otg),
|
||||
POWER_SUPPLY_ATTR(charge_enabled),
|
||||
/* Properties of type `const char *' */
|
||||
POWER_SUPPLY_ATTR(model_name),
|
||||
POWER_SUPPLY_ATTR(manufacturer),
|
||||
|
||||
@@ -20,6 +20,14 @@ config ANDROID_BINDER_IPC
|
||||
Android process, using Binder to identify, invoke and pass arguments
|
||||
between said processes.
|
||||
|
||||
config ANDROID_BINDER_IPC_32BIT
|
||||
bool
|
||||
default y
|
||||
depends on !64BIT && ANDROID_BINDER_IPC
|
||||
---help---
|
||||
Enable to support an old 32-bit Android user-space. Breaks the new
|
||||
Android user-space.
|
||||
|
||||
config ASHMEM
|
||||
bool "Enable the Anonymous Shared Memory Subsystem"
|
||||
default n
|
||||
@@ -64,6 +72,15 @@ config ANDROID_LOW_MEMORY_KILLER
|
||||
---help---
|
||||
Registers processes to be killed when memory is low
|
||||
|
||||
config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
|
||||
bool "Android Low Memory Killer: detect oom_adj values"
|
||||
depends on ANDROID_LOW_MEMORY_KILLER
|
||||
default y
|
||||
---help---
|
||||
Detect oom_adj values written to
|
||||
/sys/module/lowmemorykiller/parameters/adj and convert them
|
||||
to oom_score_adj values.
|
||||
|
||||
config ANDROID_INTF_ALARM_DEV
|
||||
bool "Android alarm driver"
|
||||
depends on RTC_CLASS
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
TODO:
|
||||
- checkpatch.pl cleanups
|
||||
- sparse fixes
|
||||
- rename files to be not so "generic"
|
||||
- make sure things build as modules properly
|
||||
- add proper arch dependencies as needed
|
||||
- audit userspace interfaces to make sure they are sane
|
||||
|
||||
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
|
||||
Brian Swetland <swetland@google.com>
|
||||
@@ -16,50 +16,10 @@
|
||||
#ifndef _LINUX_ANDROID_ALARM_H
|
||||
#define _LINUX_ANDROID_ALARM_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
enum android_alarm_type {
|
||||
/* return code bit numbers or set alarm arg */
|
||||
ANDROID_ALARM_RTC_WAKEUP,
|
||||
ANDROID_ALARM_RTC,
|
||||
ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
|
||||
ANDROID_ALARM_ELAPSED_REALTIME,
|
||||
ANDROID_ALARM_SYSTEMTIME,
|
||||
|
||||
ANDROID_ALARM_TYPE_COUNT,
|
||||
|
||||
/* return code bit numbers */
|
||||
/* ANDROID_ALARM_TIME_CHANGE = 16 */
|
||||
};
|
||||
|
||||
enum android_alarm_return_flags {
|
||||
ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
|
||||
ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
|
||||
ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
|
||||
1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
|
||||
ANDROID_ALARM_ELAPSED_REALTIME_MASK =
|
||||
1U << ANDROID_ALARM_ELAPSED_REALTIME,
|
||||
ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
|
||||
ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
|
||||
};
|
||||
|
||||
/* Disable alarm */
|
||||
#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
|
||||
|
||||
/* Ack last alarm and wait for next */
|
||||
#define ANDROID_ALARM_WAIT _IO('a', 1)
|
||||
|
||||
#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
|
||||
/* Set alarm */
|
||||
#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
|
||||
#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
|
||||
#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
|
||||
#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
|
||||
#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
|
||||
#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
|
||||
|
||||
#include "uapi/android_alarm.h"
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
|
||||
|
||||
@@ -396,22 +396,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
}
|
||||
get_file(asma->file);
|
||||
|
||||
/*
|
||||
* XXX - Reworked to use shmem_zero_setup() instead of
|
||||
* shmem_set_file while we're in staging. -jstultz
|
||||
*/
|
||||
if (vma->vm_flags & VM_SHARED) {
|
||||
ret = shmem_zero_setup(vma);
|
||||
if (ret) {
|
||||
fput(asma->file);
|
||||
goto out;
|
||||
}
|
||||
if (vma->vm_flags & VM_SHARED)
|
||||
shmem_set_file(vma, asma->file);
|
||||
else {
|
||||
if (vma->vm_file)
|
||||
fput(vma->vm_file);
|
||||
vma->vm_file = asma->file;
|
||||
}
|
||||
|
||||
if (vma->vm_file)
|
||||
fput(vma->vm_file);
|
||||
vma->vm_file = asma->file;
|
||||
|
||||
out:
|
||||
mutex_unlock(&ashmem_mutex);
|
||||
return ret;
|
||||
|
||||
@@ -16,35 +16,7 @@
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/compat.h>
|
||||
|
||||
#define ASHMEM_NAME_LEN 256
|
||||
|
||||
#define ASHMEM_NAME_DEF "dev/ashmem"
|
||||
|
||||
/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
|
||||
#define ASHMEM_NOT_PURGED 0
|
||||
#define ASHMEM_WAS_PURGED 1
|
||||
|
||||
/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
|
||||
#define ASHMEM_IS_UNPINNED 0
|
||||
#define ASHMEM_IS_PINNED 1
|
||||
|
||||
struct ashmem_pin {
|
||||
__u32 offset; /* offset into region, in bytes, page-aligned */
|
||||
__u32 len; /* length forward from offset, in bytes, page-aligned */
|
||||
};
|
||||
|
||||
#define __ASHMEMIOC 0x77
|
||||
|
||||
#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
|
||||
#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
|
||||
#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
|
||||
#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
|
||||
#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
|
||||
#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
|
||||
#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
|
||||
#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
|
||||
#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
|
||||
#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
|
||||
#include "uapi/ashmem.h"
|
||||
|
||||
/* support of 32bit userspace on 64bit platforms */
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
||||
@@ -37,6 +37,7 @@
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pid_namespace.h>
|
||||
#include <linux/security.h>
|
||||
|
||||
#include "binder.h"
|
||||
#include "binder_trace.h"
|
||||
@@ -228,8 +229,8 @@ struct binder_node {
|
||||
int internal_strong_refs;
|
||||
int local_weak_refs;
|
||||
int local_strong_refs;
|
||||
void __user *ptr;
|
||||
void __user *cookie;
|
||||
binder_uintptr_t ptr;
|
||||
binder_uintptr_t cookie;
|
||||
unsigned has_strong_ref:1;
|
||||
unsigned pending_strong_ref:1;
|
||||
unsigned has_weak_ref:1;
|
||||
@@ -242,7 +243,7 @@ struct binder_node {
|
||||
|
||||
struct binder_ref_death {
|
||||
struct binder_work work;
|
||||
void __user *cookie;
|
||||
binder_uintptr_t cookie;
|
||||
};
|
||||
|
||||
struct binder_ref {
|
||||
@@ -515,14 +516,14 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
|
||||
}
|
||||
|
||||
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
|
||||
void __user *user_ptr)
|
||||
uintptr_t user_ptr)
|
||||
{
|
||||
struct rb_node *n = proc->allocated_buffers.rb_node;
|
||||
struct binder_buffer *buffer;
|
||||
struct binder_buffer *kern_ptr;
|
||||
|
||||
kern_ptr = user_ptr - proc->user_buffer_offset
|
||||
- offsetof(struct binder_buffer, data);
|
||||
kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
|
||||
- offsetof(struct binder_buffer, data));
|
||||
|
||||
while (n) {
|
||||
buffer = rb_entry(n, struct binder_buffer, rb_node);
|
||||
@@ -856,7 +857,7 @@ static void binder_free_buf(struct binder_proc *proc,
|
||||
}
|
||||
|
||||
static struct binder_node *binder_get_node(struct binder_proc *proc,
|
||||
void __user *ptr)
|
||||
binder_uintptr_t ptr)
|
||||
{
|
||||
struct rb_node *n = proc->nodes.rb_node;
|
||||
struct binder_node *node;
|
||||
@@ -875,8 +876,8 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
|
||||
}
|
||||
|
||||
static struct binder_node *binder_new_node(struct binder_proc *proc,
|
||||
void __user *ptr,
|
||||
void __user *cookie)
|
||||
binder_uintptr_t ptr,
|
||||
binder_uintptr_t cookie)
|
||||
{
|
||||
struct rb_node **p = &proc->nodes.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
@@ -908,9 +909,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
|
||||
INIT_LIST_HEAD(&node->work.entry);
|
||||
INIT_LIST_HEAD(&node->async_todo);
|
||||
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
|
||||
"%d:%d node %d u%p c%p created\n",
|
||||
"%d:%d node %d u%016llx c%016llx created\n",
|
||||
proc->pid, current->pid, node->debug_id,
|
||||
node->ptr, node->cookie);
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
return node;
|
||||
}
|
||||
|
||||
@@ -1226,9 +1227,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
|
||||
|
||||
static void binder_transaction_buffer_release(struct binder_proc *proc,
|
||||
struct binder_buffer *buffer,
|
||||
size_t *failed_at)
|
||||
binder_size_t *failed_at)
|
||||
{
|
||||
size_t *offp, *off_end;
|
||||
binder_size_t *offp, *off_end;
|
||||
int debug_id = buffer->debug_id;
|
||||
|
||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
@@ -1239,7 +1240,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
|
||||
if (buffer->target_node)
|
||||
binder_dec_node(buffer->target_node, 1, 0);
|
||||
|
||||
offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
|
||||
offp = (binder_size_t *)(buffer->data +
|
||||
ALIGN(buffer->data_size, sizeof(void *)));
|
||||
if (failed_at)
|
||||
off_end = failed_at;
|
||||
else
|
||||
@@ -1249,8 +1251,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
|
||||
if (*offp > buffer->data_size - sizeof(*fp) ||
|
||||
buffer->data_size < sizeof(*fp) ||
|
||||
!IS_ALIGNED(*offp, sizeof(u32))) {
|
||||
pr_err("transaction release %d bad offset %zd, size %zd\n",
|
||||
debug_id, *offp, buffer->data_size);
|
||||
pr_err("transaction release %d bad offset %lld, size %zd\n",
|
||||
debug_id, (u64)*offp, buffer->data_size);
|
||||
continue;
|
||||
}
|
||||
fp = (struct flat_binder_object *)(buffer->data + *offp);
|
||||
@@ -1259,13 +1261,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
|
||||
case BINDER_TYPE_WEAK_BINDER: {
|
||||
struct binder_node *node = binder_get_node(proc, fp->binder);
|
||||
if (node == NULL) {
|
||||
pr_err("transaction release %d bad node %p\n",
|
||||
debug_id, fp->binder);
|
||||
pr_err("transaction release %d bad node %016llx\n",
|
||||
debug_id, (u64)fp->binder);
|
||||
break;
|
||||
}
|
||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
" node %d u%p\n",
|
||||
node->debug_id, node->ptr);
|
||||
" node %d u%016llx\n",
|
||||
node->debug_id, (u64)node->ptr);
|
||||
binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
|
||||
} break;
|
||||
case BINDER_TYPE_HANDLE:
|
||||
@@ -1303,7 +1305,8 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
{
|
||||
struct binder_transaction *t;
|
||||
struct binder_work *tcomplete;
|
||||
size_t *offp, *off_end;
|
||||
binder_size_t *offp, *off_end;
|
||||
binder_size_t off_min;
|
||||
struct binder_proc *target_proc;
|
||||
struct binder_thread *target_thread = NULL;
|
||||
struct binder_node *target_node = NULL;
|
||||
@@ -1383,6 +1386,10 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
return_error = BR_DEAD_REPLY;
|
||||
goto err_dead_binder;
|
||||
}
|
||||
if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_invalid_target_handle;
|
||||
}
|
||||
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
|
||||
struct binder_transaction *tmp;
|
||||
tmp = thread->transaction_stack;
|
||||
@@ -1432,18 +1439,20 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
|
||||
if (reply)
|
||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
"%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n",
|
||||
"%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
|
||||
proc->pid, thread->pid, t->debug_id,
|
||||
target_proc->pid, target_thread->pid,
|
||||
tr->data.ptr.buffer, tr->data.ptr.offsets,
|
||||
tr->data_size, tr->offsets_size);
|
||||
(u64)tr->data.ptr.buffer,
|
||||
(u64)tr->data.ptr.offsets,
|
||||
(u64)tr->data_size, (u64)tr->offsets_size);
|
||||
else
|
||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
"%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n",
|
||||
"%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
|
||||
proc->pid, thread->pid, t->debug_id,
|
||||
target_proc->pid, target_node->debug_id,
|
||||
tr->data.ptr.buffer, tr->data.ptr.offsets,
|
||||
tr->data_size, tr->offsets_size);
|
||||
(u64)tr->data.ptr.buffer,
|
||||
(u64)tr->data.ptr.offsets,
|
||||
(u64)tr->data_size, (u64)tr->offsets_size);
|
||||
|
||||
if (!reply && !(tr->flags & TF_ONE_WAY))
|
||||
t->from = thread;
|
||||
@@ -1472,38 +1481,47 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
if (target_node)
|
||||
binder_inc_node(target_node, 1, 0, NULL);
|
||||
|
||||
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
|
||||
offp = (binder_size_t *)(t->buffer->data +
|
||||
ALIGN(tr->data_size, sizeof(void *)));
|
||||
|
||||
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
|
||||
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
|
||||
tr->data.ptr.buffer, tr->data_size)) {
|
||||
binder_user_error("%d:%d got transaction with invalid data ptr\n",
|
||||
proc->pid, thread->pid);
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_copy_data_failed;
|
||||
}
|
||||
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
|
||||
if (copy_from_user(offp, (const void __user *)(uintptr_t)
|
||||
tr->data.ptr.offsets, tr->offsets_size)) {
|
||||
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
|
||||
proc->pid, thread->pid);
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_copy_data_failed;
|
||||
}
|
||||
if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
|
||||
binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n",
|
||||
proc->pid, thread->pid, tr->offsets_size);
|
||||
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
|
||||
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
|
||||
proc->pid, thread->pid, (u64)tr->offsets_size);
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_bad_offset;
|
||||
}
|
||||
off_end = (void *)offp + tr->offsets_size;
|
||||
off_min = 0;
|
||||
for (; offp < off_end; offp++) {
|
||||
struct flat_binder_object *fp;
|
||||
if (*offp > t->buffer->data_size - sizeof(*fp) ||
|
||||
*offp < off_min ||
|
||||
t->buffer->data_size < sizeof(*fp) ||
|
||||
!IS_ALIGNED(*offp, sizeof(u32))) {
|
||||
binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
|
||||
proc->pid, thread->pid, *offp);
|
||||
binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
|
||||
proc->pid, thread->pid, (u64)*offp,
|
||||
(u64)off_min,
|
||||
(u64)(t->buffer->data_size -
|
||||
sizeof(*fp)));
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_bad_offset;
|
||||
}
|
||||
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
|
||||
off_min = *offp + sizeof(struct flat_binder_object);
|
||||
switch (fp->type) {
|
||||
case BINDER_TYPE_BINDER:
|
||||
case BINDER_TYPE_WEAK_BINDER: {
|
||||
@@ -1519,10 +1537,14 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
|
||||
}
|
||||
if (fp->cookie != node->cookie) {
|
||||
binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n",
|
||||
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
|
||||
proc->pid, thread->pid,
|
||||
fp->binder, node->debug_id,
|
||||
fp->cookie, node->cookie);
|
||||
(u64)fp->binder, node->debug_id,
|
||||
(u64)fp->cookie, (u64)node->cookie);
|
||||
goto err_binder_get_ref_for_node_failed;
|
||||
}
|
||||
if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_binder_get_ref_for_node_failed;
|
||||
}
|
||||
ref = binder_get_ref_for_node(target_proc, node);
|
||||
@@ -1540,9 +1562,9 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
|
||||
trace_binder_transaction_node_to_ref(t, node, ref);
|
||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
" node %d u%p -> ref %d desc %d\n",
|
||||
node->debug_id, node->ptr, ref->debug_id,
|
||||
ref->desc);
|
||||
" node %d u%016llx -> ref %d desc %d\n",
|
||||
node->debug_id, (u64)node->ptr,
|
||||
ref->debug_id, ref->desc);
|
||||
} break;
|
||||
case BINDER_TYPE_HANDLE:
|
||||
case BINDER_TYPE_WEAK_HANDLE: {
|
||||
@@ -1554,6 +1576,10 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_binder_get_ref_failed;
|
||||
}
|
||||
if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_binder_get_ref_failed;
|
||||
}
|
||||
if (ref->node->proc == target_proc) {
|
||||
if (fp->type == BINDER_TYPE_HANDLE)
|
||||
fp->type = BINDER_TYPE_BINDER;
|
||||
@@ -1564,9 +1590,9 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
|
||||
trace_binder_transaction_ref_to_node(t, ref);
|
||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
" ref %d desc %d -> node %d u%p\n",
|
||||
" ref %d desc %d -> node %d u%016llx\n",
|
||||
ref->debug_id, ref->desc, ref->node->debug_id,
|
||||
ref->node->ptr);
|
||||
(u64)ref->node->ptr);
|
||||
} else {
|
||||
struct binder_ref *new_ref;
|
||||
new_ref = binder_get_ref_for_node(target_proc, ref->node);
|
||||
@@ -1610,6 +1636,11 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_fget_failed;
|
||||
}
|
||||
if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
|
||||
fput(file);
|
||||
return_error = BR_FAILED_REPLY;
|
||||
goto err_get_unused_fd_failed;
|
||||
}
|
||||
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
|
||||
if (target_fd < 0) {
|
||||
fput(file);
|
||||
@@ -1682,9 +1713,9 @@ err_dead_binder:
|
||||
err_invalid_target_handle:
|
||||
err_no_context_mgr_node:
|
||||
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
|
||||
"%d:%d transaction failed %d, size %zd-%zd\n",
|
||||
"%d:%d transaction failed %d, size %lld-%lld\n",
|
||||
proc->pid, thread->pid, return_error,
|
||||
tr->data_size, tr->offsets_size);
|
||||
(u64)tr->data_size, (u64)tr->offsets_size);
|
||||
|
||||
{
|
||||
struct binder_transaction_log_entry *fe;
|
||||
@@ -1702,9 +1733,11 @@ err_no_context_mgr_node:
|
||||
|
||||
static int binder_thread_write(struct binder_proc *proc,
|
||||
struct binder_thread *thread,
|
||||
void __user *buffer, size_t size, size_t *consumed)
|
||||
binder_uintptr_t binder_buffer, size_t size,
|
||||
binder_size_t *consumed)
|
||||
{
|
||||
uint32_t cmd;
|
||||
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
|
||||
void __user *ptr = buffer + *consumed;
|
||||
void __user *end = buffer + size;
|
||||
|
||||
@@ -1773,33 +1806,33 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
}
|
||||
case BC_INCREFS_DONE:
|
||||
case BC_ACQUIRE_DONE: {
|
||||
void __user *node_ptr;
|
||||
void __user *cookie;
|
||||
binder_uintptr_t node_ptr;
|
||||
binder_uintptr_t cookie;
|
||||
struct binder_node *node;
|
||||
|
||||
if (get_user(node_ptr, (void * __user *)ptr))
|
||||
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(void *);
|
||||
if (get_user(cookie, (void * __user *)ptr))
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(void *);
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
node = binder_get_node(proc, node_ptr);
|
||||
if (node == NULL) {
|
||||
binder_user_error("%d:%d %s u%p no match\n",
|
||||
binder_user_error("%d:%d %s u%016llx no match\n",
|
||||
proc->pid, thread->pid,
|
||||
cmd == BC_INCREFS_DONE ?
|
||||
"BC_INCREFS_DONE" :
|
||||
"BC_ACQUIRE_DONE",
|
||||
node_ptr);
|
||||
(u64)node_ptr);
|
||||
break;
|
||||
}
|
||||
if (cookie != node->cookie) {
|
||||
binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
|
||||
binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
|
||||
proc->pid, thread->pid,
|
||||
cmd == BC_INCREFS_DONE ?
|
||||
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
|
||||
node_ptr, node->debug_id,
|
||||
cookie, node->cookie);
|
||||
(u64)node_ptr, node->debug_id,
|
||||
(u64)cookie, (u64)node->cookie);
|
||||
break;
|
||||
}
|
||||
if (cmd == BC_ACQUIRE_DONE) {
|
||||
@@ -1835,27 +1868,27 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
return -EINVAL;
|
||||
|
||||
case BC_FREE_BUFFER: {
|
||||
void __user *data_ptr;
|
||||
binder_uintptr_t data_ptr;
|
||||
struct binder_buffer *buffer;
|
||||
|
||||
if (get_user(data_ptr, (void * __user *)ptr))
|
||||
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(void *);
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
|
||||
buffer = binder_buffer_lookup(proc, data_ptr);
|
||||
if (buffer == NULL) {
|
||||
binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
|
||||
proc->pid, thread->pid, data_ptr);
|
||||
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
|
||||
proc->pid, thread->pid, (u64)data_ptr);
|
||||
break;
|
||||
}
|
||||
if (!buffer->allow_user_free) {
|
||||
binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
|
||||
proc->pid, thread->pid, data_ptr);
|
||||
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
|
||||
proc->pid, thread->pid, (u64)data_ptr);
|
||||
break;
|
||||
}
|
||||
binder_debug(BINDER_DEBUG_FREE_BUFFER,
|
||||
"%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
|
||||
proc->pid, thread->pid, data_ptr, buffer->debug_id,
|
||||
"%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
|
||||
proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id,
|
||||
buffer->transaction ? "active" : "finished");
|
||||
|
||||
if (buffer->transaction) {
|
||||
@@ -1925,16 +1958,16 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
case BC_REQUEST_DEATH_NOTIFICATION:
|
||||
case BC_CLEAR_DEATH_NOTIFICATION: {
|
||||
uint32_t target;
|
||||
void __user *cookie;
|
||||
binder_uintptr_t cookie;
|
||||
struct binder_ref *ref;
|
||||
struct binder_ref_death *death;
|
||||
|
||||
if (get_user(target, (uint32_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(uint32_t);
|
||||
if (get_user(cookie, (void __user * __user *)ptr))
|
||||
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(void *);
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
ref = binder_get_ref(proc, target);
|
||||
if (ref == NULL) {
|
||||
binder_user_error("%d:%d %s invalid ref %d\n",
|
||||
@@ -1947,12 +1980,12 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
}
|
||||
|
||||
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
|
||||
"%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
|
||||
"%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
|
||||
proc->pid, thread->pid,
|
||||
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
|
||||
"BC_REQUEST_DEATH_NOTIFICATION" :
|
||||
"BC_CLEAR_DEATH_NOTIFICATION",
|
||||
cookie, ref->debug_id, ref->desc,
|
||||
(u64)cookie, ref->debug_id, ref->desc,
|
||||
ref->strong, ref->weak, ref->node->debug_id);
|
||||
|
||||
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
|
||||
@@ -1990,9 +2023,9 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
}
|
||||
death = ref->death;
|
||||
if (death->cookie != cookie) {
|
||||
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
|
||||
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
|
||||
proc->pid, thread->pid,
|
||||
death->cookie, cookie);
|
||||
(u64)death->cookie, (u64)cookie);
|
||||
break;
|
||||
}
|
||||
ref->death = NULL;
|
||||
@@ -2012,9 +2045,9 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
} break;
|
||||
case BC_DEAD_BINDER_DONE: {
|
||||
struct binder_work *w;
|
||||
void __user *cookie;
|
||||
binder_uintptr_t cookie;
|
||||
struct binder_ref_death *death = NULL;
|
||||
if (get_user(cookie, (void __user * __user *)ptr))
|
||||
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
|
||||
ptr += sizeof(void *);
|
||||
@@ -2026,11 +2059,11 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
}
|
||||
}
|
||||
binder_debug(BINDER_DEBUG_DEAD_BINDER,
|
||||
"%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
|
||||
proc->pid, thread->pid, cookie, death);
|
||||
"%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
|
||||
proc->pid, thread->pid, (u64)cookie, death);
|
||||
if (death == NULL) {
|
||||
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
|
||||
proc->pid, thread->pid, cookie);
|
||||
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
|
||||
proc->pid, thread->pid, (u64)cookie);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -2082,9 +2115,10 @@ static int binder_has_thread_work(struct binder_thread *thread)
|
||||
|
||||
static int binder_thread_read(struct binder_proc *proc,
|
||||
struct binder_thread *thread,
|
||||
void __user *buffer, size_t size,
|
||||
size_t *consumed, int non_block)
|
||||
binder_uintptr_t binder_buffer, size_t size,
|
||||
binder_size_t *consumed, int non_block)
|
||||
{
|
||||
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
|
||||
void __user *ptr = buffer + *consumed;
|
||||
void __user *end = buffer + size;
|
||||
|
||||
@@ -2229,32 +2263,36 @@ retry:
|
||||
if (put_user(cmd, (uint32_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(uint32_t);
|
||||
if (put_user(node->ptr, (void * __user *)ptr))
|
||||
if (put_user(node->ptr,
|
||||
(binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(void *);
|
||||
if (put_user(node->cookie, (void * __user *)ptr))
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
if (put_user(node->cookie,
|
||||
(binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(void *);
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
|
||||
binder_stat_br(proc, thread, cmd);
|
||||
binder_debug(BINDER_DEBUG_USER_REFS,
|
||||
"%d:%d %s %d u%p c%p\n",
|
||||
proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
|
||||
"%d:%d %s %d u%016llx c%016llx\n",
|
||||
proc->pid, thread->pid, cmd_name,
|
||||
node->debug_id,
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
} else {
|
||||
list_del_init(&w->entry);
|
||||
if (!weak && !strong) {
|
||||
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
|
||||
"%d:%d node %d u%p c%p deleted\n",
|
||||
"%d:%d node %d u%016llx c%016llx deleted\n",
|
||||
proc->pid, thread->pid, node->debug_id,
|
||||
node->ptr, node->cookie);
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
rb_erase(&node->rb_node, &proc->nodes);
|
||||
kfree(node);
|
||||
binder_stats_deleted(BINDER_STAT_NODE);
|
||||
} else {
|
||||
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
|
||||
"%d:%d node %d u%p c%p state unchanged\n",
|
||||
proc->pid, thread->pid, node->debug_id, node->ptr,
|
||||
node->cookie);
|
||||
"%d:%d node %d u%016llx c%016llx state unchanged\n",
|
||||
proc->pid, thread->pid, node->debug_id,
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
}
|
||||
}
|
||||
} break;
|
||||
@@ -2272,17 +2310,18 @@ retry:
|
||||
if (put_user(cmd, (uint32_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(uint32_t);
|
||||
if (put_user(death->cookie, (void * __user *)ptr))
|
||||
if (put_user(death->cookie,
|
||||
(binder_uintptr_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
ptr += sizeof(void *);
|
||||
ptr += sizeof(binder_uintptr_t);
|
||||
binder_stat_br(proc, thread, cmd);
|
||||
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
|
||||
"%d:%d %s %p\n",
|
||||
"%d:%d %s %016llx\n",
|
||||
proc->pid, thread->pid,
|
||||
cmd == BR_DEAD_BINDER ?
|
||||
"BR_DEAD_BINDER" :
|
||||
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
|
||||
death->cookie);
|
||||
(u64)death->cookie);
|
||||
|
||||
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
|
||||
list_del(&w->entry);
|
||||
@@ -2312,8 +2351,8 @@ retry:
|
||||
binder_set_nice(target_node->min_priority);
|
||||
cmd = BR_TRANSACTION;
|
||||
} else {
|
||||
tr.target.ptr = NULL;
|
||||
tr.cookie = NULL;
|
||||
tr.target.ptr = 0;
|
||||
tr.cookie = 0;
|
||||
cmd = BR_REPLY;
|
||||
}
|
||||
tr.code = t->code;
|
||||
@@ -2330,8 +2369,9 @@ retry:
|
||||
|
||||
tr.data_size = t->buffer->data_size;
|
||||
tr.offsets_size = t->buffer->offsets_size;
|
||||
tr.data.ptr.buffer = (void *)t->buffer->data +
|
||||
proc->user_buffer_offset;
|
||||
tr.data.ptr.buffer = (binder_uintptr_t)(
|
||||
(uintptr_t)t->buffer->data +
|
||||
proc->user_buffer_offset);
|
||||
tr.data.ptr.offsets = tr.data.ptr.buffer +
|
||||
ALIGN(t->buffer->data_size,
|
||||
sizeof(void *));
|
||||
@@ -2346,14 +2386,14 @@ retry:
|
||||
trace_binder_transaction_received(t);
|
||||
binder_stat_br(proc, thread, cmd);
|
||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
|
||||
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
|
||||
proc->pid, thread->pid,
|
||||
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
|
||||
"BR_REPLY",
|
||||
t->debug_id, t->from ? t->from->proc->pid : 0,
|
||||
t->from ? t->from->pid : 0, cmd,
|
||||
t->buffer->data_size, t->buffer->offsets_size,
|
||||
tr.data.ptr.buffer, tr.data.ptr.offsets);
|
||||
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
|
||||
|
||||
list_del(&t->work.entry);
|
||||
t->buffer->allow_user_free = 1;
|
||||
@@ -2423,8 +2463,8 @@ static void binder_release_work(struct list_head *list)
|
||||
|
||||
death = container_of(w, struct binder_ref_death, work);
|
||||
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
|
||||
"undelivered death notification, %p\n",
|
||||
death->cookie);
|
||||
"undelivered death notification, %016llx\n",
|
||||
(u64)death->cookie);
|
||||
kfree(death);
|
||||
binder_stats_deleted(BINDER_STAT_DEATH);
|
||||
} break;
|
||||
@@ -2580,12 +2620,13 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
goto err;
|
||||
}
|
||||
binder_debug(BINDER_DEBUG_READ_WRITE,
|
||||
"%d:%d write %zd at %016lx, read %zd at %016lx\n",
|
||||
proc->pid, thread->pid, bwr.write_size,
|
||||
bwr.write_buffer, bwr.read_size, bwr.read_buffer);
|
||||
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
|
||||
proc->pid, thread->pid,
|
||||
(u64)bwr.write_size, (u64)bwr.write_buffer,
|
||||
(u64)bwr.read_size, (u64)bwr.read_buffer);
|
||||
|
||||
if (bwr.write_size > 0) {
|
||||
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
|
||||
ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
|
||||
trace_binder_write_done(ret);
|
||||
if (ret < 0) {
|
||||
bwr.read_consumed = 0;
|
||||
@@ -2595,7 +2636,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
}
|
||||
}
|
||||
if (bwr.read_size > 0) {
|
||||
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
|
||||
ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
|
||||
trace_binder_read_done(ret);
|
||||
if (!list_empty(&proc->todo))
|
||||
wake_up_interruptible(&proc->wait);
|
||||
@@ -2606,9 +2647,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
}
|
||||
}
|
||||
binder_debug(BINDER_DEBUG_READ_WRITE,
|
||||
"%d:%d wrote %zd of %zd, read return %zd of %zd\n",
|
||||
proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
|
||||
bwr.read_consumed, bwr.read_size);
|
||||
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
|
||||
proc->pid, thread->pid,
|
||||
(u64)bwr.write_consumed, (u64)bwr.write_size,
|
||||
(u64)bwr.read_consumed, (u64)bwr.read_size);
|
||||
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
|
||||
ret = -EFAULT;
|
||||
goto err;
|
||||
@@ -2627,6 +2669,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
ret = -EBUSY;
|
||||
goto err;
|
||||
}
|
||||
ret = security_binder_set_context_mgr(proc->tsk);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
if (uid_valid(binder_context_mgr_uid)) {
|
||||
if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
|
||||
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
|
||||
@@ -2637,7 +2682,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
}
|
||||
} else
|
||||
binder_context_mgr_uid = current->cred->euid;
|
||||
binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
|
||||
binder_context_mgr_node = binder_new_node(proc, 0, 0);
|
||||
if (binder_context_mgr_node == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
@@ -3132,8 +3177,9 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
|
||||
break;
|
||||
case BINDER_WORK_NODE:
|
||||
node = container_of(w, struct binder_node, work);
|
||||
seq_printf(m, "%snode work %d: u%p c%p\n",
|
||||
prefix, node->debug_id, node->ptr, node->cookie);
|
||||
seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
|
||||
prefix, node->debug_id,
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
break;
|
||||
case BINDER_WORK_DEAD_BINDER:
|
||||
seq_printf(m, "%shas dead binder\n", prefix);
|
||||
@@ -3193,8 +3239,8 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
|
||||
hlist_for_each_entry(ref, &node->refs, node_entry)
|
||||
count++;
|
||||
|
||||
seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
|
||||
node->debug_id, node->ptr, node->cookie,
|
||||
seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
|
||||
node->debug_id, (u64)node->ptr, (u64)node->cookie,
|
||||
node->has_strong_ref, node->has_weak_ref,
|
||||
node->local_strong_refs, node->local_weak_refs,
|
||||
node->internal_strong_refs, count);
|
||||
@@ -3496,6 +3542,7 @@ static const struct file_operations binder_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.poll = binder_poll,
|
||||
.unlocked_ioctl = binder_ioctl,
|
||||
.compat_ioctl = binder_ioctl,
|
||||
.mmap = binder_mmap,
|
||||
.open = binder_open,
|
||||
.flush = binder_flush,
|
||||
|
||||
@@ -20,311 +20,11 @@
|
||||
#ifndef _LINUX_BINDER_H
|
||||
#define _LINUX_BINDER_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
|
||||
#define BINDER_IPC_32BIT 1
|
||||
#endif
|
||||
|
||||
#define B_PACK_CHARS(c1, c2, c3, c4) \
|
||||
((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
|
||||
#define B_TYPE_LARGE 0x85
|
||||
|
||||
enum {
|
||||
BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
|
||||
BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
|
||||
BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
|
||||
BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
|
||||
BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
|
||||
};
|
||||
|
||||
enum {
|
||||
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
|
||||
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
|
||||
};
|
||||
|
||||
/*
|
||||
* This is the flattened representation of a Binder object for transfer
|
||||
* between processes. The 'offsets' supplied as part of a binder transaction
|
||||
* contains offsets into the data where these structures occur. The Binder
|
||||
* driver takes care of re-writing the structure type and data as it moves
|
||||
* between processes.
|
||||
*/
|
||||
struct flat_binder_object {
|
||||
/* 8 bytes for large_flat_header. */
|
||||
__u32 type;
|
||||
__u32 flags;
|
||||
|
||||
/* 8 bytes of data. */
|
||||
union {
|
||||
void __user *binder; /* local object */
|
||||
__u32 handle; /* remote object */
|
||||
};
|
||||
|
||||
/* extra data associated with local object */
|
||||
void __user *cookie;
|
||||
};
|
||||
|
||||
/*
|
||||
* On 64-bit platforms where user code may run in 32-bits the driver must
|
||||
* translate the buffer (and local binder) addresses appropriately.
|
||||
*/
|
||||
|
||||
struct binder_write_read {
|
||||
size_t write_size; /* bytes to write */
|
||||
size_t write_consumed; /* bytes consumed by driver */
|
||||
unsigned long write_buffer;
|
||||
size_t read_size; /* bytes to read */
|
||||
size_t read_consumed; /* bytes consumed by driver */
|
||||
unsigned long read_buffer;
|
||||
};
|
||||
|
||||
/* Use with BINDER_VERSION, driver fills in fields. */
|
||||
struct binder_version {
|
||||
/* driver protocol version -- increment with incompatible change */
|
||||
__s32 protocol_version;
|
||||
};
|
||||
|
||||
/* This is the current protocol version. */
|
||||
#define BINDER_CURRENT_PROTOCOL_VERSION 7
|
||||
|
||||
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
|
||||
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
|
||||
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
|
||||
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
|
||||
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
|
||||
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
|
||||
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
|
||||
|
||||
/*
|
||||
* NOTE: Two special error codes you should check for when calling
|
||||
* in to the driver are:
|
||||
*
|
||||
* EINTR -- The operation has been interupted. This should be
|
||||
* handled by retrying the ioctl() until a different error code
|
||||
* is returned.
|
||||
*
|
||||
* ECONNREFUSED -- The driver is no longer accepting operations
|
||||
* from your process. That is, the process is being destroyed.
|
||||
* You should handle this by exiting from your process. Note
|
||||
* that once this error code is returned, all further calls to
|
||||
* the driver from any thread will return this same code.
|
||||
*/
|
||||
|
||||
enum transaction_flags {
|
||||
TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
|
||||
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
|
||||
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
|
||||
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
|
||||
};
|
||||
|
||||
struct binder_transaction_data {
|
||||
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
|
||||
* identifying the target and contents of the transaction.
|
||||
*/
|
||||
union {
|
||||
__u32 handle; /* target descriptor of command transaction */
|
||||
void *ptr; /* target descriptor of return transaction */
|
||||
} target;
|
||||
void *cookie; /* target object cookie */
|
||||
__u32 code; /* transaction command */
|
||||
|
||||
/* General information about the transaction. */
|
||||
__u32 flags;
|
||||
pid_t sender_pid;
|
||||
uid_t sender_euid;
|
||||
size_t data_size; /* number of bytes of data */
|
||||
size_t offsets_size; /* number of bytes of offsets */
|
||||
|
||||
/* If this transaction is inline, the data immediately
|
||||
* follows here; otherwise, it ends with a pointer to
|
||||
* the data buffer.
|
||||
*/
|
||||
union {
|
||||
struct {
|
||||
/* transaction data */
|
||||
const void __user *buffer;
|
||||
/* offsets from buffer to flat_binder_object structs */
|
||||
const void __user *offsets;
|
||||
} ptr;
|
||||
__u8 buf[8];
|
||||
} data;
|
||||
};
|
||||
|
||||
struct binder_ptr_cookie {
|
||||
void *ptr;
|
||||
void *cookie;
|
||||
};
|
||||
|
||||
struct binder_pri_desc {
|
||||
__s32 priority;
|
||||
__u32 desc;
|
||||
};
|
||||
|
||||
struct binder_pri_ptr_cookie {
|
||||
__s32 priority;
|
||||
void *ptr;
|
||||
void *cookie;
|
||||
};
|
||||
|
||||
enum binder_driver_return_protocol {
|
||||
BR_ERROR = _IOR('r', 0, __s32),
|
||||
/*
|
||||
* int: error code
|
||||
*/
|
||||
|
||||
BR_OK = _IO('r', 1),
|
||||
/* No parameters! */
|
||||
|
||||
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
|
||||
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
|
||||
/*
|
||||
* binder_transaction_data: the received command.
|
||||
*/
|
||||
|
||||
BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
|
||||
/*
|
||||
* not currently supported
|
||||
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
|
||||
* Else the remote object has acquired a primary reference.
|
||||
*/
|
||||
|
||||
BR_DEAD_REPLY = _IO('r', 5),
|
||||
/*
|
||||
* The target of the last transaction (either a bcTRANSACTION or
|
||||
* a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
|
||||
*/
|
||||
|
||||
BR_TRANSACTION_COMPLETE = _IO('r', 6),
|
||||
/*
|
||||
* No parameters... always refers to the last transaction requested
|
||||
* (including replies). Note that this will be sent even for
|
||||
* asynchronous transactions.
|
||||
*/
|
||||
|
||||
BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
|
||||
BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
|
||||
BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
|
||||
BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
|
||||
/*
|
||||
* void *: ptr to binder
|
||||
* void *: cookie for binder
|
||||
*/
|
||||
|
||||
BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
|
||||
/*
|
||||
* not currently supported
|
||||
* int: priority
|
||||
* void *: ptr to binder
|
||||
* void *: cookie for binder
|
||||
*/
|
||||
|
||||
BR_NOOP = _IO('r', 12),
|
||||
/*
|
||||
* No parameters. Do nothing and examine the next command. It exists
|
||||
* primarily so that we can replace it with a BR_SPAWN_LOOPER command.
|
||||
*/
|
||||
|
||||
BR_SPAWN_LOOPER = _IO('r', 13),
|
||||
/*
|
||||
* No parameters. The driver has determined that a process has no
|
||||
* threads waiting to service incoming transactions. When a process
|
||||
* receives this command, it must spawn a new service thread and
|
||||
* register it via bcENTER_LOOPER.
|
||||
*/
|
||||
|
||||
BR_FINISHED = _IO('r', 14),
|
||||
/*
|
||||
* not currently supported
|
||||
* stop threadpool thread
|
||||
*/
|
||||
|
||||
BR_DEAD_BINDER = _IOR('r', 15, void *),
|
||||
/*
|
||||
* void *: cookie
|
||||
*/
|
||||
BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
|
||||
/*
|
||||
* void *: cookie
|
||||
*/
|
||||
|
||||
BR_FAILED_REPLY = _IO('r', 17),
|
||||
/*
|
||||
* The the last transaction (either a bcTRANSACTION or
|
||||
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
|
||||
*/
|
||||
};
|
||||
|
||||
enum binder_driver_command_protocol {
|
||||
BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
|
||||
BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
|
||||
/*
|
||||
* binder_transaction_data: the sent command.
|
||||
*/
|
||||
|
||||
BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
|
||||
/*
|
||||
* not currently supported
|
||||
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
|
||||
* Else you have acquired a primary reference on the object.
|
||||
*/
|
||||
|
||||
BC_FREE_BUFFER = _IOW('c', 3, void *),
|
||||
/*
|
||||
* void *: ptr to transaction data received on a read
|
||||
*/
|
||||
|
||||
BC_INCREFS = _IOW('c', 4, __u32),
|
||||
BC_ACQUIRE = _IOW('c', 5, __u32),
|
||||
BC_RELEASE = _IOW('c', 6, __u32),
|
||||
BC_DECREFS = _IOW('c', 7, __u32),
|
||||
/*
|
||||
* int: descriptor
|
||||
*/
|
||||
|
||||
BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
|
||||
BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
|
||||
/*
|
||||
* void *: ptr to binder
|
||||
* void *: cookie for binder
|
||||
*/
|
||||
|
||||
BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
|
||||
/*
|
||||
* not currently supported
|
||||
* int: priority
|
||||
* int: descriptor
|
||||
*/
|
||||
|
||||
BC_REGISTER_LOOPER = _IO('c', 11),
|
||||
/*
|
||||
* No parameters.
|
||||
* Register a spawned looper thread with the device.
|
||||
*/
|
||||
|
||||
BC_ENTER_LOOPER = _IO('c', 12),
|
||||
BC_EXIT_LOOPER = _IO('c', 13),
|
||||
/*
|
||||
* No parameters.
|
||||
* These two commands are sent as an application-level thread
|
||||
* enters and exits the binder loop, respectively. They are
|
||||
* used so the binder can have an accurate count of the number
|
||||
* of looping threads it has available.
|
||||
*/
|
||||
|
||||
BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
|
||||
/*
|
||||
* void *: ptr to binder
|
||||
* void *: cookie
|
||||
*/
|
||||
|
||||
BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
|
||||
/*
|
||||
* void *: ptr to binder
|
||||
* void *: cookie
|
||||
*/
|
||||
|
||||
BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
|
||||
/*
|
||||
* void *: cookie
|
||||
*/
|
||||
};
|
||||
#include "uapi/binder.h"
|
||||
|
||||
#endif /* _LINUX_BINDER_H */
|
||||
|
||||
|
||||
@@ -152,19 +152,20 @@ TRACE_EVENT(binder_transaction_node_to_ref,
|
||||
TP_STRUCT__entry(
|
||||
__field(int, debug_id)
|
||||
__field(int, node_debug_id)
|
||||
__field(void __user *, node_ptr)
|
||||
__field(binder_uintptr_t, node_ptr)
|
||||
__field(int, ref_debug_id)
|
||||
__field(uint32_t, ref_desc)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = t->debug_id;
|
||||
__entry->node_debug_id = node->debug_id;
|
||||
__entry->node_ptr = node->ptr;
|
||||
__entry->node_ptr = (void __user *)node->ptr;
|
||||
__entry->ref_debug_id = ref->debug_id;
|
||||
__entry->ref_desc = ref->desc;
|
||||
),
|
||||
TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d",
|
||||
__entry->debug_id, __entry->node_debug_id, __entry->node_ptr,
|
||||
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
|
||||
__entry->debug_id, __entry->node_debug_id,
|
||||
(u64)__entry->node_ptr,
|
||||
__entry->ref_debug_id, __entry->ref_desc)
|
||||
);
|
||||
|
||||
@@ -177,18 +178,19 @@ TRACE_EVENT(binder_transaction_ref_to_node,
|
||||
__field(int, ref_debug_id)
|
||||
__field(uint32_t, ref_desc)
|
||||
__field(int, node_debug_id)
|
||||
__field(void __user *, node_ptr)
|
||||
__field(binder_uintptr_t, node_ptr)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = t->debug_id;
|
||||
__entry->ref_debug_id = ref->debug_id;
|
||||
__entry->ref_desc = ref->desc;
|
||||
__entry->node_debug_id = ref->node->debug_id;
|
||||
__entry->node_ptr = ref->node->ptr;
|
||||
__entry->node_ptr = (void __user *)ref->node->ptr;
|
||||
),
|
||||
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p",
|
||||
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
|
||||
__entry->debug_id, __entry->node_debug_id,
|
||||
__entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr)
|
||||
__entry->ref_debug_id, __entry->ref_desc,
|
||||
(u64)__entry->node_ptr)
|
||||
);
|
||||
|
||||
TRACE_EVENT(binder_transaction_ref_to_ref,
|
||||
|
||||
@@ -59,6 +59,8 @@ struct ion_device {
|
||||
unsigned long arg);
|
||||
struct rb_root clients;
|
||||
struct dentry *debug_root;
|
||||
struct dentry *heaps_debug_root;
|
||||
struct dentry *clients_debug_root;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -69,6 +71,8 @@ struct ion_device {
|
||||
* @idr: an idr space for allocating handle ids
|
||||
* @lock: lock protecting the tree of handles
|
||||
* @name: used for debugging
|
||||
* @display_name: used for debugging (unique version of @name)
|
||||
* @display_serial: used for debugging (to make display_name unique)
|
||||
* @task: used for debugging
|
||||
*
|
||||
* A client represents a list of buffers this client may access.
|
||||
@@ -82,6 +86,8 @@ struct ion_client {
|
||||
struct idr idr;
|
||||
struct mutex lock;
|
||||
const char *name;
|
||||
char *display_name;
|
||||
int display_serial;
|
||||
struct task_struct *task;
|
||||
pid_t pid;
|
||||
struct dentry *debug_root;
|
||||
@@ -708,6 +714,21 @@ static const struct file_operations debug_client_fops = {
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int ion_get_client_serial(const struct rb_root *root,
|
||||
const unsigned char *name)
|
||||
{
|
||||
int serial = -1;
|
||||
struct rb_node *node;
|
||||
for (node = rb_first(root); node; node = rb_next(node)) {
|
||||
struct ion_client *client = rb_entry(node, struct ion_client,
|
||||
node);
|
||||
if (strcmp(client->name, name))
|
||||
continue;
|
||||
serial = max(serial, client->display_serial);
|
||||
}
|
||||
return serial + 1;
|
||||
}
|
||||
|
||||
struct ion_client *ion_client_create(struct ion_device *dev,
|
||||
const char *name)
|
||||
{
|
||||
@@ -716,9 +737,13 @@ struct ion_client *ion_client_create(struct ion_device *dev,
|
||||
struct rb_node **p;
|
||||
struct rb_node *parent = NULL;
|
||||
struct ion_client *entry;
|
||||
char debug_name[64];
|
||||
pid_t pid;
|
||||
|
||||
if (!name) {
|
||||
pr_err("%s: Name cannot be null\n", __func__);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
get_task_struct(current->group_leader);
|
||||
task_lock(current->group_leader);
|
||||
pid = task_pid_nr(current->group_leader);
|
||||
@@ -733,21 +758,27 @@ struct ion_client *ion_client_create(struct ion_device *dev,
|
||||
task_unlock(current->group_leader);
|
||||
|
||||
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
|
||||
if (!client) {
|
||||
if (task)
|
||||
put_task_struct(current->group_leader);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
if (!client)
|
||||
goto err_put_task_struct;
|
||||
|
||||
client->dev = dev;
|
||||
client->handles = RB_ROOT;
|
||||
idr_init(&client->idr);
|
||||
mutex_init(&client->lock);
|
||||
client->name = name;
|
||||
client->task = task;
|
||||
client->pid = pid;
|
||||
client->name = kstrdup(name, GFP_KERNEL);
|
||||
if (!client->name)
|
||||
goto err_free_client;
|
||||
|
||||
down_write(&dev->lock);
|
||||
client->display_serial = ion_get_client_serial(&dev->clients, name);
|
||||
client->display_name = kasprintf(
|
||||
GFP_KERNEL, "%s-%d", name, client->display_serial);
|
||||
if (!client->display_name) {
|
||||
up_write(&dev->lock);
|
||||
goto err_free_client_name;
|
||||
}
|
||||
p = &dev->clients.rb_node;
|
||||
while (*p) {
|
||||
parent = *p;
|
||||
@@ -761,13 +792,28 @@ struct ion_client *ion_client_create(struct ion_device *dev,
|
||||
rb_link_node(&client->node, parent, p);
|
||||
rb_insert_color(&client->node, &dev->clients);
|
||||
|
||||
snprintf(debug_name, 64, "%u", client->pid);
|
||||
client->debug_root = debugfs_create_file(debug_name, 0664,
|
||||
dev->debug_root, client,
|
||||
&debug_client_fops);
|
||||
client->debug_root = debugfs_create_file(client->display_name, 0664,
|
||||
dev->clients_debug_root,
|
||||
client, &debug_client_fops);
|
||||
if (!client->debug_root) {
|
||||
char buf[256], *path;
|
||||
path = dentry_path(dev->clients_debug_root, buf, 256);
|
||||
pr_err("Failed to create client debugfs at %s/%s\n",
|
||||
path, client->display_name);
|
||||
}
|
||||
|
||||
up_write(&dev->lock);
|
||||
|
||||
return client;
|
||||
|
||||
err_free_client_name:
|
||||
kfree(client->name);
|
||||
err_free_client:
|
||||
kfree(client);
|
||||
err_put_task_struct:
|
||||
if (task)
|
||||
put_task_struct(current->group_leader);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
EXPORT_SYMBOL(ion_client_create);
|
||||
|
||||
@@ -792,6 +838,8 @@ void ion_client_destroy(struct ion_client *client)
|
||||
debugfs_remove_recursive(client->debug_root);
|
||||
up_write(&dev->lock);
|
||||
|
||||
kfree(client->display_name);
|
||||
kfree(client->name);
|
||||
kfree(client);
|
||||
}
|
||||
EXPORT_SYMBOL(ion_client_destroy);
|
||||
@@ -1293,9 +1341,11 @@ static int ion_open(struct inode *inode, struct file *file)
|
||||
struct miscdevice *miscdev = file->private_data;
|
||||
struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
|
||||
struct ion_client *client;
|
||||
char debug_name[64];
|
||||
|
||||
pr_debug("%s: %d\n", __func__, __LINE__);
|
||||
client = ion_client_create(dev, "user");
|
||||
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
|
||||
client = ion_client_create(dev, debug_name);
|
||||
if (IS_ERR(client))
|
||||
return PTR_ERR(client);
|
||||
file->private_data = client;
|
||||
@@ -1443,6 +1493,8 @@ DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
|
||||
|
||||
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
|
||||
{
|
||||
struct dentry *debug_file;
|
||||
|
||||
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
|
||||
!heap->ops->unmap_dma)
|
||||
pr_err("%s: can not add heap with invalid ops struct.\n",
|
||||
@@ -1457,15 +1509,31 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
|
||||
the list later attempt higher id numbers first */
|
||||
plist_node_init(&heap->node, -heap->id);
|
||||
plist_add(&heap->node, &dev->heaps);
|
||||
debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
|
||||
&debug_heap_fops);
|
||||
debug_file = debugfs_create_file(heap->name, 0664,
|
||||
dev->heaps_debug_root, heap,
|
||||
&debug_heap_fops);
|
||||
|
||||
if (!debug_file) {
|
||||
char buf[256], *path;
|
||||
path = dentry_path(dev->heaps_debug_root, buf, 256);
|
||||
pr_err("Failed to create heap debugfs at %s/%s\n",
|
||||
path, heap->name);
|
||||
}
|
||||
|
||||
#ifdef DEBUG_HEAP_SHRINKER
|
||||
if (heap->shrinker.shrink) {
|
||||
char debug_name[64];
|
||||
|
||||
snprintf(debug_name, 64, "%s_shrink", heap->name);
|
||||
debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
|
||||
&debug_shrink_fops);
|
||||
debug_file = debugfs_create_file(
|
||||
debug_name, 0644, dev->heaps_debug_root, heap,
|
||||
&debug_shrink_fops);
|
||||
if (!debug_file) {
|
||||
char buf[256], *path;
|
||||
path = dentry_path(dev->heaps_debug_root, buf, 256);
|
||||
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
|
||||
path, debug_name);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
up_write(&dev->lock);
|
||||
@@ -1494,8 +1562,21 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
|
||||
}
|
||||
|
||||
idev->debug_root = debugfs_create_dir("ion", NULL);
|
||||
if (!idev->debug_root)
|
||||
pr_err("ion: failed to create debug files.\n");
|
||||
if (!idev->debug_root) {
|
||||
pr_err("ion: failed to create debugfs root directory.\n");
|
||||
goto debugfs_done;
|
||||
}
|
||||
idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
|
||||
if (!idev->heaps_debug_root) {
|
||||
pr_err("ion: failed to create debugfs heaps directory.\n");
|
||||
goto debugfs_done;
|
||||
}
|
||||
idev->clients_debug_root = debugfs_create_dir("clients",
|
||||
idev->debug_root);
|
||||
if (!idev->clients_debug_root)
|
||||
pr_err("ion: failed to create debugfs clients directory.\n");
|
||||
|
||||
debugfs_done:
|
||||
|
||||
idev->custom_ioctl = custom_ioctl;
|
||||
idev->buffers = RB_ROOT;
|
||||
@@ -1509,6 +1590,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
|
||||
void ion_device_destroy(struct ion_device *dev)
|
||||
{
|
||||
misc_deregister(&dev->dev);
|
||||
debugfs_remove_recursive(dev->debug_root);
|
||||
/* XXX need to free the heaps and clients ? */
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/shrinker.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
#include "ion.h"
|
||||
|
||||
|
||||
@@ -39,7 +39,6 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/profile.h>
|
||||
#include <linux/notifier.h>
|
||||
|
||||
static uint32_t lowmem_debug_level = 1;
|
||||
@@ -83,6 +82,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
|
||||
int tasksize;
|
||||
int i;
|
||||
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
|
||||
int minfree = 0;
|
||||
int selected_tasksize = 0;
|
||||
short selected_oom_score_adj;
|
||||
int array_size = ARRAY_SIZE(lowmem_adj);
|
||||
@@ -95,8 +95,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
|
||||
if (lowmem_minfree_size < array_size)
|
||||
array_size = lowmem_minfree_size;
|
||||
for (i = 0; i < array_size; i++) {
|
||||
if (other_free < lowmem_minfree[i] &&
|
||||
other_file < lowmem_minfree[i]) {
|
||||
minfree = lowmem_minfree[i];
|
||||
if (other_free < minfree && other_file < minfree) {
|
||||
min_score_adj = lowmem_adj[i];
|
||||
break;
|
||||
}
|
||||
@@ -151,13 +151,22 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
|
||||
selected = p;
|
||||
selected_tasksize = tasksize;
|
||||
selected_oom_score_adj = oom_score_adj;
|
||||
lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
|
||||
p->pid, p->comm, oom_score_adj, tasksize);
|
||||
lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
|
||||
p->comm, p->pid, oom_score_adj, tasksize);
|
||||
}
|
||||
if (selected) {
|
||||
lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
|
||||
selected->pid, selected->comm,
|
||||
selected_oom_score_adj, selected_tasksize);
|
||||
lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
|
||||
" to free %ldkB on behalf of '%s' (%d) because\n" \
|
||||
" cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
|
||||
" Free memory is %ldkB above reserved\n",
|
||||
selected->comm, selected->pid,
|
||||
selected_oom_score_adj,
|
||||
selected_tasksize * (long)(PAGE_SIZE / 1024),
|
||||
current->comm, current->pid,
|
||||
other_file * (long)(PAGE_SIZE / 1024),
|
||||
minfree * (long)(PAGE_SIZE / 1024),
|
||||
min_score_adj,
|
||||
other_free * (long)(PAGE_SIZE / 1024));
|
||||
lowmem_deathpending_timeout = jiffies + HZ;
|
||||
send_sig(SIGKILL, selected, 0);
|
||||
set_tsk_thread_flag(selected, TIF_MEMDIE);
|
||||
@@ -187,9 +196,94 @@ static void __exit lowmem_exit(void)
|
||||
unregister_shrinker(&lowmem_shrinker);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
|
||||
static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
|
||||
{
|
||||
if (oom_adj == OOM_ADJUST_MAX)
|
||||
return OOM_SCORE_ADJ_MAX;
|
||||
else
|
||||
return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
|
||||
}
|
||||
|
||||
static void lowmem_autodetect_oom_adj_values(void)
|
||||
{
|
||||
int i;
|
||||
short oom_adj;
|
||||
short oom_score_adj;
|
||||
int array_size = ARRAY_SIZE(lowmem_adj);
|
||||
|
||||
if (lowmem_adj_size < array_size)
|
||||
array_size = lowmem_adj_size;
|
||||
|
||||
if (array_size <= 0)
|
||||
return;
|
||||
|
||||
oom_adj = lowmem_adj[array_size - 1];
|
||||
if (oom_adj > OOM_ADJUST_MAX)
|
||||
return;
|
||||
|
||||
oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
|
||||
if (oom_score_adj <= OOM_ADJUST_MAX)
|
||||
return;
|
||||
|
||||
lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
|
||||
for (i = 0; i < array_size; i++) {
|
||||
oom_adj = lowmem_adj[i];
|
||||
oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
|
||||
lowmem_adj[i] = oom_score_adj;
|
||||
lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
|
||||
oom_adj, oom_score_adj);
|
||||
}
|
||||
}
|
||||
|
||||
static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = param_array_ops.set(val, kp);
|
||||
|
||||
/* HACK: Autodetect oom_adj values in lowmem_adj array */
|
||||
lowmem_autodetect_oom_adj_values();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
|
||||
{
|
||||
return param_array_ops.get(buffer, kp);
|
||||
}
|
||||
|
||||
static void lowmem_adj_array_free(void *arg)
|
||||
{
|
||||
param_array_ops.free(arg);
|
||||
}
|
||||
|
||||
static struct kernel_param_ops lowmem_adj_array_ops = {
|
||||
.set = lowmem_adj_array_set,
|
||||
.get = lowmem_adj_array_get,
|
||||
.free = lowmem_adj_array_free,
|
||||
};
|
||||
|
||||
static const struct kparam_array __param_arr_adj = {
|
||||
.max = ARRAY_SIZE(lowmem_adj),
|
||||
.num = &lowmem_adj_size,
|
||||
.ops = ¶m_ops_short,
|
||||
.elemsize = sizeof(lowmem_adj[0]),
|
||||
.elem = lowmem_adj,
|
||||
};
|
||||
#endif
|
||||
|
||||
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
|
||||
#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
|
||||
__module_param_call(MODULE_PARAM_PREFIX, adj,
|
||||
&lowmem_adj_array_ops,
|
||||
.arr = &__param_arr_adj,
|
||||
S_IRUGO | S_IWUSR, -1);
|
||||
__MODULE_PARM_TYPE(adj, "array of short");
|
||||
#else
|
||||
module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
|
||||
S_IRUGO | S_IWUSR);
|
||||
#endif
|
||||
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
|
||||
S_IRUGO | S_IWUSR);
|
||||
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
|
||||
|
||||
@@ -18,10 +18,9 @@
|
||||
#define _LINUX_SW_SYNC_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/kconfig.h>
|
||||
#include "sync.h"
|
||||
#include "uapi/sw_sync.h"
|
||||
|
||||
struct sw_sync_timeline {
|
||||
struct sync_timeline obj;
|
||||
@@ -57,19 +56,4 @@ static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
|
||||
}
|
||||
#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
|
||||
|
||||
#endif /* __KERNEL __ */
|
||||
|
||||
struct sw_sync_create_fence_data {
|
||||
__u32 value;
|
||||
char name[32];
|
||||
__s32 fence; /* fd of new fence */
|
||||
};
|
||||
|
||||
#define SW_SYNC_IOC_MAGIC 'W'
|
||||
|
||||
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
|
||||
struct sw_sync_create_fence_data)
|
||||
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
|
||||
|
||||
|
||||
#endif /* _LINUX_SW_SYNC_H */
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user