-rw-r--r--  Documentation/ABI/testing/debugfs-pktcdvd | 6
-rw-r--r--  Documentation/DocBook/Makefile | 11
-rw-r--r--  Documentation/cgroups/memory.txt | 55
-rw-r--r--  Documentation/cgroups/resource_counter.txt | 27
-rw-r--r--  Documentation/input/rotary-encoder.txt | 101
-rw-r--r--  Documentation/kbuild/makefiles.txt | 83
-rw-r--r--  Documentation/sparse.txt | 8
-rw-r--r--  Documentation/sysctl/net.txt | 2
-rw-r--r--  Documentation/tomoyo.txt | 55
-rw-r--r--  Documentation/vm/00-INDEX | 2
-rw-r--r--  Documentation/vm/active_mm.txt | 83
-rw-r--r--  Documentation/vm/unevictable-lru.txt | 1041
-rw-r--r--  MAINTAINERS | 32
-rw-r--r--  Makefile | 15
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 8
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/mips/include/asm/mach-rc32434/gpio.h | 3
-rw-r--r--  arch/mips/rb532/devices.c | 19
-rw-r--r--  arch/powerpc/include/asm/parport.h | 2
-rw-r--r--  arch/sh/Kconfig | 18
-rw-r--r--  arch/sh/boards/board-ap325rxa.c | 1
-rw-r--r--  arch/sh/boards/board-urquell.c | 30
-rw-r--r--  arch/sh/drivers/pci/ops-sh7785lcr.c | 5
-rw-r--r--  arch/sh/drivers/pci/pci-sh7780.h | 2
-rw-r--r--  arch/sh/drivers/pci/pci.c | 3
-rw-r--r--  arch/sh/include/asm/dma-mapping.h | 36
-rw-r--r--  arch/sh/include/asm/scatterlist.h | 11
-rw-r--r--  arch/sh/include/asm/topology.h | 7
-rw-r--r--  arch/sh/include/asm/unistd_32.h | 4
-rw-r--r--  arch/sh/include/asm/unistd_64.h | 4
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 14
-rw-r--r--  arch/sh/kernel/syscalls_32.S | 2
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 2
-rw-r--r--  arch/sh/mm/consistent.c | 31
-rw-r--r--  arch/sparc/include/asm/parport.h | 5
-rw-r--r--  arch/x86/include/asm/io.h | 6
-rw-r--r--  arch/x86/include/asm/processor.h | 6
-rw-r--r--  arch/x86/include/asm/required-features.h | 2
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 6
-rw-r--r--  arch/x86/include/asm/xen/page.h | 3
-rw-r--r--  arch/x86/include/asm/xsave.h | 3
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 5
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 24
-rw-r--r--  arch/x86/kernel/mpparse.c | 7
-rw-r--r--  arch/x86/kernel/xsave.c | 2
-rw-r--r--  arch/x86/mm/gup.c | 16
-rw-r--r--  arch/x86/mm/ioremap.c | 23
-rw-r--r--  arch/x86/mm/pat.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 89
-rw-r--r--  arch/x86/xen/mmu.c | 116
-rw-r--r--  arch/x86/xen/mmu.h | 3
-rw-r--r--  arch/x86/xen/smp.c | 4
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
-rw-r--r--  drivers/ata/ahci.c | 57
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/ata/sata_via.c | 2
-rw-r--r--  drivers/atm/solos-pci.c | 2
-rw-r--r--  drivers/block/cciss.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 3
-rw-r--r--  drivers/char/sysrq.c | 1
-rw-r--r--  drivers/edac/edac_core.h | 12
-rw-r--r--  drivers/edac/edac_device.c | 2
-rw-r--r--  drivers/edac/edac_mc.c | 2
-rw-r--r--  drivers/edac/edac_pci.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 10
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/hp_accel.c | 1
-rw-r--r--  drivers/hwmon/sht15.c | 692
-rw-r--r--  drivers/input/input.c | 13
-rw-r--r--  drivers/input/keyboard/atkbd.c | 135
-rw-r--r--  drivers/input/keyboard/bf54x-keys.c | 4
-rw-r--r--  drivers/input/keyboard/hilkbd.c | 140
-rw-r--r--  drivers/input/misc/Kconfig | 23
-rw-r--r--  drivers/input/misc/Makefile | 28
-rw-r--r--  drivers/input/misc/ati_remote2.c | 277
-rw-r--r--  drivers/input/misc/rb532_button.c | 120
-rw-r--r--  drivers/input/misc/rotary_encoder.c | 221
-rw-r--r--  drivers/input/mouse/Kconfig | 11
-rw-r--r--  drivers/input/mouse/Makefile | 9
-rw-r--r--  drivers/input/mouse/hgpk.c | 2
-rw-r--r--  drivers/input/mouse/maplemouse.c | 147
-rw-r--r--  drivers/input/mouse/pc110pad.c | 5
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 8
-rw-r--r--  drivers/input/touchscreen/Kconfig | 58
-rw-r--r--  drivers/input/touchscreen/Makefile | 3
-rw-r--r--  drivers/input/touchscreen/ad7877.c | 844
-rw-r--r--  drivers/input/touchscreen/ad7879.c | 782
-rw-r--r--  drivers/input/touchscreen/mainstone-wm97xx.c | 3
-rw-r--r--  drivers/input/touchscreen/ucb1400_ts.c | 5
-rw-r--r--  drivers/input/touchscreen/wm97xx-core.c | 13
-rw-r--r--  drivers/input/touchscreen/zylonite-wm97xx.c | 240
-rw-r--r--  drivers/misc/eeprom/at24.c | 8
-rw-r--r--  drivers/misc/eeprom/at25.c | 5
-rw-r--r--  drivers/misc/sgi-xp/xpc.h | 254
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c | 138
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 128
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c | 20
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c | 164
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 257
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 4
-rw-r--r--  drivers/net/benet/be_main.c | 4
-rw-r--r--  drivers/net/jme.c | 8
-rw-r--r--  drivers/net/wireless/ath9k/pci.c | 4
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 4
-rw-r--r--  drivers/parisc/superio.c | 3
-rw-r--r--  drivers/pci/dmar.c | 11
-rw-r--r--  drivers/pci/intel-iommu.c | 4
-rw-r--r--  drivers/scsi/3w-9xxx.c | 8
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 10
-rw-r--r--  drivers/scsi/scsi_priv.h | 3
-rw-r--r--  drivers/scsi/scsi_wait_scan.c | 2
-rw-r--r--  drivers/sh/intc.c | 35
-rw-r--r--  drivers/spi/spi.c | 22
-rw-r--r--  drivers/staging/b3dfg/b3dfg.c | 2
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/otg/nop-usb-xceiv.c | 4
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 26
-rw-r--r--  drivers/video/aty/radeon_base.c | 4
-rw-r--r--  drivers/video/backlight/backlight.c | 3
-rw-r--r--  drivers/video/backlight/lcd.c | 3
-rw-r--r--  drivers/video/cirrusfb.c | 4
-rw-r--r--  drivers/video/console/fbcon.c | 55
-rw-r--r--  drivers/video/efifb.c | 7
-rw-r--r--  drivers/video/fbmem.c | 19
-rw-r--r--  drivers/video/intelfb/intelfb.h | 2
-rw-r--r--  drivers/video/intelfb/intelfb_i2c.c | 1
-rw-r--r--  drivers/video/intelfb/intelfbdrv.c | 1
-rw-r--r--  drivers/video/intelfb/intelfbhw.c | 5
-rw-r--r--  drivers/video/s3fb.c | 6
-rw-r--r--  drivers/video/sa1100fb.c | 15
-rw-r--r--  drivers/video/sa1100fb.h | 7
-rw-r--r--  drivers/video/sis/sis_main.c | 2
-rw-r--r--  drivers/video/skeletonfb.c | 8
-rw-r--r--  drivers/video/uvesafb.c | 35
-rw-r--r--  drivers/video/vfb.c | 11
-rw-r--r--  drivers/xen/cpu_hotplug.c | 40
-rw-r--r--  drivers/xen/manage.c | 5
-rw-r--r--  fs/ext2/inode.c | 44
-rw-r--r--  fs/hfs/inode.c | 4
-rw-r--r--  fs/hfs/mdb.c | 1
-rw-r--r--  fs/jbd/revoke.c | 24
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 38
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c | 14
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 18
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 78
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 9
-rw-r--r--  fs/xfs/xfs_iget.c | 23
-rw-r--r--  fs/xfs/xfs_iomap.c | 61
-rw-r--r--  fs/xfs/xfs_iomap.h | 3
-rw-r--r--  fs/xfs/xfs_log.c | 78
-rw-r--r--  fs/xfs/xfs_mount.h | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 7
-rw-r--r--  include/asm-generic/siginfo.h | 2
-rw-r--r--  include/drm/drm_pciids.h | 2
-rw-r--r--  include/linux/debug_locks.h | 7
-rw-r--r--  include/linux/fb.h | 8
-rw-r--r--  include/linux/fiemap.h | 2
-rw-r--r--  include/linux/init_task.h | 13
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/rotary_encoder.h | 13
-rw-r--r--  include/linux/sht15.h | 24
-rw-r--r--  include/linux/spi/ad7879.h | 35
-rw-r--r--  include/linux/stringify.h | 4
-rw-r--r--  include/linux/usb/serial.h | 7
-rw-r--r--  include/scsi/scsi_scan.h | 11
-rw-r--r--  include/video/cyblafb.h | 175
-rw-r--r--  init/Kconfig | 8
-rw-r--r--  init/initramfs.c | 5
-rw-r--r--  ipc/mq_sysctl.c | 2
-rw-r--r--  kernel/panic.c | 12
-rw-r--r--  kernel/power/disk.c | 8
-rw-r--r--  kernel/power/user.c | 9
-rw-r--r--  kernel/ptrace.c | 7
-rw-r--r--  kernel/sys.c | 24
-rw-r--r--  kernel/sysctl.c | 20
-rw-r--r--  kernel/trace/Kconfig | 4
-rw-r--r--  kernel/trace/trace.c | 36
-rw-r--r--  kernel/trace/trace_events.c | 12
-rw-r--r--  kernel/trace/trace_events_filter.c | 14
-rw-r--r--  kernel/trace/trace_events_stage_2.h | 4
-rw-r--r--  lib/debug_locks.c | 2
-rw-r--r--  mm/Kconfig | 2
-rw-r--r--  mm/filemap.c | 4
-rw-r--r--  mm/memcontrol.c | 2
-rw-r--r--  mm/shmem.c | 27
-rw-r--r--  mm/util.c | 16
-rw-r--r--  scripts/Makefile.headersinst | 2
-rw-r--r--  scripts/gen_initramfs_list.sh | 2
-rwxr-xr-x  scripts/headerdep.pl | 2
-rw-r--r--  scripts/kconfig/kxgettext.c | 4
-rw-r--r--  scripts/mod/modpost.c | 4
-rwxr-xr-x  scripts/setlocalversion | 2
-rw-r--r--  security/tomoyo/common.c | 6
-rw-r--r--  security/tomoyo/common.h | 2
-rw-r--r--  security/tomoyo/domain.c | 2
-rw-r--r--  security/tomoyo/file.c | 2
-rw-r--r--  security/tomoyo/realpath.c | 2
-rw-r--r--  security/tomoyo/realpath.h | 2
-rw-r--r--  security/tomoyo/tomoyo.c | 2
-rw-r--r--  security/tomoyo/tomoyo.h | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 8
205 files changed, 6251 insertions, 2032 deletions
diff --git a/Documentation/ABI/testing/debugfs-pktcdvd b/Documentation/ABI/testing/debugfs-pktcdvd
index bf9c16b64c34..cf11736acb76 100644
--- a/Documentation/ABI/testing/debugfs-pktcdvd
+++ b/Documentation/ABI/testing/debugfs-pktcdvd
@@ -1,4 +1,4 @@
-What: /debug/pktcdvd/pktcdvd[0-7]
+What: /sys/kernel/debug/pktcdvd/pktcdvd[0-7]
Date: Oct. 2006
KernelVersion: 2.6.20
Contact: Thomas Maier <balagi@justmail.de>
@@ -10,10 +10,10 @@ debugfs interface
The pktcdvd module (packet writing driver) creates
these files in debugfs:
-/debug/pktcdvd/pktcdvd[0-7]/
+/sys/kernel/debug/pktcdvd/pktcdvd[0-7]/
info (0444) Lots of driver statistics and infos.
Example:
-------
-cat /debug/pktcdvd/pktcdvd0/info
+cat /sys/kernel/debug/pktcdvd/pktcdvd0/info
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index a3a83d38f96f..8918a32c6b3a 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -31,7 +31,7 @@ PS_METHOD = $(prefer-db2x)
###
# The targets that may be used.
-PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs
+PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs
BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
xmldocs: $(BOOKS)
@@ -213,11 +213,12 @@ silent_gen_xml = :
dochelp:
@echo ' Linux kernel internal documentation in different formats:'
@echo ' htmldocs - HTML'
- @echo ' installmandocs - install man pages generated by mandocs'
- @echo ' mandocs - man pages'
@echo ' pdfdocs - PDF'
@echo ' psdocs - Postscript'
@echo ' xmldocs - XML DocBook'
+ @echo ' mandocs - man pages'
+ @echo ' installmandocs - install man pages generated by mandocs'
+ @echo ' cleandocs - clean all generated DocBook files'
###
# Temporary files left by various tools
@@ -235,6 +236,10 @@ clean-files := $(DOCBOOKS) \
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
+cleandocs:
+ $(Q)rm -f $(call objectify, $(clean-files))
+ $(Q)rm -rf $(call objectify, $(clean-dirs))
+
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index a98a7fe7aabb..1a608877b14e 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -6,15 +6,14 @@ used here with the memory controller that is used in hardware.
Salient features
-a. Enable control of both RSS (mapped) and Page Cache (unmapped) pages
+a. Enable control of Anonymous, Page Cache (mapped and unmapped) and
+ Swap Cache memory pages.
b. The infrastructure allows easy addition of other types of memory to control
c. Provides *zero overhead* for non memory controller users
d. Provides a double LRU: global memory pressure causes reclaim from the
global LRU; a cgroup on hitting a limit, reclaims from the per
cgroup LRU
-NOTE: Swap Cache (unmapped) is not accounted now.
-
Benefits and Purpose of the memory controller
The memory controller isolates the memory behaviour of a group of tasks
@@ -290,34 +289,44 @@ will be charged as a new owner of it.
moved to the parent. If you want to avoid that, force_empty will be useful.
5.2 stat file
- memory.stat file includes following statistics (now)
- cache - # of pages from page-cache and shmem.
- rss - # of pages from anonymous memory.
- pgpgin - # of event of charging
- pgpgout - # of event of uncharging
- active_anon - # of pages on active lru of anon, shmem.
- inactive_anon - # of pages on active lru of anon, shmem
- active_file - # of pages on active lru of file-cache
- inactive_file - # of pages on inactive lru of file cache
- unevictable - # of pages cannot be reclaimed.(mlocked etc)
-
- Below is depend on CONFIG_DEBUG_VM.
- inactive_ratio - VM internal parameter. (see mm/page_alloc.c)
- recent_rotated_anon - VM internal parameter. (see mm/vmscan.c)
- recent_rotated_file - VM internal parameter. (see mm/vmscan.c)
- recent_scanned_anon - VM internal parameter. (see mm/vmscan.c)
- recent_scanned_file - VM internal parameter. (see mm/vmscan.c)
-
- Memo:
+
+The memory.stat file includes the following statistics
+
+cache - # of bytes of page cache memory.
+rss - # of bytes of anonymous and swap cache memory.
+pgpgin - # of pages paged in (equivalent to # of charging events).
+pgpgout - # of pages paged out (equivalent to # of uncharging events).
+active_anon - # of bytes of anonymous and swap cache memory on active
+ lru list.
+inactive_anon - # of bytes of anonymous memory and swap cache memory on
+ inactive lru list.
+active_file - # of bytes of file-backed memory on active lru list.
+inactive_file - # of bytes of file-backed memory on inactive lru list.
+unevictable - # of bytes of memory that cannot be reclaimed (mlocked etc).
+
+The following additional stats are dependent on CONFIG_DEBUG_VM.
+
+inactive_ratio - VM internal parameter. (see mm/page_alloc.c)
+recent_rotated_anon - VM internal parameter. (see mm/vmscan.c)
+recent_rotated_file - VM internal parameter. (see mm/vmscan.c)
+recent_scanned_anon - VM internal parameter. (see mm/vmscan.c)
+recent_scanned_file - VM internal parameter. (see mm/vmscan.c)
+
+Memo:
recent_rotated means recent frequency of lru rotation.
recent_scanned means recent # of scans to lru.
 shown for better debugging; please see the code for their meanings.
+Note:
+ Only anonymous and swap cache memory is listed as part of 'rss' stat.
+ This should not be confused with the true 'resident set size' or the
+ amount of physical memory used by the cgroup. Per-cgroup rss
+ accounting is not done yet.
5.3 swappiness
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
- Following cgroup's swapiness can't be changed.
+ Following cgroups' swappiness can't be changed.
- root cgroup (uses /proc/sys/vm/swappiness).
- a cgroup which uses hierarchy and it has child cgroup.
- a cgroup which uses hierarchy and not the root of hierarchy.
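
The stat file described in section 5.2 can be read like any other cgroup
control file. A minimal sketch of a userspace reader follows; the mount point
/cgroups is an assumption and depends on how the hierarchy was mounted:

    #include <stdio.h>

    int main(void)
    {
        char line[256];
        /* hypothetical path: <mountpoint>/<cgroup>/memory.stat */
        FILE *f = fopen("/cgroups/memory.stat", "r");

        if (!f) {
            perror("memory.stat");
            return 1;
        }
        /* each line is "<name> <value>", e.g. "cache <bytes>" */
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }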
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index f196ac1d7d25..95b24d766eab 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -47,13 +47,18 @@ to work with it.
2. Basic accounting routines
- a. void res_counter_init(struct res_counter *rc)
+ a. void res_counter_init(struct res_counter *rc,
+ struct res_counter *rc_parent)
Initializes the resource counter. As usual, should be the first
routine called for a new counter.
- b. int res_counter_charge[_locked]
- (struct res_counter *rc, unsigned long val)
+ The struct res_counter *parent can be used to define a hierarchical
+ child -> parent relationship directly in the res_counter structure;
+ NULL can be used to define no relationship.
+
+ c. int res_counter_charge(struct res_counter *rc, unsigned long val,
+ struct res_counter **limit_fail_at)
When a resource is about to be allocated it has to be accounted
with the appropriate resource counter (controller should determine
@@ -67,15 +72,25 @@ to work with it.
* if the charging is performed first, then it should be uncharged
on error path (if the one is called).
- c. void res_counter_uncharge[_locked]
+ If the charging fails and a hierarchical dependency exists, the
+ limit_fail_at parameter is set to the particular res_counter element
+ where the charging failed.
+
+ d. int res_counter_charge_locked
+ (struct res_counter *rc, unsigned long val)
+
+ The same as res_counter_charge(), but it must not acquire/release the
+ res_counter->lock internally (it must be called with res_counter->lock
+ held).
+
+ e. void res_counter_uncharge[_locked]
(struct res_counter *rc, unsigned long val)
When a resource is released (freed) it should be de-accounted
from the resource counter it was accounted to. This is called
"uncharging".
- The _locked routines imply that the res_counter->lock is taken.
-
+ The _locked routines imply that the res_counter->lock is taken.
2.1 Other accounting routines
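
Taken together, the routines above are typically used in the pattern sketched
below. This is a minimal illustration assuming one parent/child counter pair;
the names are invented for the example, not taken from a real controller:

    #include <linux/mm.h>           /* PAGE_SIZE */
    #include <linux/res_counter.h>

    static struct res_counter parent_cnt, child_cnt;

    static int example_charge_page(void)
    {
        struct res_counter *fail_at;
        int ret;

        res_counter_init(&parent_cnt, NULL);        /* root: no parent */
        res_counter_init(&child_cnt, &parent_cnt);  /* child -> parent link */

        ret = res_counter_charge(&child_cnt, PAGE_SIZE, &fail_at);
        if (ret)
            /* fail_at points at the counter whose limit was hit */
            return ret;

        /* ... the resource is in use ... */

        res_counter_uncharge(&child_cnt, PAGE_SIZE);
        return 0;
    }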
diff --git a/Documentation/input/rotary-encoder.txt b/Documentation/input/rotary-encoder.txt
new file mode 100644
index 000000000000..435102a26d96
--- /dev/null
+++ b/Documentation/input/rotary-encoder.txt
@@ -0,0 +1,101 @@
+rotary-encoder - a generic driver for GPIO connected devices
+Daniel Mack <daniel@caiaq.de>, Feb 2009
+
+0. Function
+-----------
+
+Rotary encoders are devices which are connected to the CPU or other
+peripherals with two wires. The outputs are phase-shifted by 90 degrees
+and by triggering on falling and rising edges, the turn direction can
+be determined.
+
+The phase diagram of these two outputs looks like this:
+
+ _____ _____ _____
+ | | | | | |
+ Channel A ____| |_____| |_____| |____
+
+ : : : : : : : : : : : :
+ __ _____ _____ _____
+ | | | | | | |
+ Channel B |_____| |_____| |_____| |__
+
+ : : : : : : : : : : : :
+ Event a b c d a b c d a b c d
+
+ |<-------->|
+ one step
+
+
+For more information, please see
+ http://en.wikipedia.org/wiki/Rotary_encoder
+
+
+1. Events / state machine
+-------------------------
+
+a) Rising edge on channel A, channel B in low state
+ This state is used to recognize a clockwise turn
+
+b) Rising edge on channel B, channel A in high state
+ When entering this state, the encoder is put into 'armed' state,
+ meaning that it has seen half of a one-step transition.
+
+c) Falling edge on channel A, channel B in high state
+ This state is used to recognize a counter-clockwise turn
+
+d) Falling edge on channel B, channel A in low state
+ Parking position. If the encoder enters this state, a full transition
+ should have happened, unless it flipped back halfway. The
+ 'armed' state tells us about that.
+
+2. Platform requirements
+------------------------
+
+As there is no hardware dependent call in this driver, the platform it is
+used with must support gpiolib. Another requirement is that IRQs must be
+able to fire on both edges.
+
+
+3. Board integration
+--------------------
+
+To use this driver in your system, register a platform_device with the
+name 'rotary-encoder' and associate the IRQs and some specific platform
+data with it.
+
+struct rotary_encoder_platform_data is declared in
+include/linux/rotary_encoder.h and needs to be filled with the number of
+steps the encoder has and can carry information about externally inverted
+signals (e.g. because an inverting buffer is used, or for other reasons).
+
+Because GPIO to IRQ mapping is platform specific, this information must
+be given to the driver separately. See the example below.
+
+---------<snip>---------
+
+/* board support file example */
+
+#include <linux/input.h>
+#include <linux/rotary_encoder.h>
+
+#define GPIO_ROTARY_A 1
+#define GPIO_ROTARY_B 2
+
+static struct rotary_encoder_platform_data my_rotary_encoder_info = {
+ .steps = 24,
+ .axis = ABS_X,
+ .gpio_a = GPIO_ROTARY_A,
+ .gpio_b = GPIO_ROTARY_B,
+ .inverted_a = 0,
+ .inverted_b = 0,
+};
+
+static struct platform_device rotary_encoder_device = {
+ .name = "rotary-encoder",
+ .id = 0,
+ .dev = {
+ .platform_data = &my_rotary_encoder_info,
+ }
+};
+
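
The interrupt handler of such a driver implements the state machine from
section 1. A condensed sketch of that logic follows; the structure and
function names are illustrative and simplified, not necessarily those used
by drivers/input/misc/rotary_encoder.c:

    #include <linux/gpio.h>
    #include <linux/input.h>
    #include <linux/interrupt.h>

    struct enc_state {                  /* illustrative private state */
        struct input_dev *input;
        unsigned int gpio_a, gpio_b;
        bool inverted_a, inverted_b;
        bool armed;                     /* saw event 'b' since the last park */
        bool cw;                        /* direction latched at events a/c */
        int pos;
    };

    /* requested for both GPIOs with
     * IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING */
    static irqreturn_t encoder_irq(int irq, void *dev_id)
    {
        struct enc_state *enc = dev_id;
        int a = gpio_get_value(enc->gpio_a) ^ enc->inverted_a;
        int b = gpio_get_value(enc->gpio_b) ^ enc->inverted_b;

        if (a && b) {
            enc->armed = true;          /* event b: half-way through a step */
        } else if (!a && !b) {          /* event d: parking position */
            if (enc->armed) {
                enc->pos += enc->cw ? 1 : -1;
                input_report_abs(enc->input, ABS_X, enc->pos);
                input_sync(enc->input);
            }
            enc->armed = false;
        } else {
            enc->cw = a;                /* events a/c decide the direction */
        }
        return IRQ_HANDLED;
    }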
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 51104f9194a5..d4b05672f9f7 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -40,10 +40,16 @@ This document describes the Linux kernel Makefiles.
--- 6.7 Custom kbuild commands
--- 6.8 Preprocessing linker scripts
- === 7 Kbuild Variables
- === 8 Makefile language
- === 9 Credits
- === 10 TODO
+ === 7 Kbuild syntax for exported headers
+ --- 7.1 header-y
+ --- 7.2 objhdr-y
+ --- 7.3 destination-y
+ --- 7.4 unifdef-y (deprecated)
+
+ === 8 Kbuild Variables
+ === 9 Makefile language
+ === 10 Credits
+ === 11 TODO
=== 1 Overview
@@ -1143,8 +1149,69 @@ When kbuild executes, the following steps are followed (roughly):
The kbuild infrastructure for *lds file are used in several
architecture-specific files.
+=== 7 Kbuild syntax for exported headers
+
+The kernel includes a set of headers that are exported to userspace.
+Many headers can be exported as-is, but other headers require
+minimal pre-processing before they are ready for user-space.
+The pre-processing does:
+- drop kernel specific annotations
+- drop include of compiler.h
+- drop all sections that are kernel internal (guarded by #ifdef __KERNEL__)
+
+Each relevant directory contains a file named "Kbuild" which specifies the
+headers to be exported.
+See the subsequent sections for the syntax of the Kbuild file.
+
+ --- 7.1 header-y
+
+ header-y specifies header files to be exported.
+
+ Example:
+ #include/linux/Kbuild
+ header-y += usb/
+ header-y += aio_abi.h
+
+ The convention is to list one file per line, preferably in
+ alphabetical order.
+
+ header-y also specifies which subdirectories to visit.
+ A subdirectory is identified by a trailing '/' which
+ can be seen in the example above for the usb subdirectory.
+
+ Subdirectories are visited before their parent directories.
+
+ --- 7.2 objhdr-y
+
+ objhdr-y specifies generated files to be exported.
+ Generated files are special as they need to be looked
+ up in another directory when doing 'make O=...' builds.
+
+ Example:
+ #include/linux/Kbuild
+ objhdr-y += version.h
+
+ --- 7.3 destination-y
+
+ When an architecture has a set of exported headers that needs to be
+ exported to a different directory, destination-y is used.
+ destination-y specifies the destination directory for all exported
+ headers in the file where it is present.
+
+ Example:
+ #arch/xtensa/platforms/s6105/include/platform/Kbuild
+ destination-y := include/linux
+
+ In the example above, all exported headers in the Kbuild file
+ will be located in the directory "include/linux" when exported.
+
+
+ --- 7.4 unifdef-y (deprecated)
+
+ unifdef-y is deprecated. A direct replacement is header-y.
+
-=== 7 Kbuild Variables
+=== 8 Kbuild Variables
The top Makefile exports the following variables:
@@ -1206,7 +1273,7 @@ The top Makefile exports the following variables:
 INSTALL_MOD_STRIP will be used as the option(s) to the strip command.
-=== 8 Makefile language
+=== 9 Makefile language
The kernel Makefiles are designed to be run with GNU Make. The Makefiles
use only the documented features of GNU Make, but they do use many
@@ -1225,14 +1292,14 @@ time the left-hand side is used.
There are some cases where "=" is appropriate. Usually, though, ":="
is the right choice.
-=== 9 Credits
+=== 10 Credits
Original version made by Michael Elizabeth Chastain, <mailto:mec@shout.net>
Updates by Kai Germaschewski <kai@tp1.ruhr-uni-bochum.de>
Updates by Sam Ravnborg <sam@ravnborg.org>
Language QA by Jan Engelhardt <jengelh@gmx.de>
-=== 10 TODO
+=== 11 TODO
- Describe how kbuild supports shipped files with _shipped.
- Generating offset header files.
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index 42f43fa59f24..34c76a55bc04 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -42,6 +42,14 @@ sure that bitwise types don't get mixed up (little-endian vs big-endian
vs cpu-endian vs whatever), and there the constant "0" really _is_
special.
+__bitwise__ - to be used for relatively compact stuff (gfp_t, etc.) that
+is mostly warning-free and is supposed to stay that way. Warnings will
+be generated without __CHECK_ENDIAN__.
+
+__bitwise - noisy stuff; in particular, __le*/__be* are that. We really
+don't want to drown in noise unless we'd explicitly asked for it.
+
+
Getting sparse
~~~~~~~~~~~~~~
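
To make the distinction concrete, the two annotations appear in type
definitions like the following (the typedef pattern matches what
include/linux/types.h does; the sample type and variable names here are
invented for illustration):

    #include <linux/types.h>

    /* gfp_t is defined with the always-checked form in linux/types.h */
    typedef unsigned int __bitwise__ example_flags_t;

    /* __le32/__be32 use the noisier form, checked with -D__CHECK_ENDIAN__ */
    typedef __u32 __bitwise example_le32_t;

    static void example(void)
    {
        /* a __force cast acknowledges a deliberate conversion */
        example_flags_t flags = (__force example_flags_t)0x10u;

        /* sparse: warning: incorrect type in initializer */
        unsigned int raw = flags;

        (void)raw;
    }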
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index a34d55b65441..df38ef046f8d 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -95,7 +95,7 @@ of struct cmsghdr structures with appended data.
There is only one file in this directory.
unix_dgram_qlen limits the max number of datagrams queued in Unix domain
-socket's buffer. It will not take effect unless PF_UNIX flag is spicified.
+socket's buffer. It will not take effect unless PF_UNIX flag is specified.
3. /proc/sys/net/ipv4 - IPV4 settings
diff --git a/Documentation/tomoyo.txt b/Documentation/tomoyo.txt
new file mode 100644
index 000000000000..b3a232cae7f8
--- /dev/null
+++ b/Documentation/tomoyo.txt
@@ -0,0 +1,55 @@
+--- What is TOMOYO? ---
+
+TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel.
+
+LiveCD-based tutorials are available at
+http://tomoyo.sourceforge.jp/en/1.6.x/1st-step/ubuntu8.04-live/
+http://tomoyo.sourceforge.jp/en/1.6.x/1st-step/centos5-live/ .
+Though these tutorials use the non-LSM version of TOMOYO, they are useful
+for understanding what TOMOYO is.
+
+--- How to enable TOMOYO? ---
+
+Build the kernel with CONFIG_SECURITY_TOMOYO=y and pass "security=tomoyo" on
+kernel's command line.
+
+Please see http://tomoyo.sourceforge.jp/en/2.2.x/ for details.
+
+--- Where is documentation? ---
+
+User <-> Kernel interface documentation is available at
+http://tomoyo.sourceforge.jp/en/2.2.x/policy-reference.html .
+
+Materials we prepared for seminars and symposiums are available at
+http://sourceforge.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
+The lists below are grouped by three aspects.
+
+What is TOMOYO?
+ TOMOYO Linux Overview
+ http://sourceforge.jp/projects/tomoyo/docs/lca2009-takeda.pdf
+ TOMOYO Linux: pragmatic and manageable security for Linux
+ http://sourceforge.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
+ TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
+ http://sourceforge.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
+
+What can TOMOYO do?
+ Deep inside TOMOYO Linux
+ http://sourceforge.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
+ The role of "pathname based access control" in security.
+ http://sourceforge.jp/projects/tomoyo/docs/lfj2008-bof.pdf
+
+History of TOMOYO?
+ Realities of Mainlining
+ http://sourceforge.jp/projects/tomoyo/docs/lfj2008.pdf
+
+--- What is the future plan? ---
+
+We believe that inode-based security and name-based security are complementary
+and both should be used together. But unfortunately, so far, multiple LSM
+modules cannot be enabled at the same time. We are sorry that you have to give
+up SELinux/SMACK/AppArmor etc. when you want to use TOMOYO.
+
+We hope that LSM becomes stackable in the future. Meanwhile, you can use the
+non-LSM version of TOMOYO, available at http://tomoyo.sourceforge.jp/en/1.6.x/ .
+The LSM version of TOMOYO is a subset of the non-LSM version. We are planning
+to port the non-LSM version's functionality to the LSM version.
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index 2131b00b63f6..2f77ced35df7 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- this file.
+active_mm.txt
+ - An explanation from Linus about tsk->active_mm vs tsk->mm.
balance
- various information on memory balancing.
hugetlbpage.txt
diff --git a/Documentation/vm/active_mm.txt b/Documentation/vm/active_mm.txt
new file mode 100644
index 000000000000..4ee1f643d897
--- /dev/null
+++ b/Documentation/vm/active_mm.txt
@@ -0,0 +1,83 @@
+List: linux-kernel
+Subject: Re: active_mm
+From: Linus Torvalds <torvalds@transmeta.com>
+Date: 1999-07-30 21:36:24
+
+Cc'd to linux-kernel, because I don't write explanations all that often,
+and when I do I feel better about more people reading them.
+
+On Fri, 30 Jul 1999, David Mosberger wrote:
+>
+> Is there a brief description someplace on how "mm" vs. "active_mm" in
+> the task_struct are supposed to be used? (My apologies if this was
+> discussed on the mailing lists---I just returned from vacation and
+> wasn't able to follow linux-kernel for a while).
+
+Basically, the new setup is:
+
+ - we have "real address spaces" and "anonymous address spaces". The
+ difference is that an anonymous address space doesn't care about the
+ user-level page tables at all, so when we do a context switch into an
+ anonymous address space we just leave the previous address space
+ active.
+
+ The obvious use for a "anonymous address space" is any thread that
+ doesn't need any user mappings - all kernel threads basically fall into
+ this category, but even "real" threads can temporarily say that for
+ some amount of time they are not going to be interested in user space,
+ and that the scheduler might as well try to avoid wasting time on
+ switching the VM state around. Currently only the old-style bdflush
+ sync does that.
+
+ - "tsk->mm" points to the "real address space". For an anonymous process,
+ tsk->mm will be NULL, for the logical reason that an anonymous process
+ really doesn't _have_ a real address space at all.
+
+ - however, we obviously need to keep track of which address space we
+ "stole" for such an anonymous user. For that, we have "tsk->active_mm",
+ which shows what the currently active address space is.
+
+ The rule is that for a process with a real address space (ie tsk->mm is
+ non-NULL) the active_mm obviously always has to be the same as the real
+ one.
+
+ For an anonymous process, tsk->mm == NULL, and tsk->active_mm is the
+ "borrowed" mm while the anonymous process is running. When the
+ anonymous process gets scheduled away, the borrowed address space is
+ returned and cleared.
+
+To support all that, the "struct mm_struct" now has two counters: a
+"mm_users" counter that is how many "real address space users" there are,
+and a "mm_count" counter that is the number of "lazy" users (ie anonymous
+users) plus one if there are any real users.
+
+Usually there is at least one real user, but it could be that the real
+user exited on another CPU while a lazy user was still active, so you do
+actually get cases where you have an address space that is _only_ used by
+lazy users. That is often a short-lived state, because once that thread
+gets scheduled away in favour of a real thread, the "zombie" mm gets
+released because "mm_users" becomes zero.
+
+Also, a new rule is that _nobody_ ever has "init_mm" as a real MM any
+more. "init_mm" should be considered just a "lazy context when no other
+context is available", and in fact it is mainly used just at bootup when
+no real VM has yet been created. So code that used to check
+
+ if (current->mm == &init_mm)
+
+should generally just do
+
+ if (!current->mm)
+
+instead (which makes more sense anyway - the test is basically one of "do
+we have a user context", and is generally done by the page fault handler
+and things like that).
+
+Anyway, I put a pre-patch-2.3.13-1 on ftp.kernel.org just a moment ago,
+because it slightly changes the interfaces to accommodate the alpha (who
+would have thought it, but the alpha actually ends up having one of the
+ugliest context switch codes - unlike the other architectures where the MM
+and register state is separate, the alpha PALcode joins the two, and you
+need to switch both together).
+
+(From http://marc.info/?l=linux-kernel&m=93337278602211&w=2)
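
The borrowing rules described above show up directly in the scheduler. A
condensed sketch of the relevant fragment of context_switch() follows,
simplified from kernel/sched.c and not verbatim:

    /* inside context_switch(rq, prev, next), simplified */
    struct mm_struct *mm    = next->mm;
    struct mm_struct *oldmm = prev->active_mm;

    if (!mm) {
        /* anonymous task: lazily borrow the previous address space */
        next->active_mm = oldmm;
        atomic_inc(&oldmm->mm_count);   /* a lazy user bumps mm_count only */
        enter_lazy_tlb(oldmm, next);
    } else {
        switch_mm(oldmm, mm, next);     /* a real user switches page tables */
    }

    if (!prev->mm) {
        /* the previous task was anonymous: hand the borrowed mm back */
        prev->active_mm = NULL;
        rq->prev_mm = oldmm;            /* mmdrop()'d after the switch */
    }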
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
index 0706a7282a8c..2d70d0d95108 100644
--- a/Documentation/vm/unevictable-lru.txt
+++ b/Documentation/vm/unevictable-lru.txt
@@ -1,588 +1,691 @@
-
-This document describes the Linux memory management "Unevictable LRU"
-infrastructure and the use of this infrastructure to manage several types
-of "unevictable" pages. The document attempts to provide the overall
-rationale behind this mechanism and the rationale for some of the design
-decisions that drove the implementation. The latter design rationale is
-discussed in the context of an implementation description. Admittedly, one
-can obtain the implementation details--the "what does it do?"--by reading the
-code. One hopes that the descriptions below add value by provide the answer
-to "why does it do that?".
-
-Unevictable LRU Infrastructure:
-
-The Unevictable LRU adds an additional LRU list to track unevictable pages
-and to hide these pages from vmscan. This mechanism is based on a patch by
-Larry Woodman of Red Hat to address several scalability problems with page
+ ==============================
+ UNEVICTABLE LRU INFRASTRUCTURE
+ ==============================
+
+========
+CONTENTS
+========
+
+ (*) The Unevictable LRU
+
+ - The unevictable page list.
+ - Memory control group interaction.
+ - Marking address spaces unevictable.
+ - Detecting Unevictable Pages.
+ - vmscan's handling of unevictable pages.
+
+ (*) mlock()'d pages.
+
+ - History.
+ - Basic management.
+ - mlock()/mlockall() system call handling.
+ - Filtering special vmas.
+ - munlock()/munlockall() system call handling.
+ - Migrating mlocked pages.
+ - mmap(MAP_LOCKED) system call handling.
+ - munmap()/exit()/exec() system call handling.
+ - try_to_unmap().
+ - try_to_munlock() reverse map scan.
+ - Page reclaim in shrink_*_list().
+
+
+============
+INTRODUCTION
+============
+
+This document describes the Linux memory manager's "Unevictable LRU"
+infrastructure and the use of this to manage several types of "unevictable"
+pages.
+
+The document attempts to provide the overall rationale behind this mechanism
+and the rationale for some of the design decisions that drove the
+implementation. The latter design rationale is discussed in the context of an
+implementation description. Admittedly, one can obtain the implementation
+details - the "what does it do?" - by reading the code. One hopes that the
+descriptions below add value by providing the answer to "why does it do that?".
+
+
+===================
+THE UNEVICTABLE LRU
+===================
+
+The Unevictable LRU facility adds an additional LRU list to track unevictable
+pages and to hide these pages from vmscan. This mechanism is based on a patch
+by Larry Woodman of Red Hat to address several scalability problems with page
reclaim in Linux. The problems have been observed at customer sites on large
-memory x86_64 systems. For example, a non-numal x86_64 platform with 128GB
-of main memory will have over 32 million 4k pages in a single zone. When a
-large fraction of these pages are not evictable for any reason [see below],
-vmscan will spend a lot of time scanning the LRU lists looking for the small
-fraction of pages that are evictable. This can result in a situation where
-all cpus are spending 100% of their time in vmscan for hours or days on end,
-with the system completely unresponsive.
-
-The Unevictable LRU infrastructure addresses the following classes of
-unevictable pages:
-
-+ page owned by ramfs
-+ page mapped into SHM_LOCKed shared memory regions
-+ page mapped into VM_LOCKED [mlock()ed] vmas
-
-The infrastructure might be able to handle other conditions that make pages
+memory x86_64 systems.
+
+To illustrate this with an example, a non-NUMA x86_64 platform with 128GB of
+main memory will have over 32 million 4k pages in a single zone. When a large
+fraction of these pages are not evictable for any reason [see below], vmscan
+will spend a lot of time scanning the LRU lists looking for the small fraction
+of pages that are evictable. This can result in a situation where all CPUs are
+spending 100% of their time in vmscan for hours or days on end, with the system
+completely unresponsive.
+
+The unevictable list addresses the following classes of unevictable pages:
+
+ (*) Those owned by ramfs.
+
+ (*) Those mapped into SHM_LOCK'd shared memory regions.
+
+ (*) Those mapped into VM_LOCKED [mlock()ed] VMAs.
+
+The infrastructure may also be able to handle other conditions that make pages
unevictable, either by definition or by circumstance, in the future.
-The Unevictable LRU List
+THE UNEVICTABLE PAGE LIST
+-------------------------
The Unevictable LRU infrastructure consists of an additional, per-zone, LRU list
called the "unevictable" list and an associated page flag, PG_unevictable, to
-indicate that the page is being managed on the unevictable list. The
-PG_unevictable flag is analogous to, and mutually exclusive with, the PG_active
-flag in that it indicates on which LRU list a page resides when PG_lru is set.
-The unevictable LRU list is source configurable based on the UNEVICTABLE_LRU
-Kconfig option.
+indicate that the page is being managed on the unevictable list.
+
+The PG_unevictable flag is analogous to, and mutually exclusive with, the
+PG_active flag in that it indicates on which LRU list a page resides when
+PG_lru is set. The unevictable list is compile-time configurable based on the
+UNEVICTABLE_LRU Kconfig option.
The Unevictable LRU infrastructure maintains unevictable pages on an additional
LRU list for a few reasons:
-1) We get to "treat unevictable pages just like we treat other pages in the
- system, which means we get to use the same code to manipulate them, the
- same code to isolate them (for migrate, etc.), the same code to keep track
- of the statistics, etc..." [Rik van Riel]
+ (1) We get to "treat unevictable pages just like we treat other pages in the
+ system - which means we get to use the same code to manipulate them, the
+ same code to isolate them (for migrate, etc.), the same code to keep track
+ of the statistics, etc..." [Rik van Riel]
+
+ (2) We want to be able to migrate unevictable pages between nodes for memory
+ defragmentation, workload management and memory hotplug. The linux kernel
+ can only migrate pages that it can successfully isolate from the LRU
+ lists. If we were to maintain pages elsewhere than on an LRU-like list,
+ where they can be found by isolate_lru_page(), we would prevent their
+ migration, unless we reworked migration code to find the unevictable pages
+ itself.
-2) We want to be able to migrate unevictable pages between nodes--for memory
- defragmentation, workload management and memory hotplug. The linux kernel
- can only migrate pages that it can successfully isolate from the lru lists.
- If we were to maintain pages elsewise than on an lru-like list, where they
- can be found by isolate_lru_page(), we would prevent their migration, unless
- we reworked migration code to find the unevictable pages.
+The unevictable list does not differentiate between file-backed and anonymous,
+swap-backed pages. This differentiation is only important while the pages are,
+in fact, evictable.
-The unevictable LRU list does not differentiate between file backed and swap
-backed [anon] pages. This differentiation is only important while the pages
-are, in fact, evictable.
+The unevictable list benefits from the "arrayification" of the per-zone LRU
+lists and statistics originally proposed and posted by Christoph Lameter.
-The unevictable LRU list benefits from the "arrayification" of the per-zone
-LRU lists and statistics originally proposed and posted by Christoph Lameter.
+The unevictable list does not use the LRU pagevec mechanism. Rather,
+unevictable pages are placed directly on the page's zone's unevictable list
+under the zone lru_lock. This allows us to prevent the stranding of pages on
+the unevictable list when one task has the page isolated from the LRU and other
+tasks are changing the "evictability" state of the page.
-The unevictable list does not use the lru pagevec mechanism. Rather,
-unevictable pages are placed directly on the page's zone's unevictable
-list under the zone lru_lock. The reason for this is to prevent stranding
-of pages on the unevictable list when one task has the page isolated from the
-lru and other tasks are changing the "evictability" state of the page.
+MEMORY CONTROL GROUP INTERACTION
+--------------------------------
-Unevictable LRU and Memory Controller Interaction
+The unevictable LRU facility interacts with the memory control group [aka
+memory controller; see Documentation/cgroups/memory.txt] by extending the
+lru_list enum.
+
+The memory controller data structure automatically gets a per-zone unevictable
+list as a result of the "arrayification" of the per-zone LRU lists (one per
+lru_list enum element). The memory controller tracks the movement of pages to
+and from the unevictable list.
-The memory controller data structure automatically gets a per zone unevictable
-lru list as a result of the "arrayification" of the per-zone LRU lists. The
-memory controller tracks the movement of pages to and from the unevictable list.
When a memory control group comes under memory pressure, the controller will
not attempt to reclaim pages on the unevictable list. This has a couple of
-effects. Because the pages are "hidden" from reclaim on the unevictable list,
-the reclaim process can be more efficient, dealing only with pages that have
-a chance of being reclaimed. On the other hand, if too many of the pages
-charged to the control group are unevictable, the evictable portion of the
-working set of the tasks in the control group may not fit into the available
-memory. This can cause the control group to thrash or to oom-kill tasks.
-
-
-Unevictable LRU: Detecting Unevictable Pages
-
-The function page_evictable(page, vma) in vmscan.c determines whether a
-page is evictable or not. For ramfs pages and pages in SHM_LOCKed regions,
-page_evictable() tests a new address space flag, AS_UNEVICTABLE, in the page's
-address space using a wrapper function. Wrapper functions are used to set,
-clear and test the flag to reduce the requirement for #ifdef's throughout the
-source code. AS_UNEVICTABLE is set on ramfs inode/mapping when it is created.
-This flag remains for the life of the inode.
-
-For shared memory regions, AS_UNEVICTABLE is set when an application
-successfully SHM_LOCKs the region and is removed when the region is
-SHM_UNLOCKed. Note that shmctl(SHM_LOCK, ...) does not populate the page
-tables for the region as does, for example, mlock(). So, we make no special
-effort to push any pages in the SHM_LOCKed region to the unevictable list.
-Vmscan will do this when/if it encounters the pages during reclaim. On
-SHM_UNLOCK, shmctl() scans the pages in the region and "rescues" them from the
-unevictable list if no other condition keeps them unevictable. If a SHM_LOCKed
-region is destroyed, the pages are also "rescued" from the unevictable list in
-the process of freeing them.
-
-page_evictable() detects mlock()ed pages by testing an additional page flag,
-PG_mlocked via the PageMlocked() wrapper. If the page is NOT mlocked, and a
-non-NULL vma is supplied, page_evictable() will check whether the vma is
+effects:
+
+ (1) Because the pages are "hidden" from reclaim on the unevictable list, the
+ reclaim process can be more efficient, dealing only with pages that have a
+ chance of being reclaimed.
+
+ (2) On the other hand, if too many of the pages charged to the control group
+ are unevictable, the evictable portion of the working set of the tasks in
+ the control group may not fit into the available memory. This can cause
+ the control group to thrash or to OOM-kill tasks.
+
+
+MARKING ADDRESS SPACES UNEVICTABLE
+----------------------------------
+
+For facilities such as ramfs none of the pages attached to the address space
+may be evicted. To prevent eviction of any such pages, the AS_UNEVICTABLE
+address space flag is provided, and this can be manipulated by a filesystem
+using a number of wrapper functions:
+
+ (*) void mapping_set_unevictable(struct address_space *mapping);
+
+ Mark the address space as being completely unevictable.
+
+ (*) void mapping_clear_unevictable(struct address_space *mapping);
+
+ Mark the address space as being evictable.
+
+ (*) int mapping_unevictable(struct address_space *mapping);
+
+ Query the address space, and return true if it is completely
+ unevictable.
+
+These are currently used in two places in the kernel:
+
+ (1) By ramfs to mark the address spaces of its inodes when they are created,
+ and this mark remains for the life of the inode.
+
+ (2) By SYSV SHM to mark SHM_LOCK'd address spaces until SHM_UNLOCK is called.
+
+ Note that SHM_LOCK is not required to page in the locked pages if they're
+ swapped out; the application must touch the pages manually if it wants to
+ ensure they're in memory.
+
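
As a concrete illustration of the first use, a filesystem can mark an inode's
pages unevictable when the inode is created. A minimal sketch modelled loosely
on what ramfs does (the function name is invented for the example):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static struct inode *example_get_inode(struct super_block *sb)
    {
        struct inode *inode = new_inode(sb);

        if (inode)
            /* pages of this mapping are hidden from reclaim for
             * the life of the inode */
            mapping_set_unevictable(inode->i_mapping);
        return inode;
    }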
+
+DETECTING UNEVICTABLE PAGES
+---------------------------
+
+The function page_evictable() in vmscan.c determines whether a page is
+evictable or not using the query function outlined above [see section "Marking
+address spaces unevictable"] to check the AS_UNEVICTABLE flag.
+
+For address spaces that are so marked after being populated (as SHM regions
+might be), the lock action (eg: SHM_LOCK) can be lazy, and need not populate
+the page tables for the region as does, for example, mlock(), nor need it make
+any special effort to push any pages in the SHM_LOCK'd area to the unevictable
+list. Instead, vmscan will do this if and when it encounters the pages during
+a reclamation scan.
+
+On an unlock action (such as SHM_UNLOCK), the unlocker (eg: shmctl()) must scan
+the pages in the region and "rescue" them from the unevictable list if no other
+condition is keeping them unevictable. If an unevictable region is destroyed,
+the pages are also "rescued" from the unevictable list in the process of
+freeing them.
+
+page_evictable() also checks for mlocked pages by testing an additional page
+flag, PG_mlocked (as wrapped by PageMlocked()). If the page is NOT mlocked,
+and a non-NULL VMA is supplied, page_evictable() will check whether the VMA is
VM_LOCKED via is_mlocked_vma(). is_mlocked_vma() will SetPageMlocked() and
update the appropriate statistics if the vma is VM_LOCKED. This method allows
efficient "culling" of pages in the fault path that are being faulted in to
-VM_LOCKED vmas.
+VM_LOCKED VMAs.
-Unevictable Pages and Vmscan [shrink_*_list()]
+VMSCAN'S HANDLING OF UNEVICTABLE PAGES
+--------------------------------------
If unevictable pages are culled in the fault path, or moved to the unevictable
-list at mlock() or mmap() time, vmscan will never encounter the pages until
-they have become evictable again, for example, via munlock() and have been
-"rescued" from the unevictable list. However, there may be situations where we
-decide, for the sake of expediency, to leave a unevictable page on one of the
-regular active/inactive LRU lists for vmscan to deal with. Vmscan checks for
-such pages in all of the shrink_{active|inactive|page}_list() functions and
-will "cull" such pages that it encounters--that is, it diverts those pages to
-the unevictable list for the zone being scanned.
-
-There may be situations where a page is mapped into a VM_LOCKED vma, but the
-page is not marked as PageMlocked. Such pages will make it all the way to
+list at mlock() or mmap() time, vmscan will not encounter the pages until they
+have become evictable again (via munlock() for example) and have been "rescued"
+from the unevictable list. However, there may be situations where we decide,
+for the sake of expediency, to leave an unevictable page on one of the regular
+active/inactive LRU lists for vmscan to deal with. vmscan checks for such
+pages in all of the shrink_{active|inactive|page}_list() functions and will
+"cull" such pages that it encounters: that is, it diverts those pages to the
+unevictable list for the zone being scanned.
+
+There may be situations where a page is mapped into a VM_LOCKED VMA, but the
+page is not marked as PG_mlocked. Such pages will make it all the way to
shrink_page_list() where they will be detected when vmscan walks the reverse
-map in try_to_unmap(). If try_to_unmap() returns SWAP_MLOCK, shrink_page_list()
-will cull the page at that point.
+map in try_to_unmap(). If try_to_unmap() returns SWAP_MLOCK,
+shrink_page_list() will cull the page at that point.
-To "cull" an unevictable page, vmscan simply puts the page back on the lru
-list using putback_lru_page()--the inverse operation to isolate_lru_page()--
-after dropping the page lock. Because the condition which makes the page
-unevictable may change once the page is unlocked, putback_lru_page() will
-recheck the unevictable state of a page that it places on the unevictable lru
-list. If the page has become unevictable, putback_lru_page() removes it from
-the list and retries, including the page_unevictable() test. Because such a
-race is a rare event and movement of pages onto the unevictable list should be
-rare, these extra evictabilty checks should not occur in the majority of calls
-to putback_lru_page().
+To "cull" an unevictable page, vmscan simply puts the page back on the LRU list
+using putback_lru_page() - the inverse operation to isolate_lru_page() - after
+dropping the page lock. Because the condition which makes the page unevictable
+may change once the page is unlocked, putback_lru_page() will recheck the
+unevictable state of a page that it places on the unevictable list. If the
+page has become unevictable, putback_lru_page() removes it from the list and
+retries, including the page_unevictable() test. Because such a race is a rare
+event and movement of pages onto the unevictable list should be rare, these
+extra evictability checks should not occur in the majority of calls to
+putback_lru_page().
-Mlocked Page: Prior Work
+=============
+MLOCKED PAGES
+=============
-The "Unevictable Mlocked Pages" infrastructure is based on work originally
+The unevictable page list is also useful for mlock(), in addition to ramfs and
+SYSV SHM. Note that mlock() is only available in CONFIG_MMU=y situations; in
+NOMMU situations, all mappings are effectively mlocked.
+
+
+HISTORY
+-------
+
+The "Unevictable mlocked Pages" infrastructure is based on work originally
posted by Nick Piggin in an RFC patch entitled "mm: mlocked pages off LRU".
-Nick posted his patch as an alternative to a patch posted by Christoph
-Lameter to achieve the same objective--hiding mlocked pages from vmscan.
-In Nick's patch, he used one of the struct page lru list link fields as a count
-of VM_LOCKED vmas that map the page. This use of the link field for a count
-prevented the management of the pages on an LRU list. Thus, mlocked pages were
-not migratable as isolate_lru_page() could not find them and the lru list link
-field was not available to the migration subsystem. Nick resolved this by
-putting mlocked pages back on the lru list before attempting to isolate them,
-thus abandoning the count of VM_LOCKED vmas. When Nick's patch was integrated
-with the Unevictable LRU work, the count was replaced by walking the reverse
-map to determine whether any VM_LOCKED vmas mapped the page. More on this
-below.
-
-
-Mlocked Pages: Basic Management
-
-Mlocked pages--pages mapped into a VM_LOCKED vma--represent one class of
-unevictable pages. When such a page has been "noticed" by the memory
-management subsystem, the page is marked with the PG_mlocked [PageMlocked()]
-flag. A PageMlocked() page will be placed on the unevictable LRU list when
-it is added to the LRU. Pages can be "noticed" by memory management in
-several places:
-
-1) in the mlock()/mlockall() system call handlers.
-2) in the mmap() system call handler when mmap()ing a region with the
- MAP_LOCKED flag, or mmap()ing a region in a task that has called
- mlockall() with the MCL_FUTURE flag. Both of these conditions result
- in the VM_LOCKED flag being set for the vma.
-3) in the fault path, if mlocked pages are "culled" in the fault path,
- and when a VM_LOCKED stack segment is expanded.
-4) as mentioned above, in vmscan:shrink_page_list() when attempting to
- reclaim a page in a VM_LOCKED vma via try_to_unmap().
-
-Mlocked pages become unlocked and rescued from the unevictable list when:
-
-1) mapped in a range unlocked via the munlock()/munlockall() system calls.
-2) munmapped() out of the last VM_LOCKED vma that maps the page, including
- unmapping at task exit.
-3) when the page is truncated from the last VM_LOCKED vma of an mmap()ed file.
-4) before a page is COWed in a VM_LOCKED vma.
-
-
-Mlocked Pages: mlock()/mlockall() System Call Handling
+Nick posted his patch as an alternative to a patch posted by Christoph Lameter
+to achieve the same objective: hiding mlocked pages from vmscan.
+
+In Nick's patch, he used one of the struct page LRU list link fields as a count
+of VM_LOCKED VMAs that map the page. This use of the link field for a count
+prevented the management of the pages on an LRU list, and thus mlocked pages
+were not migratable as isolate_lru_page() could not find them, and the LRU list
+link field was not available to the migration subsystem.
+
+Nick resolved this by putting mlocked pages back on the lru list before
+attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs. When
+Nick's patch was integrated with the Unevictable LRU work, the count was
+replaced by walking the reverse map to determine whether any VM_LOCKED VMAs
+mapped the page. More on this below.
+
+
+BASIC MANAGEMENT
+----------------
+
+mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable
+pages. When such a page has been "noticed" by the memory management subsystem,
+the page is marked with the PG_mlocked flag. This can be manipulated using the
+PageMlocked() functions.
+
+A PG_mlocked page will be placed on the unevictable list when it is added to
+the LRU. Such pages can be "noticed" by memory management in several places:
+
+ (1) in the mlock()/mlockall() system call handlers;
+
+ (2) in the mmap() system call handler when mmapping a region with the
+ MAP_LOCKED flag;
+
+ (3) mmapping a region in a task that has called mlockall() with the MCL_FUTURE
+ flag;
+
+ (4) in the fault path, if mlocked pages are "culled" in the fault path,
+ and when a VM_LOCKED stack segment is expanded; or
+
+ (5) as mentioned above, in vmscan:shrink_page_list() when attempting to
+ reclaim a page in a VM_LOCKED VMA via try_to_unmap()
+
+all of which result in the VM_LOCKED flag being set for the VMA if it doesn't
+already have it set.
+
+mlocked pages become unlocked and rescued from the unevictable list when:
+
+ (1) mapped in a range unlocked via the munlock()/munlockall() system calls;
+
+ (2) munmap()'d out of the last VM_LOCKED VMA that maps the page, including
+ unmapping at task exit;
+
+ (3) when the page is truncated from the last VM_LOCKED VMA of an mmapped file;
+ or
+
+ (4) before a page is COW'd in a VM_LOCKED VMA.
+
+
+mlock()/mlockall() SYSTEM CALL HANDLING
+---------------------------------------
Both [do_]mlock() and [do_]mlockall() system call handlers call mlock_fixup()
-for each vma in the range specified by the call. In the case of mlockall(),
+for each VMA in the range specified by the call. In the case of mlockall(),
this is the entire active address space of the task. Note that mlock_fixup()
-is used for both mlock()ing and munlock()ing a range of memory. A call to
-mlock() an already VM_LOCKED vma, or to munlock() a vma that is not VM_LOCKED
-is treated as a no-op--mlock_fixup() simply returns.
-
-If the vma passes some filtering described in "Mlocked Pages: Filtering Vmas"
-below, mlock_fixup() will attempt to merge the vma with its neighbors or split
-off a subset of the vma if the range does not cover the entire vma. Once the
-vma has been merged or split or neither, mlock_fixup() will call
-__mlock_vma_pages_range() to fault in the pages via get_user_pages() and
-to mark the pages as mlocked via mlock_vma_page().
-
-Note that the vma being mlocked might be mapped with PROT_NONE. In this case,
-get_user_pages() will be unable to fault in the pages. That's OK. If pages
-do end up getting faulted into this VM_LOCKED vma, we'll handle them in the
+is used for both mlocking and munlocking a range of memory. A call to mlock()
+an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED is
+treated as a no-op, and mlock_fixup() simply returns.
+
+If the VMA passes some filtering as described in "Filtering Special Vmas"
+below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
+off a subset of the VMA if the range does not cover the entire VMA. Once the
+VMA has been merged or split or neither, mlock_fixup() will call
+__mlock_vma_pages_range() to fault in the pages via get_user_pages() and to
+mark the pages as mlocked via mlock_vma_page().
+
+Note that the VMA being mlocked might be mapped with PROT_NONE. In this case,
+get_user_pages() will be unable to fault in the pages. That's okay. If pages
+do end up getting faulted into this VM_LOCKED VMA, we'll handle them in the
fault path or in vmscan.
Also note that a page returned by get_user_pages() could be truncated or
-migrated out from under us, while we're trying to mlock it. To detect
-this, __mlock_vma_pages_range() tests the page_mapping after acquiring
-the page lock. If the page is still associated with its mapping, we'll
-go ahead and call mlock_vma_page(). If the mapping is gone, we just
-unlock the page and move on. Worse case, this results in page mapped
-in a VM_LOCKED vma remaining on a normal LRU list without being
-PageMlocked(). Again, vmscan will detect and cull such pages.
-
-mlock_vma_page(), called with the page locked [N.B., not "mlocked"], will
-TestSetPageMlocked() for each page returned by get_user_pages(). We use
-TestSetPageMlocked() because the page might already be mlocked by another
-task/vma and we don't want to do extra work. We especially do not want to
-count an mlocked page more than once in the statistics. If the page was
-already mlocked, mlock_vma_page() is done.
+migrated out from under us, while we're trying to mlock it. To detect this,
+__mlock_vma_pages_range() checks page_mapping() after acquiring the page lock.
+If the page is still associated with its mapping, we'll go ahead and call
+mlock_vma_page(). If the mapping is gone, we just unlock the page and move on.
+In the worst case, this will result in a page mapped in a VM_LOCKED VMA
+remaining on a normal LRU list without being PageMlocked(). Again, vmscan will
+detect and cull such pages.
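+
+As a hedged sketch (assuming the mm/mlock.c of this era; error handling and
+the munlock path are omitted), the visit loop in __mlock_vma_pages_range()
+looks roughly like:
+
+    /* 'ret' pages were returned by get_user_pages() into pages[] */
+    for (i = 0; i < ret; i++) {
+        struct page *page = pages[i];
+
+        /* the page may have been truncated or migrated meanwhile */
+        lock_page(page);
+        if (page->mapping)          /* still associated with its mapping? */
+            mlock_vma_page(page);
+        unlock_page(page);
+        put_page(page);             /* drop the get_user_pages() reference */
+    }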
+
+mlock_vma_page() will call TestSetPageMlocked() for each page returned by
+get_user_pages(). We use TestSetPageMlocked() because the page might already
+be mlocked by another task/VMA and we don't want to do extra work. We
+especially do not want to count an mlocked page more than once in the
+statistics. If the page was already mlocked, mlock_vma_page() need do nothing
+more.
If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the
page from the LRU, as it is likely on the appropriate active or inactive list
-at that time. If the isolate_lru_page() succeeds, mlock_vma_page() will
-putback the page--putback_lru_page()--which will notice that the page is now
-mlocked and divert the page to the zone's unevictable LRU list. If
+at that time. If the isolate_lru_page() succeeds, mlock_vma_page() will put
+back the page - by calling putback_lru_page() - which will notice that the page
+is now mlocked and divert the page to the zone's unevictable list. If
mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle
-it later if/when it attempts to reclaim the page.
+it later if and when it attempts to reclaim the page.
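+
+Putting the pieces together, a condensed sketch of mlock_vma_page() (based on
+the mm/mlock.c of this era; the statistics calls are approximate):
+
+    void mlock_vma_page(struct page *page)
+    {
+        BUG_ON(!PageLocked(page));
+
+        if (!TestSetPageMlocked(page)) {
+            inc_zone_page_state(page, NR_MLOCK);
+            if (!isolate_lru_page(page))
+                putback_lru_page(page);  /* diverts to unevictable list */
+            /* else: leave the page for vmscan to cull later */
+        }
+    }
+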
-Mlocked Pages: Filtering Special Vmas
+FILTERING SPECIAL VMAS
+----------------------
-mlock_fixup() filters several classes of "special" vmas:
+mlock_fixup() filters several classes of "special" VMAs:
-1) vmas with VM_IO|VM_PFNMAP set are skipped entirely. The pages behind
+1) VMAs with VM_IO or VM_PFNMAP set are skipped entirely. The pages behind
these mappings are inherently pinned, so we don't need to mark them as
- mlocked. In any case, most of the pages have no struct page in which to
- so mark the page. Because of this, get_user_pages() will fail for these
- vmas, so there is no sense in attempting to visit them.
-
-2) vmas mapping hugetlbfs page are already effectively pinned into memory.
- We don't need nor want to mlock() these pages. However, to preserve the
- prior behavior of mlock()--before the unevictable/mlock changes--
- mlock_fixup() will call make_pages_present() in the hugetlbfs vma range
- to allocate the huge pages and populate the ptes.
-
-3) vmas with VM_DONTEXPAND|VM_RESERVED are generally user space mappings of
- kernel pages, such as the vdso page, relay channel pages, etc. These pages
+ mlocked. In any case, most of these pages have no struct page in which to
+ record the mlocked state. Because of this, get_user_pages() will fail for
+ these VMAs, so there is no sense in attempting to visit them.
+
+2) VMAs mapping hugetlbfs pages are already effectively pinned into memory. We
+ neither need nor want to mlock() these pages. However, to preserve the
+ prior behavior of mlock() - before the unevictable/mlock changes -
+ mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to
+ allocate the huge pages and populate the ptes.
+
+3) VMAs with VM_DONTEXPAND or VM_RESERVED are generally userspace mappings of
+ kernel pages, such as the VDSO page, relay channel pages, etc. These pages
are inherently unevictable and are not managed on the LRU lists.
- mlock_fixup() treats these vmas the same as hugetlbfs vmas. It calls
+ mlock_fixup() treats these VMAs the same as hugetlbfs VMAs. It calls
make_pages_present() to populate the ptes.
-Note that for all of these special vmas, mlock_fixup() does not set the
+Note that for all of these special VMAs, mlock_fixup() does not set the
VM_LOCKED flag. Therefore, we won't have to deal with them later during
-munlock() or munmap()--for example, at task exit. Neither does mlock_fixup()
-account these vmas against the task's "locked_vm".
-
-Mlocked Pages: Downgrading the Mmap Semaphore.
-
-mlock_fixup() must be called with the mmap semaphore held for write, because
-it may have to merge or split vmas. However, mlocking a large region of
-memory can take a long time--especially if vmscan must reclaim pages to
-satisfy the regions requirements. Faulting in a large region with the mmap
-semaphore held for write can hold off other faults on the address space, in
-the case of a multi-threaded task. It can also hold off scans of the task's
-address space via /proc. While testing under heavy load, it was observed that
-the ps(1) command could be held off for many minutes while a large segment was
-mlock()ed down.
-
-To address this issue, and to make the system more responsive during mlock()ing
-of large segments, mlock_fixup() downgrades the mmap semaphore to read mode
-during the call to __mlock_vma_pages_range(). This works fine. However, the
-callers of mlock_fixup() expect the semaphore to be returned in write mode.
-So, mlock_fixup() "upgrades" the semphore to write mode. Linux does not
-support an atomic upgrade_sem() call, so mlock_fixup() must drop the semaphore
-and reacquire it in write mode. In a multi-threaded task, it is possible for
-the task memory map to change while the semaphore is dropped. Therefore,
-mlock_fixup() looks up the vma at the range start address after reacquiring
-the semaphore in write mode and verifies that it still covers the original
-range. If not, mlock_fixup() returns an error [-EAGAIN]. All callers of
-mlock_fixup() have been changed to deal with this new error condition.
-
-Note: when munlocking a region, all of the pages should already be resident--
-unless we have racing threads mlocking() and munlocking() regions. So,
-unlocking should not have to wait for page allocations nor faults of any kind.
-Therefore mlock_fixup() does not downgrade the semaphore for munlock().
-
-
-Mlocked Pages: munlock()/munlockall() System Call Handling
-
-The munlock() and munlockall() system calls are handled by the same functions--
-do_mlock[all]()--as the mlock() and mlockall() system calls with the unlock
-vs lock operation indicated by an argument. So, these system calls are also
-handled by mlock_fixup(). Again, if called for an already munlock()ed vma,
-mlock_fixup() simply returns. Because of the vma filtering discussed above,
-VM_LOCKED will not be set in any "special" vmas. So, these vmas will be
+munlock(), munmap() or task exit. Neither does mlock_fixup() account these
+VMAs against the task's "locked_vm".
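+
+A sketch of this filter at the top of mlock_fixup() (assuming the mm/mlock.c
+of this era; 'lock', 'start' and 'end' are mlock_fixup()'s own parameters):
+
+    if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+        goto out;                   /* pages are pinned; skip entirely */
+
+    if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+        is_vm_hugetlb_page(vma)) {
+        if (lock)
+            make_pages_present(start, end);
+        goto out;                   /* don't set VM_LOCKED, don't count */
+    }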
+
+
+munlock()/munlockall() SYSTEM CALL HANDLING
+-------------------------------------------
+
+The munlock() and munlockall() system calls are handled by the same functions -
+do_mlock[all]() - as the mlock() and mlockall() system calls with the unlock vs
+lock operation indicated by an argument. So, these system calls are also
+handled by mlock_fixup(). Again, if called for an already munlocked VMA,
+mlock_fixup() simply returns. Because of the VMA filtering discussed above,
+VM_LOCKED will not be set in any "special" VMAs. So, these VMAs will be
ignored for munlock.
-If the vma is VM_LOCKED, mlock_fixup() again attempts to merge or split off
-the specified range. The range is then munlocked via the function
-__mlock_vma_pages_range()--the same function used to mlock a vma range--
+If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
+specified range. The range is then munlocked via the function
+__mlock_vma_pages_range() - the same function used to mlock a VMA range -
passing a flag to indicate that munlock() is being performed.
-Because the vma access protections could have been changed to PROT_NONE after
+Because the VMA access protections could have been changed to PROT_NONE after
faulting in and mlocking pages, get_user_pages() was unreliable for visiting
-these pages for munlocking. Because we don't want to leave pages mlocked(),
+these pages for munlocking. Because we don't want to leave pages mlocked,
get_user_pages() was enhanced to accept a flag to ignore the permissions when
-fetching the pages--all of which should be resident as a result of previous
-mlock()ing.
+fetching the pages - all of which should be resident as a result of previous
+mlocking.
For munlock(), __mlock_vma_pages_range() unlocks individual pages by calling
munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked
-flag using TestClearPageMlocked(). As with mlock_vma_page(), munlock_vma_page()
-use the Test*PageMlocked() function to handle the case where the page might
-have already been unlocked by another task. If the page was mlocked,
-munlock_vma_page() updates that zone statistics for the number of mlocked
-pages. Note, however, that at this point we haven't checked whether the page
-is mapped by other VM_LOCKED vmas.
-
-We can't call try_to_munlock(), the function that walks the reverse map to check
-for other VM_LOCKED vmas, without first isolating the page from the LRU.
+flag using TestClearPageMlocked(). As with mlock_vma_page(),
+munlock_vma_page() uses the Test*PageMlocked() function to handle the case
+where the page might have already been unlocked by another task. If the page
+was mlocked, munlock_vma_page() updates the zone statistics for the number of
+mlocked pages. Note, however, that at this point we haven't checked whether
+the page is mapped by other VM_LOCKED VMAs.
+
+We can't call try_to_munlock(), the function that walks the reverse map to
+check for other VM_LOCKED VMAs, without first isolating the page from the LRU.
try_to_munlock() is a variant of try_to_unmap() and thus requires that the page
-not be on an lru list. [More on these below.] However, the call to
-isolate_lru_page() could fail, in which case we couldn't try_to_munlock().
-So, we go ahead and clear PG_mlocked up front, as this might be the only chance
-we have. If we can successfully isolate the page, we go ahead and
+not be on an LRU list [more on these below]. However, the call to
+isolate_lru_page() could fail, in which case we couldn't try_to_munlock(). So,
+we go ahead and clear PG_mlocked up front, as this might be the only chance we
+have. If we can successfully isolate the page, we go ahead and
try_to_munlock(), which will restore the PG_mlocked flag and update the zone
-page statistics if it finds another vma holding the page mlocked. If we fail
+page statistics if it finds another VMA holding the page mlocked. If we fail
to isolate the page, we'll have left a potentially mlocked page on the LRU.
-This is fine, because we'll catch it later when/if vmscan tries to reclaim the
-page. This should be relatively rare.
-
-Mlocked Pages: Migrating Them...
-
-A page that is being migrated has been isolated from the lru lists and is
-held locked across unmapping of the page, updating the page's mapping
-[address_space] entry and copying the contents and state, until the
-page table entry has been replaced with an entry that refers to the new
-page. Linux supports migration of mlocked pages and other unevictable
-pages. This involves simply moving the PageMlocked and PageUnevictable states
-from the old page to the new page.
-
-Note that page migration can race with mlocking or munlocking of the same
-page. This has been discussed from the mlock/munlock perspective in the
-respective sections above. Both processes [migration, m[un]locking], hold
-the page locked. This provides the first level of synchronization. Page
-migration zeros out the page_mapping of the old page before unlocking it,
-so m[un]lock can skip these pages by testing the page mapping under page
-lock.
-
-When completing page migration, we place the new and old pages back onto the
-lru after dropping the page lock. The "unneeded" page--old page on success,
-new page on failure--will be freed when the reference count held by the
-migration process is released. To ensure that we don't strand pages on the
-unevictable list because of a race between munlock and migration, page
-migration uses the putback_lru_page() function to add migrated pages back to
-the lru.
-
-
-Mlocked Pages: mmap(MAP_LOCKED) System Call Handling
+This is fine, because we'll catch it later if and when vmscan tries to reclaim
+the page. This should be relatively rare.
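+
+Condensed, munlock_vma_page() behaves roughly as follows (a sketch assuming
+the mm/mlock.c of this era):
+
+    static void munlock_vma_page(struct page *page)
+    {
+        BUG_ON(!PageLocked(page));
+
+        /* clear PG_mlocked up front: this may be our only chance */
+        if (TestClearPageMlocked(page)) {
+            dec_zone_page_state(page, NR_MLOCK);
+            if (!isolate_lru_page(page)) {
+                /* may re-set PG_mlocked and fix the statistics */
+                try_to_munlock(page);
+                putback_lru_page(page);
+            }
+            /* else: vmscan will catch the page later */
+        }
+    }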
+
+
+MIGRATING MLOCKED PAGES
+-----------------------
+
+A page that is being migrated has been isolated from the LRU lists and is held
+locked across unmapping of the page, updating the page's address space entry
+and copying the contents and state, until the page table entry has been
+replaced with an entry that refers to the new page. Linux supports migration
+of mlocked pages and other unevictable pages. This involves simply moving the
+PG_mlocked and PG_unevictable states from the old page to the new page.
+
+Note that page migration can race with mlocking or munlocking of the same page.
+This has been discussed from the mlock/munlock perspective in the respective
+sections above. Both processes (migration and m[un]locking) hold the page
+locked. This provides the first level of synchronization. Page migration
+zeros out the page_mapping of the old page before unlocking it, so m[un]lock
+can skip these pages by testing the page mapping under page lock.
+
+To complete page migration, we place the new and old pages back onto the LRU
+after dropping the page lock. The "unneeded" page - old page on success, new
+page on failure - will be freed when the reference count held by the migration
+process is released. To ensure that we don't strand pages on the unevictable
+list because of a race between munlock and migration, page migration uses the
+putback_lru_page() function to add migrated pages back to the LRU.
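+
+Moving the state across is a test-and-clear on the old page paired with a set
+on the new page, keeping the zone statistics balanced - a sketch along the
+lines of the mlock_migrate_page() helper (assuming the mm/internal.h of this
+era):
+
+    static inline void mlock_migrate_page(struct page *newpage,
+                                          struct page *page)
+    {
+        if (TestClearPageMlocked(page)) {
+            unsigned long flags;
+
+            local_irq_save(flags);
+            __dec_zone_page_state(page, NR_MLOCK);
+            SetPageMlocked(newpage);
+            __inc_zone_page_state(newpage, NR_MLOCK);
+            local_irq_restore(flags);
+        }
+    }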
+
+
+mmap(MAP_LOCKED) SYSTEM CALL HANDLING
+-------------------------------------
In addition to the mlock()/mlockall() system calls, an application can request
-that a region of memory be mlocked using the MAP_LOCKED flag with the mmap()
+that a region of memory be mlocked by supplying the MAP_LOCKED flag to the mmap()
call. Furthermore, any mmap() call or brk() call that expands the heap by a
task that has previously called mlockall() with the MCL_FUTURE flag will result
-in the newly mapped memory being mlocked. Before the unevictable/mlock changes,
-the kernel simply called make_pages_present() to allocate pages and populate
-the page table.
+in the newly mapped memory being mlocked. Before the unevictable/mlock
+changes, the kernel simply called make_pages_present() to allocate pages and
+populate the page table.
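+
+From userspace, the two paths look like this (an illustrative fragment; error
+checking omitted):
+
+    #include <sys/mman.h>
+
+    size_t len = 4096;
+
+    /* map an anonymous region that is mlocked from the start */
+    void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
+
+    /* or: lock all future mappings, then map normally */
+    mlockall(MCL_FUTURE);
+    void *buf2 = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+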
To mlock a range of memory under the unevictable/mlock infrastructure, the
mmap() handler and task address space expansion functions call
mlock_vma_pages_range() specifying the vma and the address range to mlock.
-mlock_vma_pages_range() filters vmas like mlock_fixup(), as described above in
-"Mlocked Pages: Filtering Vmas". It will clear the VM_LOCKED flag, which will
-have already been set by the caller, in filtered vmas. Thus these vma's need
-not be visited for munlock when the region is unmapped.
+mlock_vma_pages_range() filters VMAs like mlock_fixup(), as described above in
+"Filtering Special VMAs". It will clear the VM_LOCKED flag, which will have
+already been set by the caller, in filtered VMAs. Thus these VMAs need not be
+visited for munlock when the region is unmapped.
-For "normal" vmas, mlock_vma_pages_range() calls __mlock_vma_pages_range() to
+For "normal" VMAs, mlock_vma_pages_range() calls __mlock_vma_pages_range() to
fault/allocate the pages and mlock them. Again, like mlock_fixup(),
mlock_vma_pages_range() downgrades the mmap semaphore to read mode before
-attempting to fault/allocate and mlock the pages; and "upgrades" the semaphore
+attempting to fault/allocate and mlock the pages, and "upgrades" the semaphore
back to write mode before returning.
-The callers of mlock_vma_pages_range() will have already added the memory
-range to be mlocked to the task's "locked_vm". To account for filtered vmas,
+The callers of mlock_vma_pages_range() will have already added the memory range
+to be mlocked to the task's "locked_vm". To account for filtered VMAs,
mlock_vma_pages_range() returns the number of pages NOT mlocked. All of the
-callers then subtract a non-negative return value from the task's locked_vm.
-A negative return value represent an error--for example, from get_user_pages()
-attempting to fault in a vma with PROT_NONE access. In this case, we leave
-the memory range accounted as locked_vm, as the protections could be changed
-later and pages allocated into that region.
+callers then subtract a non-negative return value from the task's locked_vm. A
+negative return value represents an error - for example, from get_user_pages()
+attempting to fault in a VMA with PROT_NONE access. In this case, we leave the
+memory range accounted as locked_vm, as the protections could be changed later
+and pages allocated into that region.
-Mlocked Pages: munmap()/exit()/exec() System Call Handling
+munmap()/exit()/exec() SYSTEM CALL HANDLING
+-------------------------------------------
When unmapping an mlocked region of memory, whether by an explicit call to
munmap() or via an internal unmap from exit() or exec() processing, we must
-munlock the pages if we're removing the last VM_LOCKED vma that maps the pages.
+munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages.
Before the unevictable/mlock changes, mlocking did not mark the pages in any
way, so unmapping them required no processing.
To munlock a range of memory under the unevictable/mlock infrastructure, the
-munmap() hander and task address space tear down function call
+munmap() handler and task address space tear-down function call
munlock_vma_pages_all(). The name reflects the observation that one always
-specifies the entire vma range when munlock()ing during unmap of a region.
-Because of the vma filtering when mlocking() regions, only "normal" vmas that
+specifies the entire VMA range when munlocking during unmap of a region.
+Because of the VMA filtering applied when mlocking regions, only "normal"
+VMAs that
actually contain mlocked pages will be passed to munlock_vma_pages_all().
-munlock_vma_pages_all() clears the VM_LOCKED vma flag and, like mlock_fixup()
+munlock_vma_pages_all() clears the VM_LOCKED VMA flag and, like mlock_fixup()
for the munlock case, calls __munlock_vma_pages_range() to walk the page table
-for the vma's memory range and munlock_vma_page() each resident page mapped by
-the vma. This effectively munlocks the page, only if this is the last
-VM_LOCKED vma that maps the page.
-
+for the VMA's memory range and munlock_vma_page() each resident page mapped by
+the VMA. This effectively munlocks the page, but only if this is the last
+VM_LOCKED VMA that maps the page.
-Mlocked Page: try_to_unmap()
-[Note: the code changes represented by this section are really quite small
-compared to the text to describe what happening and why, and to discuss the
-implications.]
+try_to_unmap()
+--------------
-Pages can, of course, be mapped into multiple vmas. Some of these vmas may
+Pages can, of course, be mapped into multiple VMAs. Some of these VMAs may
have VM_LOCKED flag set. It is possible for a page mapped into one or more
-VM_LOCKED vmas not to have the PG_mlocked flag set and therefore reside on one
-of the active or inactive LRU lists. This could happen if, for example, a
-task in the process of munlock()ing the page could not isolate the page from
-the LRU. As a result, vmscan/shrink_page_list() might encounter such a page
-as described in "Unevictable Pages and Vmscan [shrink_*_list()]". To
-handle this situation, try_to_unmap() has been enhanced to check for VM_LOCKED
-vmas while it is walking a page's reverse map.
+VM_LOCKED VMAs not to have the PG_mlocked flag set and therefore reside on one
+of the active or inactive LRU lists. This could happen if, for example, a task
+in the process of munlocking the page could not isolate the page from the LRU.
+As a result, vmscan/shrink_page_list() might encounter such a page as described
+in section "vmscan's handling of unevictable pages". To handle this situation,
+try_to_unmap() checks for VM_LOCKED VMAs while it is walking a page's reverse
+map.
try_to_unmap() is always called, by either vmscan for reclaim or for page
-migration, with the argument page locked and isolated from the LRU. BUG_ON()
-assertions enforce this requirement. Separate functions handle anonymous and
-mapped file pages, as these types of pages have different reverse map
-mechanisms.
-
- try_to_unmap_anon()
-
-To unmap anonymous pages, each vma in the list anchored in the anon_vma must be
-visited--at least until a VM_LOCKED vma is encountered. If the page is being
-unmapped for migration, VM_LOCKED vmas do not stop the process because mlocked
-pages are migratable. However, for reclaim, if the page is mapped into a
-VM_LOCKED vma, the scan stops. try_to_unmap() attempts to acquire the mmap
-semphore of the mm_struct to which the vma belongs in read mode. If this is
-successful, try_to_unmap() will mlock the page via mlock_vma_page()--we
-wouldn't have gotten to try_to_unmap() if the page were already mlocked--and
-will return SWAP_MLOCK, indicating that the page is unevictable. If the
-mmap semaphore cannot be acquired, we are not sure whether the page is really
-unevictable or not. In this case, try_to_unmap() will return SWAP_AGAIN.
-
- try_to_unmap_file() -- linear mappings
-
-Unmapping of a mapped file page works the same, except that the scan visits
-all vmas that maps the page's index/page offset in the page's mapping's
-reverse map priority search tree. It must also visit each vma in the page's
-mapping's non-linear list, if the list is non-empty. As for anonymous pages,
-on encountering a VM_LOCKED vma for a mapped file page, try_to_unmap() will
-attempt to acquire the associated mm_struct's mmap semaphore to mlock the page,
-returning SWAP_MLOCK if this is successful, and SWAP_AGAIN, if not.
-
- try_to_unmap_file() -- non-linear mappings
-
-If a page's mapping contains a non-empty non-linear mapping vma list, then
-try_to_un{map|lock}() must also visit each vma in that list to determine
-whether the page is mapped in a VM_LOCKED vma. Again, the scan must visit
-all vmas in the non-linear list to ensure that the pages is not/should not be
-mlocked. If a VM_LOCKED vma is found in the list, the scan could terminate.
-However, there is no easy way to determine whether the page is actually mapped
-in a given vma--either for unmapping or testing whether the VM_LOCKED vma
-actually pins the page.
-
-So, try_to_unmap_file() handles non-linear mappings by scanning a certain
-number of pages--a "cluster"--in each non-linear vma associated with the page's
-mapping, for each file mapped page that vmscan tries to unmap. If this happens
-to unmap the page we're trying to unmap, try_to_unmap() will notice this on
-return--(page_mapcount(page) == 0)--and return SWAP_SUCCESS. Otherwise, it
-will return SWAP_AGAIN, causing vmscan to recirculate this page. We take
-advantage of the cluster scan in try_to_unmap_cluster() as follows:
-
-For each non-linear vma, try_to_unmap_cluster() attempts to acquire the mmap
-semaphore of the associated mm_struct for read without blocking. If this
-attempt is successful and the vma is VM_LOCKED, try_to_unmap_cluster() will
-retain the mmap semaphore for the scan; otherwise it drops it here. Then,
-for each page in the cluster, if we're holding the mmap semaphore for a locked
-vma, try_to_unmap_cluster() calls mlock_vma_page() to mlock the page. This
-call is a no-op if the page is already locked, but will mlock any pages in
-the non-linear mapping that happen to be unlocked. If one of the pages so
-mlocked is the page passed in to try_to_unmap(), try_to_unmap_cluster() will
-return SWAP_MLOCK, rather than the default SWAP_AGAIN. This will allow vmscan
-to cull the page, rather than recirculating it on the inactive list. Again,
-if try_to_unmap_cluster() cannot acquire the vma's mmap sem, it returns
-SWAP_AGAIN, indicating that the page is mapped by a VM_LOCKED vma, but
-couldn't be mlocked.
-
-
-Mlocked pages: try_to_munlock() Reverse Map Scan
-
-TODO/FIXME: a better name might be page_mlocked()--analogous to the
-page_referenced() reverse map walker.
-
-When munlock_vma_page()--see "Mlocked Pages: munlock()/munlockall()
-System Call Handling" above--tries to munlock a page, it needs to
-determine whether or not the page is mapped by any VM_LOCKED vma, without
-actually attempting to unmap all ptes from the page. For this purpose, the
-unevictable/mlock infrastructure introduced a variant of try_to_unmap() called
-try_to_munlock().
+migration, with the argument page locked and isolated from the LRU. Separate
+functions handle anonymous and mapped file pages, as these types of pages have
+different reverse map mechanisms.
+
+ (*) try_to_unmap_anon()
+
+ To unmap anonymous pages, each VMA in the list anchored in the anon_vma
+ must be visited - at least until a VM_LOCKED VMA is encountered. If the
+ page is being unmapped for migration, VM_LOCKED VMAs do not stop the
+ process because mlocked pages are migratable. However, for reclaim, if
+ the page is mapped into a VM_LOCKED VMA, the scan stops.
+
+ try_to_unmap_anon() attempts to acquire in read mode the mmap semaphore of
+ the mm_struct to which the VMA belongs (see the sketch following this
+ list). If this is successful, it will
+ mlock the page via mlock_vma_page() - we wouldn't have gotten to
+ try_to_unmap_anon() if the page were already mlocked - and will return
+ SWAP_MLOCK, indicating that the page is unevictable.
+
+ If the mmap semaphore cannot be acquired, we are not sure whether the page
+ is really unevictable or not. In this case, try_to_unmap_anon() will
+ return SWAP_AGAIN.
+
+ (*) try_to_unmap_file() - linear mappings
+
+ Unmapping of a mapped file page works the same as for anonymous mappings,
+ except that the scan visits all VMAs that map the page's index/page offset
+ in the page's mapping's reverse map priority search tree. It also visits
+ each VMA in the page's mapping's non-linear list, if the list is
+ non-empty.
+
+ As for anonymous pages, on encountering a VM_LOCKED VMA for a mapped file
+ page, try_to_unmap_file() will attempt to acquire the associated
+ mm_struct's mmap semaphore to mlock the page, returning SWAP_MLOCK if this
+ is successful, and SWAP_AGAIN, if not.
+
+ (*) try_to_unmap_file() - non-linear mappings
+
+ If a page's mapping contains a non-empty non-linear mapping VMA list, then
+ try_to_un{map|lock}() must also visit each VMA in that list to determine
+ whether the page is mapped in a VM_LOCKED VMA. Again, the scan must visit
+ all VMAs in the non-linear list to ensure that the page is not/should not
+ be mlocked.
+
+ If a VM_LOCKED VMA is found in the list, the scan could terminate.
+ However, there is no easy way to determine whether the page is actually
+ mapped in a given VMA - either for unmapping or testing whether the
+ VM_LOCKED VMA actually pins the page.
+
+ try_to_unmap_file() handles non-linear mappings by scanning a certain
+ number of pages - a "cluster" - in each non-linear VMA associated with the
+ page's mapping, for each file mapped page that vmscan tries to unmap. If
+ this happens to unmap the page we're trying to unmap, try_to_unmap() will
+ notice this on return (page_mapcount(page) will be 0) and return
+ SWAP_SUCCESS. Otherwise, it will return SWAP_AGAIN, causing vmscan to
+ recirculate this page. We take advantage of the cluster scan in
+ try_to_unmap_cluster() as follows:
+
+ For each non-linear VMA, try_to_unmap_cluster() attempts to acquire the
+ mmap semaphore of the associated mm_struct for read without blocking.
+
+ If this attempt is successful and the VMA is VM_LOCKED,
+ try_to_unmap_cluster() will retain the mmap semaphore for the scan;
+ otherwise it drops it here.
+
+ Then, for each page in the cluster, if we're holding the mmap semaphore
+ for a locked VMA, try_to_unmap_cluster() calls mlock_vma_page() to
+ mlock the page. This call is a no-op if the page is already locked,
+ but will mlock any pages in the non-linear mapping that happen to be
+ unlocked.
+
+ If one of the pages so mlocked is the page passed in to try_to_unmap(),
+ try_to_unmap_cluster() will return SWAP_MLOCK, rather than the default
+ SWAP_AGAIN. This will allow vmscan to cull the page, rather than
+ recirculating it on the inactive list.
+
+ Again, if try_to_unmap_cluster() cannot acquire the VMA's mmap sem, it
+ returns SWAP_AGAIN, indicating that the page is mapped by a VM_LOCKED
+ VMA, but couldn't be mlocked.
+
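+
+A sketch of the trylock dance used when a VM_LOCKED VMA is encountered during
+the reverse map walk (patterned on the try_to_mlock_page() helper in the
+mm/rmap.c of this era; names are approximate):
+
+    static int try_to_mlock_page(struct page *page,
+                                 struct vm_area_struct *vma)
+    {
+        int mlocked = 0;
+
+        if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+            if (vma->vm_flags & VM_LOCKED) {
+                mlock_vma_page(page);
+                mlocked = 1;        /* caller returns SWAP_MLOCK */
+            }
+            up_read(&vma->vm_mm->mmap_sem);
+        }
+        return mlocked;             /* 0 => caller returns SWAP_AGAIN */
+    }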
+
+try_to_munlock() REVERSE MAP SCAN
+---------------------------------
+
+ [!] TODO/FIXME: a better name might be page_mlocked() - analogous to the
+ page_referenced() reverse map walker.
+
+When munlock_vma_page() [see section "munlock()/munlockall() System Call
+Handling" above] tries to munlock a page, it needs to determine whether or not
+the page is mapped by any VM_LOCKED VMA without actually attempting to unmap
+all PTEs from the page. For this purpose, the unevictable/mlock infrastructure
+introduced a variant of try_to_unmap() called try_to_munlock().
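+
+In essence, try_to_munlock() is a thin wrapper that invokes the reverse map
+walkers in "unlock" mode (a sketch assuming the mm/rmap.c of this era, where
+the second argument selects munlock processing):
+
+    int try_to_munlock(struct page *page)
+    {
+        VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+
+        if (PageAnon(page))
+            return try_to_unmap_anon(page, 1, 0);
+        else
+            return try_to_unmap_file(page, 1, 0);
+    }
+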
try_to_munlock() calls the same functions as try_to_unmap() for anonymous and
mapped file pages with an additional argument specifying unlock versus unmap
processing. Again, these functions walk the respective reverse maps looking
-for VM_LOCKED vmas. When such a vma is found for anonymous pages and file
+for VM_LOCKED VMAs. When such a VMA is found for anonymous pages and file
pages mapped in linear VMAs, as in the try_to_unmap() case, the functions
attempt to acquire the associated mmap semaphore, mlock the page via
mlock_vma_page() and return SWAP_MLOCK. This effectively undoes the
pre-clearing of the page's PG_mlocked done by munlock_vma_page().
-If try_to_unmap() is unable to acquire a VM_LOCKED vma's associated mmap
-semaphore, it will return SWAP_AGAIN. This will allow shrink_page_list()
-to recycle the page on the inactive list and hope that it has better luck
-with the page next time.
-
-For file pages mapped into non-linear vmas, the try_to_munlock() logic works
-slightly differently. On encountering a VM_LOCKED non-linear vma that might
-map the page, try_to_munlock() returns SWAP_AGAIN without actually mlocking
-the page. munlock_vma_page() will just leave the page unlocked and let
-vmscan deal with it--the usual fallback position.
-
-Note that try_to_munlock()'s reverse map walk must visit every vma in a pages'
-reverse map to determine that a page is NOT mapped into any VM_LOCKED vma.
-However, the scan can terminate when it encounters a VM_LOCKED vma and can
-successfully acquire the vma's mmap semphore for read and mlock the page.
-Although try_to_munlock() can be called many [very many!] times when
-munlock()ing a large region or tearing down a large address space that has been
-mlocked via mlockall(), overall this is a fairly rare event.
-
-Mlocked Page: Page Reclaim in shrink_*_list()
-
-shrink_active_list() culls any obviously unevictable pages--i.e.,
-!page_evictable(page, NULL)--diverting these to the unevictable lru
-list. However, shrink_active_list() only sees unevictable pages that
-made it onto the active/inactive lru lists. Note that these pages do not
-have PageUnevictable set--otherwise, they would be on the unevictable list and
-shrink_active_list would never see them.
+If try_to_unmap() is unable to acquire a VM_LOCKED VMA's associated mmap
+semaphore, it will return SWAP_AGAIN. This will allow shrink_page_list() to
+recycle the page on the inactive list and hope that it has better luck with the
+page next time.
+
+For file pages mapped into non-linear VMAs, the try_to_munlock() logic works
+slightly differently. On encountering a VM_LOCKED non-linear VMA that might
+map the page, try_to_munlock() returns SWAP_AGAIN without actually mlocking the
+page. munlock_vma_page() will just leave the page unlocked and let vmscan deal
+with it - the usual fallback position.
+
+Note that try_to_munlock()'s reverse map walk must visit every VMA in a page's
+reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA.
+However, the scan can terminate when it encounters a VM_LOCKED VMA and can
+successfully acquire the VMA's mmap semaphore for read and mlock the page.
+Although try_to_munlock() might be called a great many times when munlocking a
+large region or tearing down a large address space that has been mlocked via
+mlockall(), overall this is a fairly rare event.
+
+
+PAGE RECLAIM IN shrink_*_list()
+-------------------------------
+
+shrink_active_list() culls any obviously unevictable pages - i.e.
+!page_evictable(page, NULL) - diverting these to the unevictable list.
+However, shrink_active_list() only sees unevictable pages that made it onto the
+active/inactive LRU lists. Note that these pages do not have PageUnevictable
+set - otherwise they would be on the unevictable list and shrink_active_list()
+would never see them.
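+
+The test itself is cheap - a sketch of page_evictable() (assuming the
+mm/vmscan.c of this era):
+
+    int page_evictable(struct page *page, struct vm_area_struct *vma)
+    {
+        if (mapping_unevictable(page_mapping(page)))
+            return 0;               /* e.g. SHM_LOCK'd shmem */
+
+        if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+            return 0;
+
+        return 1;
+    }
+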
Some examples of these unevictable pages on the LRU lists are:
-1) ramfs pages that have been placed on the lru lists when first allocated.
+ (1) ramfs pages that have been placed on the LRU lists when first allocated.
+
+ (2) SHM_LOCK'd shared memory pages. shmctl(SHM_LOCK) does not attempt to
+ allocate or fault in the pages in the shared memory region. The pages are
+ allocated and faulted in when an application first accesses them after
+ SHM_LOCK'ing the segment.
-2) SHM_LOCKed shared memory pages. shmctl(SHM_LOCK) does not attempt to
- allocate or fault in the pages in the shared memory region. This happens
- when an application accesses the page the first time after SHM_LOCKing
- the segment.
+ (3) mlocked pages that could not be isolated from the LRU and moved to the
+ unevictable list in mlock_vma_page().
-3) Mlocked pages that could not be isolated from the lru and moved to the
- unevictable list in mlock_vma_page().
+ (4) Pages mapped into multiple VM_LOCKED VMAs, but try_to_munlock() couldn't
+ acquire the VMA's mmap semaphore to test the flags and set PageMlocked.
+ munlock_vma_page() was forced to let the page back on to the normal LRU
+ list for vmscan to handle.
-3) Pages mapped into multiple VM_LOCKED vmas, but try_to_munlock() couldn't
- acquire the vma's mmap semaphore to test the flags and set PageMlocked.
- munlock_vma_page() was forced to let the page back on to the normal
- LRU list for vmscan to handle.
+shrink_inactive_list() also diverts any unevictable pages that it finds on the
+inactive lists to the appropriate zone's unevictable list.
-shrink_inactive_list() also culls any unevictable pages that it finds on
-the inactive lists, again diverting them to the appropriate zone's unevictable
-lru list. shrink_inactive_list() should only see SHM_LOCKed pages that became
-SHM_LOCKed after shrink_active_list() had moved them to the inactive list, or
-pages mapped into VM_LOCKED vmas that munlock_vma_page() couldn't isolate from
-the lru to recheck via try_to_munlock(). shrink_inactive_list() won't notice
-the latter, but will pass on to shrink_page_list().
+shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd
+after shrink_active_list() had moved them to the inactive list, or pages mapped
+into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to
+recheck via try_to_munlock(). shrink_inactive_list() won't notice the latter,
+but will pass on to shrink_page_list().
shrink_page_list() again culls obviously unevictable pages that it could
encounter for similar reasons to shrink_inactive_list(). Pages mapped into
-VM_LOCKED vmas but without PG_mlocked set will make it all the way to
+VM_LOCKED VMAs but without PG_mlocked set will make it all the way to
try_to_unmap(). shrink_page_list() will divert them to the unevictable list
when try_to_unmap() returns SWAP_MLOCK, as discussed above.
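+
+The diversion point, roughly, is the return value switch in shrink_page_list()
+(a sketch assuming the mm/vmscan.c of this era):
+
+    switch (try_to_unmap(page, 0)) {
+    case SWAP_FAIL:
+        goto activate_locked;
+    case SWAP_AGAIN:
+        goto keep_locked;
+    case SWAP_MLOCK:
+        goto cull_mlocked;          /* divert to the unevictable list */
+    case SWAP_SUCCESS:
+        ;                           /* try to free the page below */
+    }
+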
diff --git a/MAINTAINERS b/MAINTAINERS
index 782ea4f0ca27..0cb20d821694 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -461,7 +461,7 @@ F: arch/x86/include/asm/amd_iommu*.h
AMD MICROCODE UPDATE SUPPORT
P: Andreas Herrmann
-M: andeas.herrmann3@amd.com
+M: andreas.herrmann3@amd.com
L: amd64-microcode@amd64.org
S: Supported
F: arch/x86/kernel/microcode_amd.c
@@ -1894,7 +1894,7 @@ F: fs/ecryptfs/
EDAC-CORE
P: Doug Thompson
M: dougthompson@xmission.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Supported
F: Documentation/edac.txt
@@ -1906,7 +1906,7 @@ P: Mark Gross
P: Doug Thompson
M: mark.gross@intel.com
M: dougthompson@xmission.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/e752x_edac.c
@@ -1914,7 +1914,7 @@ F: drivers/edac/e752x_edac.c
EDAC-E7XXX
P: Doug Thompson
M: dougthompson@xmission.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/e7xxx_edac.c
@@ -1922,7 +1922,7 @@ F: drivers/edac/e7xxx_edac.c
EDAC-I82443BXGX
P: Tim Small
M: tim@buttersideup.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i82443bxgx_edac.c
@@ -1930,7 +1930,7 @@ F: drivers/edac/i82443bxgx_edac.c
EDAC-I3000
P: Jason Uhlenkott
M: juhlenko@akamai.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i3000_edac.c
@@ -1938,7 +1938,7 @@ F: drivers/edac/i3000_edac.c
EDAC-I5000
P: Doug Thompson
M: dougthompson@xmission.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i5000_edac.c
@@ -1946,7 +1946,7 @@ F: drivers/edac/i5000_edac.c
EDAC-I5400
P: Mauro Carvalho Chehab
M: mchehab@redhat.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i5400_edac.c
@@ -1956,7 +1956,7 @@ P: Ranganathan Desikan
P: Arvind R.
M: rdesikan@jetzbroadband.com
M: arvind@acarlab.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i82975x_edac.c
@@ -1964,7 +1964,7 @@ F: drivers/edac/i82975x_edac.c
EDAC-PASEMI
P: Egor Martovetsky
M: egor@pasemi.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/pasemi_edac.c
@@ -1972,7 +1972,7 @@ F: drivers/edac/pasemi_edac.c
EDAC-R82600
P: Tim Small
M: tim@buttersideup.com
-L: bluesmoke-devel@lists.sourceforge.net
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/r82600_edac.c
@@ -2592,8 +2592,8 @@ S: Maintained
F: fs/hpfs/
HSO 3G MODEM DRIVER
-P: Denis Joseph Barrow
-M: d.barow@option.com
+P: Jan Dumon
+M: j.dumon@option.com
W: http://www.pharscape.org
S: Maintained
F: drivers/net/usb/hso.c
@@ -4979,8 +4979,8 @@ S: Maintained for 2.6.
F: Documentation/sgi-visws.txt
SGI XP/XPC/XPNET DRIVER
-P: Dean Nelson
-M: dcn@sgi.com
+P: Robin Holt
+M: holt@sgi.com
S: Maintained
F: drivers/misc/sgi-xp/
@@ -5314,7 +5314,9 @@ L: linux-sh@vger.kernel.org
W: http://www.linux-sh.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git
S: Supported
+F: Documentation/sh/
F: arch/sh/
+F: drivers/sh/
SUSPEND TO RAM
P: Len Brown
diff --git a/Makefile b/Makefile
index e5ad5fd96177..4f7e3ccde05b 100644
--- a/Makefile
+++ b/Makefile
@@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
- -e s/sh.*/sh/ )
+ -e s/sh[234].*/sh/ )
# Cross compiling and selecting different set of gcc/bin-utils
# ---------------------------------------------------------------------------
@@ -210,6 +210,11 @@ ifeq ($(ARCH),sparc64)
SRCARCH := sparc
endif
+# Additional ARCH settings for sh
+ifeq ($(ARCH),sh64)
+ SRCARCH := sh
+endif
+
# Where to locate arch specific headers
hdr-arch := $(SRCARCH)
@@ -567,7 +572,7 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
# disable pointer signed / unsigned warnings in gcc 4.0
KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
-# disable invalid "can't wrap" optimzations for signed / pointers
+# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fwrapv)
# revert to pre-gcc-4.4 behaviour of .eh_frame
@@ -597,6 +602,10 @@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
+ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
+LDFLAGS_vmlinux += -X
+endif
+
# Default kernel image to build when no specific target is given.
# KBUILD_IMAGE may be overruled on the command line or
# set in the environment
@@ -1587,5 +1596,5 @@ PHONY += FORCE
FORCE:
# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable se we can use it in if_changed and friends.
+# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index fc74e913c415..34a56a136efd 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -131,14 +131,14 @@ static struct musb_hdrc_platform_data musb_plat = {
.power = 50, /* up to 100 mA */
};
-static u64 musb_dmamask = DMA_32BIT_MASK;
+static u64 musb_dmamask = DMA_BIT_MASK(32);
static struct platform_device musb_device = {
.name = "musb_hdrc",
.id = -1,
.dev = {
.dma_mask = &musb_dmamask,
- .coherent_dma_mask = DMA_32BIT_MASK,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &musb_plat,
},
.num_resources = ARRAY_SIZE(musb_resources),
@@ -146,14 +146,14 @@ static struct platform_device musb_device = {
};
#ifdef CONFIG_NOP_USB_XCEIV
-static u64 nop_xceiv_dmamask = DMA_32BIT_MASK;
+static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32);
static struct platform_device nop_xceiv_device = {
.name = "nop_usb_xceiv",
.id = -1,
.dev = {
.dma_mask = &nop_xceiv_dmamask,
- .coherent_dma_mask = DMA_32BIT_MASK,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = NULL,
},
};
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 573f02c39a00..285aae8431c6 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb);
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
- if (dev->coherent_dma_mask != DMA_64BIT_MASK)
+ if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
gfp |= GFP_DMA;
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index 3cb50d17b62d..12ee8d510160 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -80,6 +80,9 @@ struct rb532_gpio_reg {
/* Compact Flash GPIO pin */
#define CF_GPIO_NUM 13
+/* S1 button GPIO (shared with UART0_SIN) */
+#define GPIO_BTN_S1 1
+
extern void rb532_gpio_set_ilevel(int bit, unsigned gpio);
extern void rb532_gpio_set_istat(int bit, unsigned gpio);
extern void rb532_gpio_set_func(unsigned gpio);
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 4a5f05b662ae..9f40e1ff9b4f 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -200,26 +200,9 @@ static struct platform_device rb532_led = {
.id = -1,
};
-static struct gpio_keys_button rb532_gpio_btn[] = {
- {
- .gpio = 1,
- .code = BTN_0,
- .desc = "S1",
- .active_low = 1,
- }
-};
-
-static struct gpio_keys_platform_data rb532_gpio_btn_data = {
- .buttons = rb532_gpio_btn,
- .nbuttons = ARRAY_SIZE(rb532_gpio_btn),
-};
-
static struct platform_device rb532_button = {
- .name = "gpio-keys",
+ .name = "rb532-button",
.id = -1,
- .dev = {
- .platform_data = &rb532_gpio_btn_data,
- }
};
static struct resource rb532_wdt_res[] = {
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 414c50e2e881..94942d60ddfd 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -29,7 +29,7 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
prop = of_get_property(np, "interrupts", NULL);
if (!prop)
continue;
- if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL)
+ if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
count++;
}
return count;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e4babecf934..e7390dd0283d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,6 +14,7 @@ config SUPERH
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IOREMAP_PROT if MMU
select HAVE_ARCH_TRACEHOOK
+ select HAVE_DMA_API_DEBUG
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
@@ -21,7 +22,7 @@ config SUPERH
<http://www.linux-sh.org/>.
config SUPERH32
- def_bool !SUPERH64
+ def_bool ARCH = "sh"
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_FUNCTION_TRACER
@@ -31,7 +32,7 @@ config SUPERH32
select ARCH_HIBERNATION_POSSIBLE if MMU
config SUPERH64
- def_bool y if CPU_SH5
+ def_bool ARCH = "sh64"
config ARCH_DEFCONFIG
string
@@ -187,6 +188,8 @@ config ARCH_SHMOBILE
bool
select ARCH_SUSPEND_POSSIBLE
+if SUPERH32
+
choice
prompt "Processor sub-type selection"
@@ -408,6 +411,15 @@ config CPU_SUBTYPE_SH7366
select SYS_SUPPORTS_NUMA
select SYS_SUPPORTS_CMT
+endchoice
+
+endif
+
+if SUPERH64
+
+choice
+ prompt "Processor sub-type selection"
+
# SH-5 Processor Support
config CPU_SUBTYPE_SH5_101
@@ -420,6 +432,8 @@ config CPU_SUBTYPE_SH5_103
endchoice
+endif
+
source "arch/sh/mm/Kconfig"
source "arch/sh/Kconfig.cpu"
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 912458f666eb..39e46919df14 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -349,6 +349,7 @@ static int ov7725_power(struct device *dev, int mode)
static struct ov772x_camera_info ov7725_info = {
.buswidth = SOCAM_DATAWIDTH_8,
.flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
+ .edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0),
.link = {
.power = ov7725_power,
},
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index 8367d1d789c3..beb88c4da2c1 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -2,6 +2,8 @@
* Renesas Technology Corp. SH7786 Urquell Support.
*
* Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on board-sh7785lcr.c
* Copyright (C) 2008 Yoshihiro Shimoda
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -21,6 +23,32 @@
#include <asm/heartbeat.h>
#include <asm/sizes.h>
+/*
+ * bit 1234 5678
+ *----------------------------
+ * SW1 0101 0010 -> Pck 33MHz version
+ * (1101 0010) Pck 66MHz version
+ * SW2 0x1x xxxx -> little endian
+ * 29bit mode
+ * SW47 0001 1000 -> CS0 : on-board flash
+ * CS1 : SRAM, registers, LAN, PCMCIA
+ * 38400 bps for SCIF1
+ *
+ * Address
+ * 0x00000000 - 0x04000000 (CS0) Nor Flash
+ * 0x04000000 - 0x04200000 (CS1) SRAM
+ * 0x05000000 - 0x05800000 (CS1) on board register
+ * 0x05800000 - 0x06000000 (CS1) LAN91C111
+ * 0x06000000 - 0x06400000 (CS1) PCMCIA
+ * 0x08000000 - 0x10000000 (CS2-CS3) DDR3
+ * 0x10000000 - 0x14000000 (CS4) PCIe
+ * 0x14000000 - 0x14800000 (CS5) Core0 LRAM/URAM
+ * 0x14800000 - 0x15000000 (CS5) Core1 LRAM/URAM
+ * 0x18000000 - 0x1C000000 (CS6) ATA/NAND-Flash
+ * 0x1C000000 - (CS7) SH7786 Control register
+ */
+
+/* HeartBeat */
static struct resource heartbeat_resources[] = {
[0] = {
.start = BOARDREG(SLEDR),
@@ -43,6 +71,7 @@ static struct platform_device heartbeat_device = {
.resource = heartbeat_resources,
};
+/* LAN91C111 */
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
};
@@ -69,6 +98,7 @@ static struct platform_device smc91x_eth_device = {
},
};
+/* Nor Flash */
static struct mtd_partition nor_flash_partitions[] = {
{
.name = "loader",
diff --git a/arch/sh/drivers/pci/ops-sh7785lcr.c b/arch/sh/drivers/pci/ops-sh7785lcr.c
index e8b7446a7c2b..fb0869f0bef8 100644
--- a/arch/sh/drivers/pci/ops-sh7785lcr.c
+++ b/arch/sh/drivers/pci/ops-sh7785lcr.c
@@ -48,8 +48,13 @@ EXPORT_SYMBOL(board_pci_channels);
static struct sh4_pci_address_map sh7785_pci_map = {
.window0 = {
+#if defined(CONFIG_32BIT)
+ .base = SH7780_32BIT_DDR_BASE_ADDR,
+ .size = 0x40000000,
+#else
.base = SH7780_CS0_BASE_ADDR,
.size = 0x20000000,
+#endif
},
.flags = SH4_PCIC_NO_RESET,
diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h
index 97b2c98f05c4..93adc7119b79 100644
--- a/arch/sh/drivers/pci/pci-sh7780.h
+++ b/arch/sh/drivers/pci/pci-sh7780.h
@@ -104,6 +104,8 @@
#define SH7780_CS5_BASE_ADDR (SH7780_CS4_BASE_ADDR + SH7780_MEM_REGION_SIZE)
#define SH7780_CS6_BASE_ADDR (SH7780_CS5_BASE_ADDR + SH7780_MEM_REGION_SIZE)
+#define SH7780_32BIT_DDR_BASE_ADDR 0x40000000
+
struct sh4_pci_address_map;
/* arch/sh/drivers/pci/pci-sh7780.c */
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index e36c7b870861..0d6ac7a1db49 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/dma-debug.h>
#include <asm/io.h>
static int __init pcibios_init(void)
@@ -43,6 +44,8 @@ static int __init pcibios_init(void)
pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);
+ dma_debug_add_bus(&pci_bus_type);
+
return 0;
}
subsys_initcall(pcibios_init);
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 627315ecdb52..ea9d4f41c9d2 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>
@@ -38,16 +39,26 @@ static inline dma_addr_t dma_map_single(struct device *dev,
void *ptr, size_t size,
enum dma_data_direction dir)
{
+ dma_addr_t addr = virt_to_phys(ptr);
+
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
- return virt_to_phys(ptr);
+ return addr;
#endif
dma_cache_sync(dev, ptr, size, dir);
- return virt_to_phys(ptr);
+ debug_dma_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, addr, true);
+
+ return addr;
}
-#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
@@ -59,12 +70,19 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
sg[i].dma_address = sg_phys(&sg[i]);
+ sg[i].dma_length = sg[i].length;
}
+ debug_dma_map_sg(dev, sg, nents, i, dir);
+
return nents;
}
-#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
@@ -111,6 +129,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
sg[i].dma_address = sg_phys(&sg[i]);
+ sg[i].dma_length = sg[i].length;
}
}
@@ -119,6 +138,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
enum dma_data_direction dir)
{
dma_sync_single(dev, dma_handle, size, dir);
+ debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
@@ -127,6 +147,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
enum dma_data_direction dir)
{
dma_sync_single(dev, dma_handle, size, dir);
+ debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -136,6 +157,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
enum dma_data_direction direction)
{
dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
+ debug_dma_sync_single_range_for_cpu(dev, dma_handle,
+ offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -145,6 +168,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
enum dma_data_direction direction)
{
dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
+ debug_dma_sync_single_range_for_device(dev, dma_handle,
+ offset, size, direction);
}
@@ -153,6 +178,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
enum dma_data_direction dir)
{
dma_sync_sg(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void dma_sync_sg_for_device(struct device *dev,
@@ -160,9 +186,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
enum dma_data_direction dir)
{
dma_sync_sg(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
-
static inline int dma_get_cache_alignment(void)
{
/*
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
index 2084d0373693..c693d268a413 100644
--- a/arch/sh/include/asm/scatterlist.h
+++ b/arch/sh/include/asm/scatterlist.h
@@ -5,12 +5,13 @@
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
+ unsigned long sg_magic;
#endif
- unsigned long page_link;
- unsigned int offset;/* for highmem, page offset */
- dma_addr_t dma_address;
- unsigned int length;
+ unsigned long page_link;
+ unsigned int offset; /* for highmem, page offset */
+ unsigned int length;
+ dma_addr_t dma_address;
+ unsigned int dma_length;
};
#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index a3f239545897..8489a0905a87 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -37,8 +37,11 @@
#define pcibus_to_node(bus) ((void)(bus), -1)
#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
CPU_MASK_ALL : \
- node_to_cpumask(pcibus_to_node(bus)) \
- )
+ node_to_cpumask(pcibus_to_node(bus)))
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL_PTR : \
+ cpumask_of_node(pcibus_to_node(bus)))
+
#endif
#include <asm-generic/topology.h>
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index d52c000cf924..2efb819e2db3 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -341,8 +341,10 @@
#define __NR_dup3 330
#define __NR_pipe2 331
#define __NR_inotify_init1 332
+#define __NR_preadv 333
+#define __NR_pwritev 334
-#define NR_syscalls 333
+#define NR_syscalls 335
#ifdef __KERNEL__
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 7c54e91753c1..6eb9d2934c0f 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -381,10 +381,12 @@
#define __NR_dup3 358
#define __NR_pipe2 359
#define __NR_inotify_init1 360
+#define __NR_preadv 361
+#define __NR_pwritev 362
#ifdef __KERNEL__
-#define NR_syscalls 361
+#define NR_syscalls 363
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 5a47e1cf442e..90e8cfff55fd 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -143,14 +143,14 @@ static void __init sh7786_usb_setup(void)
* Set the PHY and PLL enable bit
*/
__raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
- while (i-- &&
- ((__raw_readl(USBST) & ACT_PLL_STATUS) != ACT_PLL_STATUS))
+ while (i--) {
+ if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
+ /* Set the PHY RST bit */
+ __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
+ printk(KERN_INFO "sh7786 usb setup done\n");
+ break;
+ }
cpu_relax();
-
- if (i) {
- /* Set the PHY RST bit */
- __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
- printk(KERN_INFO "sh7786 usb setup done\n");
}
}
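The rewrite also fixes the old post-loop test: with `while (i-- && !ready)`, i ends at -1 on timeout, so the old `if (i)` branch performed the PHY reset even when the PLL never came up (and skipped it if the PLL came up on the very last iteration). The new shape is the standard bounded-poll idiom; in sketch form, with STATUS/READY/CTRL/ENABLE as placeholder names rather than registers from this patch:

	int i = 1000000;

	while (i--) {
		if ((__raw_readl(STATUS) & READY) == READY) {
			__raw_writel(ENABLE, CTRL);	/* act only on success */
			break;
		}
		cpu_relax();
	}
	/* falling out of the loop means timeout: do nothing */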
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index e67c1733e1b9..05202edd8e21 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -349,3 +349,5 @@ ENTRY(sys_call_table)
.long sys_dup3 /* 330 */
.long sys_pipe2
.long sys_inotify_init1
+ .long sys_preadv
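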
+ .long sys_pwritev
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 557cb91f5caf..a083609f9284 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -387,3 +387,5 @@ sys_call_table:
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1 /* 360 */
+ .long sys_preadv
+ .long sys_pwritev
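Until libc grows wrappers, the new entries can be exercised directly with syscall(2). A userspace sketch using the sh32 number defined above; the helper name is ours:

	#include <sys/syscall.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#define __NR_preadv 333		/* sh32 value from unistd_32.h */

	/* Positional scatter read; 32-bit ABIs pass the offset as two words */
	static ssize_t my_preadv(int fd, const struct iovec *iov, int cnt,
				 unsigned long long off)
	{
		return syscall(__NR_preadv, fd, iov, cnt,
			       (unsigned long)off, (unsigned long)(off >> 32));
	}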
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index edcd5fbf9651..e098ec158ddb 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -10,11 +10,22 @@
* for more details.
*/
#include <linux/mm.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
-#include <asm/io.h>
+
+#define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+static int __init dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+fs_initcall(dma_init);
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
@@ -45,6 +56,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
*dma_handle = virt_to_phys(ret);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
+
return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
@@ -56,12 +70,15 @@ void dma_free_coherent(struct device *dev, size_t size,
unsigned long pfn = dma_handle >> PAGE_SHIFT;
int k;
- if (!dma_release_from_coherent(dev, order, vaddr)) {
- WARN_ON(irqs_disabled()); /* for portability */
- for (k = 0; k < (1 << order); k++)
- __free_pages(pfn_to_page(pfn + k), 0);
- iounmap(vaddr);
- }
+ WARN_ON(irqs_disabled()); /* for portability */
+
+ if (dma_release_from_coherent(dev, order, vaddr))
+ return;
+
+ debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+ for (k = 0; k < (1 << order); k++)
+ __free_pages(pfn_to_page(pfn + k), 0);
+ iounmap(vaddr);
}
EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index dff3f0253aa8..ff9ead640c4a 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -117,7 +117,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
if (!strcmp(parent->name, "dma")) {
p = parport_pc_probe_port(base, base + 0x400,
op->irqs[0], PARPORT_DMA_NOFIFO,
- op->dev.parent->parent);
+ op->dev.parent->parent, 0);
if (!p)
return -ENOMEM;
dev_set_drvdata(&op->dev, p);
@@ -168,7 +168,8 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
p = parport_pc_probe_port(base, base + 0x400,
op->irqs[0],
slot,
- op->dev.parent);
+ op->dev.parent,
+ 0);
err = -ENOMEM;
if (!p)
goto out_disable_irq;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8c..73739322b6d0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
*/
extern void early_ioremap_init(void);
extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+ unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
#define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 34c52370f2fe..fcf4d92e7e04 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -352,6 +352,11 @@ struct i387_soft_struct {
u32 entry_eip;
};
+struct ymmh_struct {
+ /* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+ u32 ymmh_space[64];
+};
+
struct xsave_hdr_struct {
u64 xstate_bv;
u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
+ struct ymmh_struct ymmh;
/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d5cd6c586881..a4737dddfd58 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -50,7 +50,7 @@
#ifdef CONFIG_X86_64
#define NEED_PSE 0
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
-#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
+#define NEED_PGE 0
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa4..72e5a4491661 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
__u64 reserved2[5];
};
+struct _ymmh_state {
+ /* 16 * 16 bytes for each YMMH-reg */
+ __u32 ymmh_space[64];
+};
+
/*
* Extended state pointed by the fpstate pointer in the sigcontext.
* In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
struct _xstate {
struct _fpstate fpstate;
struct _xsave_hdr xstate_hdr;
+ struct _ymmh_state ymmh;
/* new processor state extensions go here */
};
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a918dde46b5..018a0a400799 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -124,7 +124,8 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define virt_to_pfn(v) (PFN_DOWN(__pa(v)))
+#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
static inline unsigned long pte_mfn(pte_t pte)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a9..727acc152344 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@
#define XSTATE_FP 0x1
#define XSTATE_SSE 0x2
+#define XSTATE_YMM 0x4
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
@@ -15,7 +16,7 @@
/*
* These are the features that the OS can handle currently.
*/
-#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 767fe7e46d68..a2789e42e162 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp)
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
-#ifdef CONFIG_X86_X2APIC
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
int apic, pin;
@@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
+#ifdef CONFIG_X86_X2APIC
static void ack_x2apic_level(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq)
*/
ack_APIC_irq();
+ if (irq_remapped(irq))
+ eoi_ioapic_irq(desc);
+
/* Now we can move and re-enable the irq */
if (unlikely(do_unmask_irq)) {
/* Only migrate the irq if the ack has been received.
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 9d3af380c6bd..837c2c4cc203 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -153,7 +153,8 @@ struct drv_cmd {
u32 val;
};
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
u32 h;
@@ -170,10 +171,10 @@ static long do_drv_read(void *_cmd)
default:
break;
}
- return 0;
}
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
u32 lo, hi;
@@ -192,23 +193,18 @@ static long do_drv_write(void *_cmd)
default:
break;
}
- return 0;
}
static void drv_read(struct drv_cmd *cmd)
{
cmd->val = 0;
- work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+ smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
}
static void drv_write(struct drv_cmd *cmd)
{
- unsigned int i;
-
- for_each_cpu(i, cmd->mask) {
- work_on_cpu(i, do_drv_write, cmd);
- }
+ smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
}
static u32 get_cur_val(const struct cpumask *mask)
@@ -252,15 +248,13 @@ struct perf_pair {
} aperf, mperf;
};
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
{
struct perf_pair *cur = _cur;
rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
-
- return 0;
}
/*
@@ -283,7 +277,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
unsigned int perf_percent;
unsigned int retval;
- if (!work_on_cpu(cpu, read_measured_perf_ctrs, &readin))
+ if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
return 0;
cur.aperf.whole = readin.aperf.whole -
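The work_on_cpu() to smp_call_function_single() conversion changes the execution context: the callback now runs in IPI context on the target CPU, so it must not sleep, which is why the helpers were reworked to return void. The idiom in isolation (sketch; the struct and function names are assumed):

	struct msr_pair {
		u32 lo, hi;
	};

	/* Runs on the target CPU in IPI context: keep it short, never sleep */
	static void read_aperf(void *_p)
	{
		struct msr_pair *p = _p;

		rdmsr(MSR_IA32_APERF, p->lo, p->hi);
	}

	/* caller: */
	struct msr_pair p;
	smp_call_function_single(cpu, read_aperf, &p, 1);	/* 1 == wait */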
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf8..70fd7e414c15 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
__get_smp_config(0);
}
-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
{
unsigned long size = get_mpc_size(mpf->physptr);
#ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
int i;
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
}
}
#else /* CONFIG_X86_IO_APIC */
-static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
+static
+inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */
static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e94..0a5b04aa98f1 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -324,7 +324,7 @@ void __ref xsave_cntxt_init(void)
}
/*
- * for now OS knows only about FP/SSE
+ * Support only the state known to the OS.
*/
pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
xsave_init();
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index be54176e9eb2..6340cef6798a 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1;
}
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
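A typical caller of the interface documented above looks like the sketch below; uaddr is assumed to come from the caller:

	struct page *pages[16];
	int got, i;

	/* pin up to 16 user pages for writing, avoiding mmap_sem if possible */
	got = get_user_pages_fast(uaddr, 16, 1, pages);
	if (got < 0)
		return got;		/* nothing was pinned */

	for (i = 0; i < got; i++) {	/* may be fewer than requested */
		void *kaddr = kmap(pages[i]);
		/* ... access the page via kaddr ... */
		kunmap(pages[i]);
		put_page(pages[i]);	/* release the pin */
	}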
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0dfa09d69e80..09daebfdb11c 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -547,7 +547,7 @@ void __init early_ioremap_reset(void)
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags)
+ phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *pte;
@@ -566,7 +566,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
- unsigned long phys, pgprot_t prot)
+ phys_addr_t phys, pgprot_t prot)
{
if (after_paging_init)
__set_fixmap(idx, phys, prot);
@@ -607,9 +607,10 @@ static int __init check_early_ioremap_leak(void)
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
-__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
- unsigned long offset, last_addr;
+ unsigned long offset;
+ resource_size_t last_addr;
unsigned int nrpages;
enum fixed_addresses idx0, idx;
int i, slot;
@@ -625,15 +626,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
}
if (slot < 0) {
- printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
- phys_addr, size);
+ printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
+ (u64)phys_addr, size);
WARN_ON(1);
return NULL;
}
if (early_ioremap_debug) {
- printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
- phys_addr, size, slot);
+ printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
+ (u64)phys_addr, size, slot);
dump_stack();
}
@@ -680,13 +681,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
}
/* Remap an IO device */
-void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
{
return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}
/* Remap memory */
-void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_memremap(resource_size_t phys_addr, unsigned long size)
{
return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 640339ee4fb2..c009a241d562 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,7 +31,7 @@
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;
-void __cpuinit pat_disable(const char *reason)
+static inline void pat_disable(const char *reason)
{
pat_enabled = 0;
printk(KERN_INFO "%s\n", reason);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 82cd39a6cbd3..f09e8c36ee80 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
+#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/setup.h>
#include <asm/desc.h>
@@ -168,21 +169,23 @@ static void __init xen_banner(void)
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
+static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
+static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
+
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
unsigned int *cx, unsigned int *dx)
{
+ unsigned maskecx = ~0;
unsigned maskedx = ~0;
/*
* Mask out inconvenient features, to try and disable as many
* unsupported kernel subsystems as possible.
*/
- if (*ax == 1)
- maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */
- (1 << X86_FEATURE_ACPI) | /* disable ACPI */
- (1 << X86_FEATURE_MCE) | /* disable MCE */
- (1 << X86_FEATURE_MCA) | /* disable MCA */
- (1 << X86_FEATURE_ACC)); /* thermal monitoring */
+ if (*ax == 1) {
+ maskecx = cpuid_leaf1_ecx_mask;
+ maskedx = cpuid_leaf1_edx_mask;
+ }
asm(XEN_EMULATE_PREFIX "cpuid"
: "=a" (*ax),
@@ -190,9 +193,43 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
"=c" (*cx),
"=d" (*dx)
: "0" (*ax), "2" (*cx));
+
+ *cx &= maskecx;
*dx &= maskedx;
}
+static __init void xen_init_cpuid_mask(void)
+{
+ unsigned int ax, bx, cx, dx;
+
+ cpuid_leaf1_edx_mask =
+ ~((1 << X86_FEATURE_MCE) | /* disable MCE */
+ (1 << X86_FEATURE_MCA) | /* disable MCA */
+ (1 << X86_FEATURE_ACC)); /* thermal monitoring */
+
+ if (!xen_initial_domain())
+ cpuid_leaf1_edx_mask &=
+ ~((1 << X86_FEATURE_APIC) | /* disable local APIC */
+ (1 << X86_FEATURE_ACPI)); /* disable ACPI */
+
+ ax = 1;
+ xen_cpuid(&ax, &bx, &cx, &dx);
+
+ /* cpuid claims we support xsave; try enabling it to see what happens */
+ if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
+ unsigned long cr4;
+
+ set_in_cr4(X86_CR4_OSXSAVE);
+
+ cr4 = read_cr4();
+
+ if ((cr4 & X86_CR4_OSXSAVE) == 0)
+ cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
+
+ clear_in_cr4(X86_CR4_OSXSAVE);
+ }
+}
+
static void xen_set_debugreg(int reg, unsigned long val)
{
HYPERVISOR_set_debugreg(reg, val);
@@ -284,12 +321,11 @@ static void xen_set_ldt(const void *addr, unsigned entries)
static void xen_load_gdt(const struct desc_ptr *dtr)
{
- unsigned long *frames;
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ unsigned long frames[pages];
int f;
- struct multicall_space mcs;
/* A GDT can be up to 64k in size, which corresponds to 8192
8-byte entries, or 16 4k pages.. */
@@ -297,19 +333,26 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
BUG_ON(size > 65536);
BUG_ON(va & ~PAGE_MASK);
- mcs = xen_mc_entry(sizeof(*frames) * pages);
- frames = mcs.args;
-
for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
- frames[f] = arbitrary_virt_to_mfn((void *)va);
+ int level;
+ pte_t *ptep = lookup_address(va, &level);
+ unsigned long pfn, mfn;
+ void *virt;
+
+ BUG_ON(ptep == NULL);
+
+ pfn = pte_pfn(*ptep);
+ mfn = pfn_to_mfn(pfn);
+ virt = __va(PFN_PHYS(pfn));
+
+ frames[f] = mfn;
make_lowmem_page_readonly((void *)va);
- make_lowmem_page_readonly(mfn_to_virt(frames[f]));
+ make_lowmem_page_readonly(virt);
}
- MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));
-
- xen_mc_issue(PARAVIRT_LAZY_CPU);
+ if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+ BUG();
}
static void load_TLS_descriptor(struct thread_struct *t,
@@ -385,7 +428,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
static int cvt_gate_to_trap(int vector, const gate_desc *val,
struct trap_info *info)
{
- if (val->type != 0xf && val->type != 0xe)
+ if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
return 0;
info->vector = vector;
@@ -393,8 +436,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
info->cs = gate_segment(*val);
info->flags = val->dpl;
/* interrupt gates clear IF */
- if (val->type == 0xe)
- info->flags |= 4;
+ if (val->type == GATE_INTERRUPT)
+ info->flags |= 1 << 2;
return 1;
}
@@ -872,7 +915,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
.emergency_restart = xen_emergency_restart,
};
-
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
@@ -897,6 +939,8 @@ asmlinkage void __init xen_start_kernel(void)
xen_init_irq_ops();
+ xen_init_cpuid_mask();
+
#ifdef CONFIG_X86_LOCAL_APIC
/*
* set up the basic apic ops.
@@ -938,6 +982,11 @@ asmlinkage void __init xen_start_kernel(void)
if (!xen_initial_domain())
__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+#ifdef CONFIG_X86_64
+ /* Work out if we support NX */
+ check_efer();
+#endif
+
/* Don't do the full vcpu_info placement stuff until we have a
possible map and a non-dummy shared_info. */
per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2a81838a9ab7..9842b1212407 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -184,7 +184,7 @@ static inline unsigned p2m_index(unsigned long pfn)
}
/* Build the parallel p2m_top_mfn structures */
-void xen_setup_mfn_list_list(void)
+static void __init xen_build_mfn_list_list(void)
{
unsigned pfn, idx;
@@ -198,7 +198,10 @@ void xen_setup_mfn_list_list(void)
unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
}
+}
+void xen_setup_mfn_list_list(void)
+{
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
@@ -218,6 +221,8 @@ void __init xen_build_dynamic_phys_to_machine(void)
p2m_top[topidx] = &mfn_list[pfn];
}
+
+ xen_build_mfn_list_list();
}
unsigned long get_phys_to_machine(unsigned long pfn)
@@ -233,47 +238,74 @@ unsigned long get_phys_to_machine(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
-static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
+/* install a new p2m_top page */
+bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
{
- unsigned long *p;
+ unsigned topidx = p2m_top_index(pfn);
+ unsigned long **pfnp, *mfnp;
unsigned i;
- p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
- BUG_ON(p == NULL);
+ pfnp = &p2m_top[topidx];
+ mfnp = &p2m_top_mfn[topidx];
for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
p[i] = INVALID_P2M_ENTRY;
- if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
- free_page((unsigned long)p);
- else
+ if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
*mfnp = virt_to_mfn(p);
+ return true;
+ }
+
+ return false;
}
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+static void alloc_p2m(unsigned long pfn)
{
- unsigned topidx, idx;
+ unsigned long *p;
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return;
- }
+ p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
+ BUG_ON(p == NULL);
+
+ if (!install_p2mtop_page(pfn, p))
+ free_page((unsigned long)p);
+}
+
+/* Try to install p2m mapping; fail if intermediate bits missing */
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ unsigned topidx, idx;
if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
- return;
+ return true;
}
topidx = p2m_top_index(pfn);
if (p2m_top[topidx] == p2m_missing) {
- /* no need to allocate a page to store an invalid entry */
if (mfn == INVALID_P2M_ENTRY)
- return;
- alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
+ return true;
+ return false;
}
idx = p2m_index(pfn);
p2m_top[topidx][idx] = mfn;
+
+ return true;
+}
+
+void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+ return;
+ }
+
+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+ alloc_p2m(pfn);
+
+ if (!__set_phys_to_machine(pfn, mfn))
+ BUG();
+ }
}
unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -987,7 +1019,7 @@ static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
return 0;
}
-void __init xen_mark_init_mm_pinned(void)
+static void __init xen_mark_init_mm_pinned(void)
{
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}
@@ -1270,8 +1302,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
} *args;
struct multicall_space mcs;
- BUG_ON(cpumask_empty(cpus));
- BUG_ON(!mm);
+ if (cpumask_empty(cpus))
+ return; /* nothing to do */
mcs = xen_mc_entry(sizeof(*args));
args = mcs.args;
@@ -1438,6 +1470,15 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
}
#endif
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+ struct mmuext_op op;
+ op.cmd = cmd;
+ op.arg1.mfn = pfn_to_mfn(pfn);
+ if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+ BUG();
+}
+
/* Early in boot, while setting up the initial pagetable, assume
everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
@@ -1446,22 +1487,29 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
BUG_ON(mem_map); /* should only be used early */
#endif
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+ pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+}
+
+/* Used for pmd and pud */
+static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
+{
+#ifdef CONFIG_FLATMEM
+ BUG_ON(mem_map); /* should only be used early */
+#endif
+ make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}
/* Early release_pte assumes that all pts are pinned, since there's
only init_mm and anything attached to that is pinned. */
-static void xen_release_pte_init(unsigned long pfn)
+static __init void xen_release_pte_init(unsigned long pfn)
{
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
-static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+static __init void xen_release_pmd_init(unsigned long pfn)
{
- struct mmuext_op op;
- op.cmd = cmd;
- op.arg1.mfn = pfn_to_mfn(pfn);
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
+ make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
/* This needs to make sure the new pte page is pinned iff its being
@@ -1773,6 +1821,9 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#ifdef CONFIG_X86_LOCAL_APIC
case FIX_APIC_BASE: /* maps dummy local APIC */
#endif
+ case FIX_TEXT_POKE0:
+ case FIX_TEXT_POKE1:
+ /* All local page mappings */
pte = pfn_pte(phys, prot);
break;
@@ -1819,7 +1870,6 @@ __init void xen_post_allocator_init(void)
xen_mark_init_mm_pinned();
}
-
const struct pv_mmu_ops xen_mmu_ops __initdata = {
.pagetable_setup_start = xen_pagetable_setup_start,
.pagetable_setup_done = xen_pagetable_setup_done,
@@ -1843,9 +1893,9 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
.alloc_pte = xen_alloc_pte_init,
.release_pte = xen_release_pte_init,
- .alloc_pmd = xen_alloc_pte_init,
+ .alloc_pmd = xen_alloc_pmd_init,
.alloc_pmd_clone = paravirt_nop,
- .release_pmd = xen_release_pte_init,
+ .release_pmd = xen_release_pmd_init,
#ifdef CONFIG_HIGHPTE
.kmap_atomic_pte = xen_kmap_atomic_pte,
@@ -1883,8 +1933,8 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
- .alloc_pud = xen_alloc_pte_init,
- .release_pud = xen_release_pte_init,
+ .alloc_pud = xen_alloc_pmd_init,
+ .release_pud = xen_release_pmd_init,
#endif /* PAGETABLE_LEVELS == 4 */
.activate_mm = xen_activate_mm,
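The p2m rework above follows an optimistic-update shape: __set_phys_to_machine() is the cheap attempt that fails when the intermediate page is missing, alloc_p2m() populates it, and the attempt is retried once. Abstracted into a sketch (try_set/alloc_level are placeholders, not code from this patch):

	static void set_entry(unsigned long key, unsigned long val)
	{
		if (unlikely(!try_set(key, val))) {	/* fast path missed */
			alloc_level(key);		/* fill missing level */
			if (!try_set(key, val))		/* must succeed now */
				BUG();
		}
	}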
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 24d1b44a337d..da7302624897 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -11,6 +11,9 @@ enum pt_level {
};
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
+
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 585a6e330837..429834ec1687 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -317,7 +317,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
BUG_ON(rc);
while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
- HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+ HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
barrier();
}
@@ -422,7 +422,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
/* Make sure other vcpus get a chance to run if they need to. */
for_each_cpu(cpu, mask) {
if (xen_vcpu_stolen(cpu)) {
- HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+ HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
break;
}
}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2f5ef2632ea2..20139464943c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -57,8 +57,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
bool xen_vcpu_stolen(int vcpu);
-void xen_mark_init_mm_pinned(void);
-
void xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 57be6bea48eb..08186ecbaf8d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -114,6 +114,7 @@ enum {
board_ahci_sb700 = 5, /* for SB700 and SB800 */
board_ahci_mcp65 = 6,
board_ahci_nopmp = 7,
+ board_ahci_yesncq = 8,
/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
@@ -469,6 +470,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ /* board_ahci_yesncq */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
};
static const struct pci_device_id ahci_pci_tbl[] = {
@@ -535,30 +544,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
- { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e7ea77cf6069..065507c46644 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6110,13 +6110,11 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
ata_port_printk(ap, KERN_INFO, "DUMMY\n");
}
- /* perform each probe synchronously */
- DPRINTK("probe begin\n");
+ /* perform each probe asynchronously */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
async_schedule(async_port_probe, ap);
}
- DPRINTK("probe end\n");
return 0;
}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 98e8c50703b3..bdd43c7f432e 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -566,7 +566,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int printed_version;
unsigned int i;
int rc;
- struct ata_host *host;
+ struct ata_host *host = NULL;
int board_id = (int) ent->driver_data;
const unsigned *bar_sizes;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index be204308cc1b..9359613addc5 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1059,7 +1059,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto out;
}
- err = pci_set_dma_mask(dev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
if (err) {
dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n");
goto out;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 0ef6f08aa6ea..4d4d5e0d3fa6 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3505,7 +3505,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
/* The Inbound Post Queue only accepts 32-bit physical addresses for the
CCISS commands, so they must be allocated from the lower 4GiB of
memory. */
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
iounmap(vaddr);
return -ENOMEM;
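These conversions are mechanical: DMA_BIT_MASK(n) yields a mask with the low n bits set, special-casing n == 64 to avoid an undefined full-width shift, so the fixed DMA_32BIT_MASK style constants become DMA_BIT_MASK(32) and so on:

	/* From linux/dma-mapping.h, for reference:
	 *   #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
	 *
	 * Usage sketch: try the device's native width, fall back to 32 bits */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(40)) &&
	    pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA mask */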
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 9d9490e22e07..3686912427ba 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2131,6 +2131,8 @@ static const struct intel_driver_description {
{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
&intel_845_driver, &intel_830_driver },
{ PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
+ &intel_845_driver, &intel_830_driver },
{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
&intel_845_driver, &intel_830_driver },
@@ -2355,6 +2357,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82845_HB),
ID(PCI_DEVICE_ID_INTEL_82845G_HB),
ID(PCI_DEVICE_ID_INTEL_82850_HB),
+ ID(PCI_DEVICE_ID_INTEL_82854_HB),
ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
ID(PCI_DEVICE_ID_INTEL_82860_HB),
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 6de020d078e1..b0a6a3e51924 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -35,7 +35,6 @@
#include <linux/vt_kern.h>
#include <linux/workqueue.h>
#include <linux/kexec.h>
-#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/oom.h>
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 28f2c3f959b5..6ad95c8d6363 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -767,11 +767,19 @@ static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
pci_write_config_word(pdev, offset, value);
}
-/* write all or some bits in a dword-register*/
+/*
+ * pci_write_bits32
+ *
+ * edac local routine to do pci_write_config_dword, but adds
+ * a mask parameter. If mask is all ones, ignore the mask.
+ * Otherwise utilize the mask to isolate specified bits
+ *
+ * write all or some bits in a dword-register
+ */
static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
u32 value, u32 mask)
{
- if (mask != 0xffff) {
+ if (mask != 0xffffffff) {
u32 buf;
pci_read_config_dword(pdev, offset, &buf);
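Usage of the helper documented above, in sketch form; the register offset and bit positions are placeholders:

	/* Set bits 8-9 of a config dword, leaving all other bits intact:
	 * the helper reads, merges under the mask, and writes back. */
	pci_write_bits32(pdev, 0x40, 0x3 << 8, 0x3 << 8);

	/* An all-ones mask skips the read-modify-write and stores value */
	pci_write_bits32(pdev, 0x40, new_value, 0xffffffff);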
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index ca9113e1c106..a7d2c717d033 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -389,7 +389,7 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info
*/
static void edac_device_workq_function(struct work_struct *work_req)
{
- struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct delayed_work *d_work = to_delayed_work(work_req);
struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
mutex_lock(&device_ctls_mutex);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 25d66940b4fa..335b7ebdb11c 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -260,7 +260,7 @@ static int edac_mc_assert_error_check_and_clear(void)
*/
static void edac_mc_workq_function(struct work_struct *work_req)
{
- struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct delayed_work *d_work = to_delayed_work(work_req);
struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
mutex_lock(&mem_ctls_mutex);
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 5b150aea703a..30b585b1d60b 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(edac_pci_find);
*/
static void edac_pci_workq_function(struct work_struct *work_req)
{
- struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct delayed_work *d_work = to_delayed_work(work_req);
struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
int msec;
unsigned long delay;
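The three conversions replace a cast that silently assumed work is the first member of struct delayed_work with the container_of()-based to_delayed_work(). The pattern for a self-rearming poller looks like this (sketch; my_* names are assumed):

	static void my_workq_function(struct work_struct *work_req)
	{
		/* correct even if 'work' is not the first member */
		struct delayed_work *d_work = to_delayed_work(work_req);
		struct my_ctl_info *ctl = container_of(d_work,
						struct my_ctl_info, work);

		my_poll(ctl);
		queue_delayed_work(my_wq, &ctl->work, msecs_to_jiffies(1000));
	}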
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0e8a9185f676..d73f5f473e38 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -692,6 +692,16 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config SENSORS_SHT15
+ tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
+ depends on GENERIC_GPIO
+ help
+ If you say yes here you get support for the Sensirion SHT10, SHT11,
+ SHT15, SHT71, SHT75 humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called sht15.
+
config SENSORS_SIS5595
tristate "Silicon Integrated Systems Corp. SiS5595"
depends on PCI
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 1d3757837b4f..0ae26984ba45 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
+obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 55d3dc565be6..abca7e9f953b 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -34,7 +34,6 @@
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/freezer.h>
-#include <linux/version.h>
#include <linux/uaccess.h>
#include <linux/leds.h>
#include <acpi/acpi_drivers.h>
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
new file mode 100644
index 000000000000..6cbdc2fea734
--- /dev/null
+++ b/drivers/hwmon/sht15.c
@@ -0,0 +1,692 @@
+/*
+ * sht15.c - support for the SHT15 Temperature and Humidity Sensor
+ *
+ * Copyright (c) 2009 Jonathan Cameron
+ *
+ * Copyright (c) 2007 Wouter Horre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Currently ignoring checksum on readings.
+ * Default resolution only (14bit temp, 12bit humidity)
+ * Ignoring battery status.
+ * Heater not enabled.
+ * Timings are all conservative.
+ *
+ * Data sheet available (1/2009) at
+ * http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf
+ *
+ * Regulator supply name = vcc
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/sht15.h>
+#include <linux/regulator/consumer.h>
+#include <asm/atomic.h>
+
+#define SHT15_MEASURE_TEMP 3
+#define SHT15_MEASURE_RH 5
+
+#define SHT15_READING_NOTHING 0
+#define SHT15_READING_TEMP 1
+#define SHT15_READING_HUMID 2
+
+/* Min timings in nsecs */
+#define SHT15_TSCKL 100 /* clock low */
+#define SHT15_TSCKH 100 /* clock high */
+#define SHT15_TSU 150 /* data setup time */
+
+/**
+ * struct sht15_temppair - elements of voltage dependent temp calc
+ * @vdd: supply voltage in microvolts
+ * @d1: see data sheet
+ */
+struct sht15_temppair {
+ int vdd; /* microvolts */
+ int d1;
+};
+
+/* Table 9 from data sheet - relates temperature calculation
+ * to supply voltage.
+ */
+static const struct sht15_temppair temppoints[] = {
+ { 2500000, -39400 },
+ { 3000000, -39600 },
+ { 3500000, -39700 },
+ { 4000000, -39800 },
+ { 5000000, -40100 },
+};
+
+/**
+ * struct sht15_data - device instance specific data
+ * @pdata: platform data (gpio's etc)
+ * @read_work: bh of interrupt handler
+ * @wait_queue: wait queue for getting values from device
+ * @val_temp: last temperature value read from device
+ * @val_humid: last humidity value read from device
+ * @flag: status flag used to identify what the last request was
+ * @valid: are the current stored values valid (start condition)
+ * @last_updat: time of last update
+ * @read_lock: mutex to ensure only one read in progress
+ * at a time.
+ * @dev: associated device structure
+ * @hwmon_dev: device associated with hwmon subsystem
+ * @reg: associated regulator (if specified)
+ * @nb: notifier block to handle notifications of voltage changes
+ * @supply_uV: local copy of supply voltage used to allow
+ * use of regulator consumer if available
+ * @supply_uV_valid: cleared when the regulator reports a voltage
+ * change; until update_supply_work refreshes supply_uV, any
+ * calculations based upon it may be stale.
+ * @update_supply_work: work struct that is used to update the supply_uV
+ * @interrupt_handled: flag used to indicate a handler has been scheduled
+ */
+struct sht15_data {
+ struct sht15_platform_data *pdata;
+ struct work_struct read_work;
+ wait_queue_head_t wait_queue;
+ uint16_t val_temp;
+ uint16_t val_humid;
+ u8 flag;
+ u8 valid;
+ unsigned long last_updat;
+ struct mutex read_lock;
+ struct device *dev;
+ struct device *hwmon_dev;
+ struct regulator *reg;
+ struct notifier_block nb;
+ int supply_uV;
+ int supply_uV_valid;
+ struct work_struct update_supply_work;
+ atomic_t interrupt_handled;
+};
+
+/**
+ * sht15_connection_reset() - reset the comms interface
+ * @data: sht15 specific data
+ *
+ * This implements section 3.4 of the data sheet
+ */
+static void sht15_connection_reset(struct sht15_data *data)
+{
+ int i;
+ gpio_direction_output(data->pdata->gpio_data, 1);
+ ndelay(SHT15_TSCKL);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+ for (i = 0; i < 9; ++i) {
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSCKH);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+ }
+}
+/**
+ * sht15_send_bit() - send an individual bit to the device
+ * @data: device state data
+ * @val: value of bit to be sent
+ **/
+static inline void sht15_send_bit(struct sht15_data *data, int val)
+{
+
+ gpio_set_value(data->pdata->gpio_data, val);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSCKH);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL); /* clock low time */
+}
+
+/**
+ * sht15_transmission_start() - specific sequence for new transmission
+ *
+ * @data: device state data
+ * Timings for this are not documented on the data sheet, so very
+ * conservative values are used in this implementation. This
+ * implements figure 12 of the data sheet.
+ **/
+static void sht15_transmission_start(struct sht15_data *data)
+{
+ /* ensure data is high and output */
+ gpio_direction_output(data->pdata->gpio_data, 1);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSCKH);
+ gpio_set_value(data->pdata->gpio_data, 0);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSCKH);
+ gpio_set_value(data->pdata->gpio_data, 1);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+}
+/**
+ * sht15_send_byte() - send a single byte to the device
+ * @data: device state
+ * @byte: value to be sent
+ **/
+static void sht15_send_byte(struct sht15_data *data, u8 byte)
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ sht15_send_bit(data, !!(byte & 0x80));
+ byte <<= 1;
+ }
+}
+/**
+ * sht15_wait_for_response() - checks for ack from device
+ * @data: device state
+ **/
+static int sht15_wait_for_response(struct sht15_data *data)
+{
+ gpio_direction_input(data->pdata->gpio_data);
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSCKH);
+ if (gpio_get_value(data->pdata->gpio_data)) {
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ dev_err(data->dev, "Command not acknowledged\n");
+ sht15_connection_reset(data);
+ return -EIO;
+ }
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+ return 0;
+}
+
+/**
+ * sht15_send_cmd() - Sends a command to the device.
+ * @data: device state
+ * @cmd: command byte to be sent
+ *
+ * On entry, sck is output low, data is output pulled high
+ * and the interrupt is disabled.
+ **/
+static int sht15_send_cmd(struct sht15_data *data, u8 cmd)
+{
+ int ret = 0;
+ sht15_transmission_start(data);
+ sht15_send_byte(data, cmd);
+ ret = sht15_wait_for_response(data);
+ return ret;
+}
+/**
+ * sht15_update_single_val() - get a new value from device
+ * @data: device instance specific data
+ * @command: command sent to request value
+ * @timeout_msecs: timeout after which comms are assumed
+ * to have failed and are reset.
+ **/
+static inline int sht15_update_single_val(struct sht15_data *data,
+ int command,
+ int timeout_msecs)
+{
+ int ret;
+ ret = sht15_send_cmd(data, command);
+ if (ret)
+ return ret;
+
+ gpio_direction_input(data->pdata->gpio_data);
+ atomic_set(&data->interrupt_handled, 0);
+
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ if (gpio_get_value(data->pdata->gpio_data) == 0) {
+ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ /* Only relevant if the interrupt hasn't occurred. */
+ if (!atomic_read(&data->interrupt_handled))
+ schedule_work(&data->read_work);
+ }
+ ret = wait_event_timeout(data->wait_queue,
+ (data->flag == SHT15_READING_NOTHING),
+ msecs_to_jiffies(timeout_msecs));
+ if (ret == 0) {/* timeout occurred */
+ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ sht15_connection_reset(data);
+ return -ETIME;
+ }
+ return 0;
+}
+
+/**
+ * sht15_update_vals() - get updated readings from device if too old
+ * @data: device state
+ **/
+static int sht15_update_vals(struct sht15_data *data)
+{
+ int ret = 0;
+ int timeout = HZ;
+
+ mutex_lock(&data->read_lock);
+ if (time_after(jiffies, data->last_updat + timeout)
+ || !data->valid) {
+ data->flag = SHT15_READING_HUMID;
+ ret = sht15_update_single_val(data, SHT15_MEASURE_RH, 160);
+ if (ret)
+ goto error_ret;
+ data->flag = SHT15_READING_TEMP;
+ ret = sht15_update_single_val(data, SHT15_MEASURE_TEMP, 400);
+ if (ret)
+ goto error_ret;
+ data->valid = 1;
+ data->last_updat = jiffies;
+ }
+error_ret:
+ mutex_unlock(&data->read_lock);
+
+ return ret;
+}
+
+/**
+ * sht15_calc_temp() - convert the raw reading to a temperature
+ * @data: device state
+ *
+ * As per section 4.3 of the data sheet.
+ **/
+static inline int sht15_calc_temp(struct sht15_data *data)
+{
+ int d1 = temppoints[0].d1;
+ int i;
+
+ for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
+ /* Find the pair of table points to interpolate between */
+ if (data->supply_uV > temppoints[i - 1].vdd) {
+ /* both terms are in microvolts */
+ d1 = (data->supply_uV - temppoints[i - 1].vdd)
+ * (temppoints[i].d1 - temppoints[i - 1].d1)
+ / (temppoints[i].vdd - temppoints[i - 1].vdd)
+ + temppoints[i - 1].d1;
+ break;
+ }
+
+ return data->val_temp*10 + d1;
+}
+
+/**
+ * sht15_calc_humid() - using last temperature convert raw to humid
+ * @data: device state
+ *
+ * This is the temperature compensated version as per section 4.2 of
+ * the data sheet.
+ **/
+static inline int sht15_calc_humid(struct sht15_data *data)
+{
+ int RHlinear; /* milli percent */
+ int temp = sht15_calc_temp(data);
+
+ const int c1 = -4;
+ const int c2 = 40500; /* x 10 ^ -6 */
+ const int c3 = -2800; /* x 10 ^ -9, negative per the data sheet */
+
+ RHlinear = c1*1000
+ + c2 * data->val_humid/1000
+ + (data->val_humid * data->val_humid * c3)/1000000;
+ /* temperature compensation: t1 = 0.01, t2 = 0.00008, scaled x 10 ^ -6 */
+ return (temp - 25000) * (10000 + 80 * data->val_humid)
+ / 1000000 + RHlinear;
+}
+
+static ssize_t sht15_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct sht15_data *data = dev_get_drvdata(dev);
+
+ /* Technically no need to read humidity as well */
+ ret = sht15_update_vals(data);
+
+ return ret ? ret : sprintf(buf, "%d\n",
+ sht15_calc_temp(data));
+}
+
+static ssize_t sht15_show_humidity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct sht15_data *data = dev_get_drvdata(dev);
+
+ ret = sht15_update_vals(data);
+
+ return ret ? ret : sprintf(buf, "%d\n", sht15_calc_humid(data));
+
+};
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ return sprintf(buf, "%s\n", pdev->name);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input,
+ S_IRUGO, sht15_show_temp,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input,
+ S_IRUGO, sht15_show_humidity,
+ NULL, 0);
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static struct attribute *sht15_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_humidity1_input.dev_attr.attr,
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group sht15_attr_group = {
+ .attrs = sht15_attrs,
+};
+
+static irqreturn_t sht15_interrupt_fired(int irq, void *d)
+{
+ struct sht15_data *data = d;
+ /* First disable the interrupt */
+ disable_irq_nosync(irq);
+ atomic_inc(&data->interrupt_handled);
+ /* Then schedule a reading work struct */
+ if (data->flag != SHT15_READING_NOTHING)
+ schedule_work(&data->read_work);
+ return IRQ_HANDLED;
+}
+
+/* Each byte of data is acknowledged by pulling the data line
+ * low for one clock pulse.
+ */
+static void sht15_ack(struct sht15_data *data)
+{
+ gpio_direction_output(data->pdata->gpio_data, 0);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_data, 1);
+
+ gpio_direction_input(data->pdata->gpio_data);
+}
+/**
+ * sht15_end_transmission() - notify device of end of transmission
+ * @data: device state
+ *
+ * This is basically a NAK. (single clock pulse, data high)
+ **/
+static void sht15_end_transmission(struct sht15_data *data)
+{
+ gpio_direction_output(data->pdata->gpio_data, 1);
+ ndelay(SHT15_TSU);
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSCKH);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+}
+
+static void sht15_bh_read_data(struct work_struct *work_s)
+{
+ int i;
+ uint16_t val = 0;
+ struct sht15_data *data
+ = container_of(work_s, struct sht15_data,
+ read_work);
+ /* Firstly, verify the line is low */
+ if (gpio_get_value(data->pdata->gpio_data)) {
+ /* If not, re-enable the interrupt; take care, as the
+ line could have gone low in the meantime, so verify
+ that it hasn't.
+ */
+ atomic_set(&data->interrupt_handled, 0);
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ /* If it still hasn't occurred, or another handler has been scheduled */
+ if (gpio_get_value(data->pdata->gpio_data)
+ || atomic_read(&data->interrupt_handled))
+ return;
+ }
+ /* Read the data back from the device */
+ for (i = 0; i < 16; ++i) {
+ val <<= 1;
+ gpio_set_value(data->pdata->gpio_sck, 1);
+ ndelay(SHT15_TSCKH);
+ val |= !!gpio_get_value(data->pdata->gpio_data);
+ gpio_set_value(data->pdata->gpio_sck, 0);
+ ndelay(SHT15_TSCKL);
+ if (i == 7)
+ sht15_ack(data);
+ }
+ /* Tell the device we are done */
+ sht15_end_transmission(data);
+
+ switch (data->flag) {
+ case SHT15_READING_TEMP:
+ data->val_temp = val;
+ break;
+ case SHT15_READING_HUMID:
+ data->val_humid = val;
+ break;
+ }
+
+ data->flag = SHT15_READING_NOTHING;
+ wake_up(&data->wait_queue);
+}
+
+static void sht15_update_voltage(struct work_struct *work_s)
+{
+ struct sht15_data *data
+ = container_of(work_s, struct sht15_data,
+ update_supply_work);
+ data->supply_uV = regulator_get_voltage(data->reg);
+}
+
+/**
+ * sht15_invalidate_voltage() - mark supply voltage invalid when notified by reg
+ * @nb: associated notification structure
+ * @event: voltage regulator state change event code
+ * @ignored: function parameter - ignored here
+ *
+ * Note that as the notification code holds the regulator lock, we have
+ * to schedule an update of the supply voltage rather than getting it directly.
+ **/
+static int sht15_invalidate_voltage(struct notifier_block *nb,
+ unsigned long event,
+ void *ignored)
+{
+ struct sht15_data *data = container_of(nb, struct sht15_data, nb);
+
+ if (event == REGULATOR_EVENT_VOLTAGE_CHANGE)
+ data->supply_uV_valid = false;
+ schedule_work(&data->update_supply_work);
+
+ return NOTIFY_OK;
+}
+
+static int __devinit sht15_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "kzalloc failed");
+ goto error_ret;
+ }
+
+ INIT_WORK(&data->read_work, sht15_bh_read_data);
+ INIT_WORK(&data->update_supply_work, sht15_update_voltage);
+ platform_set_drvdata(pdev, data);
+ mutex_init(&data->read_lock);
+ data->dev = &pdev->dev;
+ init_waitqueue_head(&data->wait_queue);
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "no platform data supplied");
+ goto err_free_data;
+ }
+ data->pdata = pdev->dev.platform_data;
+ data->supply_uV = data->pdata->supply_mv*1000;
+
+/* If a regulator is available, query what the supply voltage actually is! */
+ data->reg = regulator_get(data->dev, "vcc");
+ if (!IS_ERR(data->reg)) {
+ data->supply_uV = regulator_get_voltage(data->reg);
+ regulator_enable(data->reg);
+ /* setup a notifier block to update this if another device
+ * causes the voltage to change */
+ data->nb.notifier_call = &sht15_invalidate_voltage;
+ ret = regulator_register_notifier(data->reg, &data->nb);
+ }
+/* Try requesting the GPIOs */
+ ret = gpio_request(data->pdata->gpio_sck, "SHT15 sck");
+ if (ret) {
+ dev_err(&pdev->dev, "gpio request failed");
+ goto err_free_data;
+ }
+ gpio_direction_output(data->pdata->gpio_sck, 0);
+ ret = gpio_request(data->pdata->gpio_data, "SHT15 data");
+ if (ret) {
+ dev_err(&pdev->dev, "gpio request failed");
+ goto err_release_gpio_sck;
+ }
+ ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "sysfs create failed");
+ goto err_release_gpio_data;
+ }
+
+ ret = request_irq(gpio_to_irq(data->pdata->gpio_data),
+ sht15_interrupt_fired,
+ IRQF_TRIGGER_FALLING,
+ "sht15 data",
+ data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get irq for data line");
+ goto err_release_gpio_data;
+ }
+ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ sht15_connection_reset(data);
+ sht15_send_cmd(data, 0x1E);
+
+ data->hwmon_dev = hwmon_device_register(data->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto err_free_irq;
+ }
+ return 0;
+
+err_free_irq:
+ free_irq(gpio_to_irq(data->pdata->gpio_data), data);
+err_remove_group:
+ sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group);
+err_release_gpio_data:
+ gpio_free(data->pdata->gpio_data);
+err_release_gpio_sck:
+ gpio_free(data->pdata->gpio_sck);
+err_free_data:
+ kfree(data);
+error_ret:
+ return ret;
+}
+
+static int __devexit sht15_remove(struct platform_device *pdev)
+{
+ struct sht15_data *data = platform_get_drvdata(pdev);
+
+ /* Make sure any reads from the device are done and
+ * prevent new ones beginning */
+ mutex_lock(&data->read_lock);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group);
+ if (!IS_ERR(data->reg)) {
+ regulator_unregister_notifier(data->reg, &data->nb);
+ regulator_disable(data->reg);
+ regulator_put(data->reg);
+ }
+
+ free_irq(gpio_to_irq(data->pdata->gpio_data), data);
+ gpio_free(data->pdata->gpio_data);
+ gpio_free(data->pdata->gpio_sck);
+ mutex_unlock(&data->read_lock);
+ kfree(data);
+ return 0;
+}
+
+
+static struct platform_driver sht_drivers[] = {
+ {
+ .driver = {
+ .name = "sht10",
+ .owner = THIS_MODULE,
+ },
+ .probe = sht15_probe,
+ .remove = sht15_remove,
+ }, {
+ .driver = {
+ .name = "sht11",
+ .owner = THIS_MODULE,
+ },
+ .probe = sht15_probe,
+ .remove = sht15_remove,
+ }, {
+ .driver = {
+ .name = "sht15",
+ .owner = THIS_MODULE,
+ },
+ .probe = sht15_probe,
+ .remove = sht15_remove,
+ }, {
+ .driver = {
+ .name = "sht71",
+ .owner = THIS_MODULE,
+ },
+ .probe = sht15_probe,
+ .remove = sht15_remove,
+ }, {
+ .driver = {
+ .name = "sht75",
+ .owner = THIS_MODULE,
+ },
+ .probe = sht15_probe,
+ .remove = sht15_remove,
+ },
+};
+
+
+static int __init sht15_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sht_drivers); i++) {
+ ret = platform_driver_register(&sht_drivers[i]);
+ if (ret)
+ goto error_unreg;
+ }
+
+ return 0;
+
+error_unreg:
+ while (--i >= 0)
+ platform_driver_unregister(&sht_drivers[i]);
+
+ return ret;
+}
+module_init(sht15_init);
+
+static void __exit sht15_exit(void)
+{
+ int i;
+ for (i = ARRAY_SIZE(sht_drivers) - 1; i >= 0; i--)
+ platform_driver_unregister(&sht_drivers[i]);
+}
+module_exit(sht15_exit);
+
+MODULE_LICENSE("GPL");
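
For reference, wiring this driver up from board code takes one platform device per sensor plus the platform data the probe above dereferences (gpio_data, gpio_sck and the supply_mv fallback used when no "vcc" regulator is available). A minimal sketch; the header path and struct name are assumed from the driver's conventions, and the GPIO numbers are board-specific placeholders:

	#include <linux/platform_device.h>
	#include <linux/sht15.h>	/* assumed location of sht15_platform_data */

	static struct sht15_platform_data example_sht15_pdata = {
		.gpio_data = 100,	/* bidirectional data line */
		.gpio_sck  = 101,	/* clock line driven by the host */
		.supply_mv = 3300,	/* used if regulator_get("vcc") fails */
	};

	static struct platform_device example_sht15_device = {
		.name = "sht15",	/* any of sht10/sht11/sht15/sht71/sht75 */
		.id   = -1,
		.dev  = {
			.platform_data = &example_sht15_pdata,
		},
	};
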
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ec3db3ade118..d44065d2e662 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -132,6 +132,11 @@ static void input_start_autorepeat(struct input_dev *dev, int code)
}
}
+static void input_stop_autorepeat(struct input_dev *dev)
+{
+ del_timer(&dev->timer);
+}
+
#define INPUT_IGNORE_EVENT 0
#define INPUT_PASS_TO_HANDLERS 1
#define INPUT_PASS_TO_DEVICE 2
@@ -167,6 +172,8 @@ static void input_handle_event(struct input_dev *dev,
__change_bit(code, dev->key);
if (value)
input_start_autorepeat(dev, code);
+ else
+ input_stop_autorepeat(dev);
}
disposition = INPUT_PASS_TO_HANDLERS;
@@ -737,11 +744,11 @@ static inline void input_wakeup_procfs_readers(void)
static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
{
- int state = input_devices_state;
-
poll_wait(file, &input_devices_poll_wait, wait);
- if (state != input_devices_state)
+ if (file->f_version != input_devices_state) {
+ file->f_version = input_devices_state;
return POLLIN | POLLRDNORM;
+ }
return 0;
}
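
The f_version change makes the notification edge-triggered per open file rather than per call: each struct file remembers the last generation counter it saw, so two independent readers each get exactly one POLLIN when the device list changes. A minimal user-space sketch of the consuming side, assuming this handler backs /proc/bus/input/devices:

	#include <fcntl.h>
	#include <poll.h>

	int main(void)
	{
		struct pollfd pfd = { .events = POLLIN };

		pfd.fd = open("/proc/bus/input/devices", O_RDONLY);

		/* returns once the set of input devices changes */
		poll(&pfd, 1, -1);
		return 0;
	}
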
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 45470f18d7e9..f999dc60c3b8 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -229,7 +229,8 @@ struct atkbd {
/*
* System-specific keymap fixup routine
*/
-static void (*atkbd_platform_fixup)(struct atkbd *);
+static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
+static void *atkbd_platform_fixup_data;
static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
ssize_t (*handler)(struct atkbd *, char *));
@@ -834,87 +835,64 @@ static void atkbd_disconnect(struct serio *serio)
}
/*
- * Most special keys (Fn+F?) on Dell laptops do not generate release
- * events so we have to do it ourselves.
+ * generate release events for the keycodes given in data
*/
-static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd)
+static void atkbd_apply_forced_release_keylist(struct atkbd *atkbd,
+ const void *data)
{
- static const unsigned int forced_release_keys[] = {
- 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93,
- };
- int i;
+ const unsigned int *keys = data;
+ unsigned int i;
if (atkbd->set == 2)
- for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
- __set_bit(forced_release_keys[i],
- atkbd->force_release_mask);
+ for (i = 0; keys[i] != -1U; i++)
+ __set_bit(keys[i], atkbd->force_release_mask);
}
/*
+ * Most special keys (Fn+F?) on Dell laptops do not generate release
+ * events so we have to do it ourselves.
+ */
+static unsigned int atkbd_dell_laptop_forced_release_keys[] = {
+ 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, -1U
+};
+
+/*
* Perform fixup for HP system that doesn't generate release
* for its video switch
*/
-static void atkbd_hp_keymap_fixup(struct atkbd *atkbd)
-{
- static const unsigned int forced_release_keys[] = {
- 0x94,
- };
- int i;
-
- if (atkbd->set == 2)
- for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
- __set_bit(forced_release_keys[i],
- atkbd->force_release_mask);
-}
+static unsigned int atkbd_hp_forced_release_keys[] = {
+ 0x94, -1U
+};
/*
* Inventec system with broken key release on volume keys
*/
-static void atkbd_inventec_keymap_fixup(struct atkbd *atkbd)
-{
- const unsigned int forced_release_keys[] = {
- 0xae, 0xb0,
- };
- int i;
-
- if (atkbd->set == 2)
- for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
- __set_bit(forced_release_keys[i],
- atkbd->force_release_mask);
-}
+static unsigned int atkbd_inventec_forced_release_keys[] = {
+ 0xae, 0xb0, -1U
+};
/*
* Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release
* for its volume buttons
*/
-static void atkbd_hp_zv6100_keymap_fixup(struct atkbd *atkbd)
-{
- const unsigned int forced_release_keys[] = {
- 0xae, 0xb0,
- };
- int i;
-
- if (atkbd->set == 2)
- for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
- __set_bit(forced_release_keys[i],
- atkbd->force_release_mask);
-}
+static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
+ 0xae, 0xb0, -1U
+};
/*
* Samsung NC10 with Fn+F? key release not working
*/
-static void atkbd_samsung_keymap_fixup(struct atkbd *atkbd)
-{
- const unsigned int forced_release_keys[] = {
- 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9,
- };
- int i;
+static unsigned int atkbd_samsung_forced_release_keys[] = {
+ 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, -1U
+};
- if (atkbd->set == 2)
- for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
- __set_bit(forced_release_keys[i],
- atkbd->force_release_mask);
-}
+/*
+ * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop
+ * do not generate release events so we have to do it ourselves.
+ */
+static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
+ 0xb0, 0xae, -1U
+};
/*
* atkbd_set_keycode_table() initializes keyboard's keycode table
@@ -967,7 +945,7 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd)
* Perform additional fixups
*/
if (atkbd_platform_fixup)
- atkbd_platform_fixup(atkbd);
+ atkbd_platform_fixup(atkbd, atkbd_platform_fixup_data);
}
/*
@@ -1492,9 +1470,11 @@ static ssize_t atkbd_show_err_count(struct atkbd *atkbd, char *buf)
return sprintf(buf, "%lu\n", atkbd->err_count);
}
-static int __init atkbd_setup_fixup(const struct dmi_system_id *id)
+static int __init atkbd_setup_forced_release(const struct dmi_system_id *id)
{
- atkbd_platform_fixup = id->driver_data;
+ atkbd_platform_fixup = atkbd_apply_forced_release_keylist;
+ atkbd_platform_fixup_data = id->driver_data;
+
return 0;
}
@@ -1505,8 +1485,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
- .callback = atkbd_setup_fixup,
- .driver_data = atkbd_dell_laptop_keymap_fixup,
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_dell_laptop_forced_release_keys,
},
{
.ident = "Dell Laptop",
@@ -1514,8 +1494,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
- .callback = atkbd_setup_fixup,
- .driver_data = atkbd_dell_laptop_keymap_fixup,
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_dell_laptop_forced_release_keys,
},
{
.ident = "HP 2133",
@@ -1523,8 +1503,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 2133"),
},
- .callback = atkbd_setup_fixup,
- .driver_data = atkbd_hp_keymap_fixup,
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_hp_forced_release_keys,
},
{
.ident = "HP Pavilion ZV6100",
@@ -1532,8 +1512,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"),
},
- .callback = atkbd_setup_fixup,
- .driver_data = atkbd_hp_zv6100_keymap_fixup,
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_hp_zv6100_forced_release_keys,
},
{
.ident = "Inventec Symphony",
@@ -1541,8 +1521,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
},
- .callback = atkbd_setup_fixup,
- .driver_data = atkbd_inventec_keymap_fixup,
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_inventec_forced_release_keys,
},
{
.ident = "Samsung NC10",
@@ -1550,8 +1530,17 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
},
- .callback = atkbd_setup_fixup,
- .driver_data = atkbd_samsung_keymap_fixup,
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_samsung_forced_release_keys,
+ },
+ {
+ .ident = "Fujitsu Amilo PA 1510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"),
+ },
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_amilo_pa1510_forced_release_keys,
},
{ }
};
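
With the table-driven scheme above, supporting another machine with sticky special keys no longer needs a bespoke fixup function: it takes one -1U-terminated keycode list plus one DMI entry. A hypothetical example, with placeholder vendor strings and scancodes:

	static unsigned int atkbd_example_forced_release_keys[] = {
		0x94, 0xae, 0xb0, -1U
	};

	/* added to atkbd_dmi_quirk_table[] before the terminating { } */
	{
		.ident = "Example Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
		.callback = atkbd_setup_forced_release,
		.driver_data = atkbd_example_forced_release_keys,
	},
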
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index ee855c5202e8..e94b7d735aca 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -211,8 +211,8 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev)
if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT ||
!pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) {
- printk(KERN_ERR DRV_NAME
- ": Invalid Debounce/Columdrive Time from pdata\n");
+ printk(KERN_WARNING DRV_NAME
+ ": Invalid Debounce/Columndrive Time in platform data\n");
bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */
} else {
bfin_write_KPAD_MSEL(
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index aacf71f3cd44..e9d639ec283d 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -198,45 +198,28 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len)
}
-/* initialise HIL */
-static int __init
-hil_keyb_init(void)
+/* initialize HIL */
+static int __devinit hil_keyb_init(void)
{
unsigned char c;
unsigned int i, kbid;
wait_queue_head_t hil_wait;
int err;
- if (hil_dev.dev) {
+ if (hil_dev.dev)
return -ENODEV; /* already initialized */
- }
+ init_waitqueue_head(&hil_wait);
spin_lock_init(&hil_dev.lock);
+
hil_dev.dev = input_allocate_device();
if (!hil_dev.dev)
return -ENOMEM;
-#if defined(CONFIG_HP300)
- if (!MACH_IS_HP300) {
- err = -ENODEV;
- goto err1;
- }
- if (!hwreg_present((void *)(HILBASE + HIL_DATA))) {
- printk(KERN_ERR "HIL: hardware register was not found\n");
- err = -ENODEV;
- goto err1;
- }
- if (!request_region(HILBASE + HIL_DATA, 2, "hil")) {
- printk(KERN_ERR "HIL: IOPORT region already used\n");
- err = -EIO;
- goto err1;
- }
-#endif
-
err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id);
if (err) {
printk(KERN_ERR "HIL: Can't get IRQ\n");
- goto err2;
+ goto err1;
}
/* Turn on interrupts */
@@ -246,11 +229,9 @@ hil_keyb_init(void)
hil_dev.valid = 0; /* clear any pending data */
hil_do(HIL_READKBDSADR, NULL, 0);
- init_waitqueue_head(&hil_wait);
- wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3*HZ);
- if (!hil_dev.valid) {
+ wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3 * HZ);
+ if (!hil_dev.valid)
printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n");
- }
c = hil_dev.c;
hil_dev.valid = 0;
@@ -268,7 +249,7 @@ hil_keyb_init(void)
for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++)
if (hphilkeyb_keycode[i] != KEY_RESERVED)
- set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit);
+ __set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit);
hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) |
@@ -287,34 +268,45 @@ hil_keyb_init(void)
err = input_register_device(hil_dev.dev);
if (err) {
printk(KERN_ERR "HIL: Can't register device\n");
- goto err3;
+ goto err2;
}
+
printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n",
hil_dev.dev->name, kbid, HILBASE, HIL_IRQ);
return 0;
-err3:
+err2:
hil_do(HIL_INTOFF, NULL, 0);
- disable_irq(HIL_IRQ);
free_irq(HIL_IRQ, hil_dev.dev_id);
-err2:
-#if defined(CONFIG_HP300)
- release_region(HILBASE + HIL_DATA, 2);
err1:
-#endif
input_free_device(hil_dev.dev);
hil_dev.dev = NULL;
return err;
}
+static void __devexit hil_keyb_exit(void)
+{
+ if (HIL_IRQ)
+ free_irq(HIL_IRQ, hil_dev.dev_id);
+
+ /* Turn off interrupts */
+ hil_do(HIL_INTOFF, NULL, 0);
+
+ input_unregister_device(hil_dev.dev);
+ hil_dev.dev = NULL;
+}
#if defined(CONFIG_PARISC)
-static int __init
-hil_init_chip(struct parisc_device *dev)
+static int __devinit hil_probe_chip(struct parisc_device *dev)
{
+ /* Only allow one HIL keyboard */
+ if (hil_dev.dev)
+ return -ENODEV;
+
if (!dev->irq) {
- printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%08lx\n", dev->hpa.start);
+ printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%p\n",
+ (void *)dev->hpa.start);
return -ENODEV;
}
@@ -327,51 +319,79 @@ hil_init_chip(struct parisc_device *dev)
return hil_keyb_init();
}
+static int __devexit hil_remove_chip(struct parisc_device *dev)
+{
+ hil_keyb_exit();
+
+ return 0;
+}
+
static struct parisc_device_id hil_tbl[] = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 },
{ 0, }
};
+#if 0
+/* Disabled to avoid conflicts with the HP SDC HIL drivers */
MODULE_DEVICE_TABLE(parisc, hil_tbl);
+#endif
static struct parisc_driver hil_driver = {
- .name = "hil",
- .id_table = hil_tbl,
- .probe = hil_init_chip,
+ .name = "hil",
+ .id_table = hil_tbl,
+ .probe = hil_probe_chip,
+ .remove = __devexit_p(hil_remove_chip),
};
-#endif /* CONFIG_PARISC */
-
static int __init hil_init(void)
{
-#if defined(CONFIG_PARISC)
return register_parisc_driver(&hil_driver);
-#else
- return hil_keyb_init();
-#endif
}
-
static void __exit hil_exit(void)
{
- if (HIL_IRQ) {
- disable_irq(HIL_IRQ);
- free_irq(HIL_IRQ, hil_dev.dev_id);
+ unregister_parisc_driver(&hil_driver);
+}
+
+#else /* !CONFIG_PARISC */
+
+static int __init hil_init(void)
+{
+ int error;
+
+ /* Only allow one HIL keyboard */
+ if (hil_dev.dev)
+ return -EBUSY;
+
+ if (!MACH_IS_HP300)
+ return -ENODEV;
+
+ if (!hwreg_present((void *)(HILBASE + HIL_DATA))) {
+ printk(KERN_ERR "HIL: hardware register was not found\n");
+ return -ENODEV;
}
- /* Turn off interrupts */
- hil_do(HIL_INTOFF, NULL, 0);
+ if (!request_region(HILBASE + HIL_DATA, 2, "hil")) {
+ printk(KERN_ERR "HIL: IOPORT region already used\n");
+ return -EIO;
+ }
- input_unregister_device(hil_dev.dev);
+ error = hil_keyb_init();
+ if (error) {
+ release_region(HILBASE + HIL_DATA, 2);
+ return error;
+ }
- hil_dev.dev = NULL;
+ return 0;
+}
-#if defined(CONFIG_PARISC)
- unregister_parisc_driver(&hil_driver);
-#else
- release_region(HILBASE+HIL_DATA, 2);
-#endif
+static void __exit hil_exit(void)
+{
+ hil_keyb_exit();
+ release_region(HILBASE + HIL_DATA, 2);
}
+#endif /* CONFIG_PARISC */
+
module_init(hil_init);
module_exit(hil_exit);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 67e5553f699a..203abac1e23e 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -227,4 +227,27 @@ config INPUT_PCF50633_PMU
Say Y to include support for delivering PMU events via input
layer on NXP PCF50633.
+config INPUT_GPIO_ROTARY_ENCODER
+ tristate "Rotary encoders connected to GPIO pins"
+ depends on GPIOLIB && GENERIC_GPIO
+ help
+ Say Y here to add support for rotary encoders connected to GPIO lines.
+ Check file:Documentation/input/rotary-encoder.txt for more
+ information.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rotary_encoder.
+
+config INPUT_RB532_BUTTON
+ tristate "Mikrotik Routerboard 532 button interface"
+ depends on MIKROTIK_RB532
+ depends on GPIOLIB && GENERIC_GPIO
+ select INPUT_POLLDEV
+ help
+ Say Y here if you want support for the S1 button built into
+ Mikrotik's Routerboard 532.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rb532_button.
+
endif
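
Both new options follow the usual Kconfig pattern: Y builds the driver in, M builds it as the named module. In a .config that would read, for example:

	CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
	CONFIG_INPUT_RB532_BUTTON=m
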
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index bb62e6efacf3..eb3f407baedf 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -4,21 +4,23 @@
# Each configuration option enables a list of files.
-obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
-obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
-obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
-obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
-obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
-obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
-obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
+obj-$(CONFIG_INPUT_APANEL) += apanel.o
obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
-obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
-obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
-obj-$(CONFIG_INPUT_YEALINK) += yealink.o
+obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
obj-$(CONFIG_INPUT_CM109) += cm109.o
+obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
-obj-$(CONFIG_INPUT_UINPUT) += uinput.o
-obj-$(CONFIG_INPUT_APANEL) += apanel.o
-obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
+obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
+obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
+obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
+obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
+obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
+obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
+obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
+obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
+obj-$(CONFIG_INPUT_UINPUT) += uinput.o
+obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
+obj-$(CONFIG_INPUT_YEALINK) += yealink.o
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 3c9988dc0e9f..922c05141585 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -31,12 +31,73 @@ MODULE_LICENSE("GPL");
* newly configured "channel".
*/
-static unsigned int channel_mask = 0xFFFF;
-module_param(channel_mask, uint, 0644);
+enum {
+ ATI_REMOTE2_MAX_CHANNEL_MASK = 0xFFFF,
+ ATI_REMOTE2_MAX_MODE_MASK = 0x1F,
+};
+
+static int ati_remote2_set_mask(const char *val,
+ struct kernel_param *kp, unsigned int max)
+{
+ unsigned long mask;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = strict_strtoul(val, 0, &mask);
+ if (ret)
+ return ret;
+
+ if (mask & ~max)
+ return -EINVAL;
+
+ *(unsigned int *)kp->arg = mask;
+
+ return 0;
+}
+
+static int ati_remote2_set_channel_mask(const char *val,
+ struct kernel_param *kp)
+{
+ pr_debug("%s()\n", __func__);
+
+ return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_CHANNEL_MASK);
+}
+
+static int ati_remote2_get_channel_mask(char *buffer, struct kernel_param *kp)
+{
+ pr_debug("%s()\n", __func__);
+
+ return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
+}
+
+static int ati_remote2_set_mode_mask(const char *val, struct kernel_param *kp)
+{
+ pr_debug("%s()\n", __func__);
+
+ return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_MODE_MASK);
+}
+
+static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp)
+{
+ pr_debug("%s()\n", __func__);
+
+ return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg);
+}
+
+static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
+#define param_check_channel_mask(name, p) __param_check(name, p, unsigned int)
+#define param_set_channel_mask ati_remote2_set_channel_mask
+#define param_get_channel_mask ati_remote2_get_channel_mask
+module_param(channel_mask, channel_mask, 0644);
MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>");
-static unsigned int mode_mask = 0x1F;
-module_param(mode_mask, uint, 0644);
+static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK;
+#define param_check_mode_mask(name, p) __param_check(name, p, unsigned int)
+#define param_set_mode_mask ati_remote2_set_mode_mask
+#define param_get_mode_mask ati_remote2_get_mode_mask
+module_param(mode_mask, mode_mask, 0644);
MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>");
static struct usb_device_id ati_remote2_id_table[] = {
@@ -133,12 +194,18 @@ struct ati_remote2 {
u16 keycode[ATI_REMOTE2_MODES][ARRAY_SIZE(ati_remote2_key_table)];
unsigned int flags;
+
+ unsigned int channel_mask;
+ unsigned int mode_mask;
};
static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id);
static void ati_remote2_disconnect(struct usb_interface *interface);
static int ati_remote2_suspend(struct usb_interface *interface, pm_message_t message);
static int ati_remote2_resume(struct usb_interface *interface);
+static int ati_remote2_reset_resume(struct usb_interface *interface);
+static int ati_remote2_pre_reset(struct usb_interface *interface);
+static int ati_remote2_post_reset(struct usb_interface *interface);
static struct usb_driver ati_remote2_driver = {
.name = "ati_remote2",
@@ -147,6 +214,9 @@ static struct usb_driver ati_remote2_driver = {
.id_table = ati_remote2_id_table,
.suspend = ati_remote2_suspend,
.resume = ati_remote2_resume,
+ .reset_resume = ati_remote2_reset_resume,
+ .pre_reset = ati_remote2_pre_reset,
+ .post_reset = ati_remote2_post_reset,
.supports_autosuspend = 1,
};
@@ -238,7 +308,7 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2)
channel = data[0] >> 4;
- if (!((1 << channel) & channel_mask))
+ if (!((1 << channel) & ar2->channel_mask))
return;
mode = data[0] & 0x0F;
@@ -250,7 +320,7 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2)
return;
}
- if (!((1 << mode) & mode_mask))
+ if (!((1 << mode) & ar2->mode_mask))
return;
input_event(idev, EV_REL, REL_X, (s8) data[1]);
@@ -277,7 +347,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2)
channel = data[0] >> 4;
- if (!((1 << channel) & channel_mask))
+ if (!((1 << channel) & ar2->channel_mask))
return;
mode = data[0] & 0x0F;
@@ -305,7 +375,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2)
ar2->mode = mode;
}
- if (!((1 << mode) & mode_mask))
+ if (!((1 << mode) & ar2->mode_mask))
return;
index = ati_remote2_lookup(hw_code);
@@ -410,7 +480,7 @@ static int ati_remote2_getkeycode(struct input_dev *idev,
int index, mode;
mode = scancode >> 8;
- if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask))
+ if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask))
return -EINVAL;
index = ati_remote2_lookup(scancode & 0xFF);
@@ -427,7 +497,7 @@ static int ati_remote2_setkeycode(struct input_dev *idev, int scancode, int keyc
int index, mode, old_keycode;
mode = scancode >> 8;
- if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask))
+ if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask))
return -EINVAL;
index = ati_remote2_lookup(scancode & 0xFF);
@@ -550,7 +620,7 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2)
}
}
-static int ati_remote2_setup(struct ati_remote2 *ar2)
+static int ati_remote2_setup(struct ati_remote2 *ar2, unsigned int ch_mask)
{
int r, i, channel;
@@ -565,8 +635,8 @@ static int ati_remote2_setup(struct ati_remote2 *ar2)
channel = 0;
for (i = 0; i < 16; i++) {
- if ((1 << i) & channel_mask) {
- if (!(~(1 << i) & 0xFFFF & channel_mask))
+ if ((1 << i) & ch_mask) {
+ if (!(~(1 << i) & ch_mask))
channel = i + 1;
break;
}
@@ -585,6 +655,99 @@ static int ati_remote2_setup(struct ati_remote2 *ar2)
return 0;
}
+static ssize_t ati_remote2_show_channel_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
+ struct ati_remote2 *ar2 = usb_get_intfdata(intf);
+
+ return sprintf(buf, "0x%04x\n", ar2->channel_mask);
+}
+
+static ssize_t ati_remote2_store_channel_mask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
+ struct ati_remote2 *ar2 = usb_get_intfdata(intf);
+ unsigned long mask;
+ int r;
+
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK)
+ return -EINVAL;
+
+ r = usb_autopm_get_interface(ar2->intf[0]);
+ if (r) {
+ dev_err(&ar2->intf[0]->dev,
+ "%s(): usb_autopm_get_interface() = %d\n", __func__, r);
+ return r;
+ }
+
+ mutex_lock(&ati_remote2_mutex);
+
+ if (mask != ar2->channel_mask && !ati_remote2_setup(ar2, mask))
+ ar2->channel_mask = mask;
+
+ mutex_unlock(&ati_remote2_mutex);
+
+ usb_autopm_put_interface(ar2->intf[0]);
+
+ return count;
+}
+
+static ssize_t ati_remote2_show_mode_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
+ struct ati_remote2 *ar2 = usb_get_intfdata(intf);
+
+ return sprintf(buf, "0x%02x\n", ar2->mode_mask);
+}
+
+static ssize_t ati_remote2_store_mode_mask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
+ struct ati_remote2 *ar2 = usb_get_intfdata(intf);
+ unsigned long mask;
+
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ if (mask & ~ATI_REMOTE2_MAX_MODE_MASK)
+ return -EINVAL;
+
+ ar2->mode_mask = mask;
+
+ return count;
+}
+
+static DEVICE_ATTR(channel_mask, 0644, ati_remote2_show_channel_mask,
+ ati_remote2_store_channel_mask);
+
+static DEVICE_ATTR(mode_mask, 0644, ati_remote2_show_mode_mask,
+ ati_remote2_store_mode_mask);
+
+static struct attribute *ati_remote2_attrs[] = {
+ &dev_attr_channel_mask.attr,
+ &dev_attr_mode_mask.attr,
+ NULL,
+};
+
+static struct attribute_group ati_remote2_attr_group = {
+ .attrs = ati_remote2_attrs,
+};
+
static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
@@ -615,7 +778,10 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
if (r)
goto fail2;
- r = ati_remote2_setup(ar2);
+ ar2->channel_mask = channel_mask;
+ ar2->mode_mask = mode_mask;
+
+ r = ati_remote2_setup(ar2, ar2->channel_mask);
if (r)
goto fail2;
@@ -624,19 +790,24 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
strlcat(ar2->name, "ATI Remote Wonder II", sizeof(ar2->name));
- r = ati_remote2_input_init(ar2);
+ r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
if (r)
goto fail2;
+ r = ati_remote2_input_init(ar2);
+ if (r)
+ goto fail3;
+
usb_set_intfdata(interface, ar2);
interface->needs_remote_wakeup = 1;
return 0;
+ fail3:
+ sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
fail2:
ati_remote2_urb_cleanup(ar2);
-
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
fail1:
kfree(ar2);
@@ -657,6 +828,8 @@ static void ati_remote2_disconnect(struct usb_interface *interface)
input_unregister_device(ar2->idev);
+ sysfs_remove_group(&ar2->udev->dev.kobj, &ati_remote2_attr_group);
+
ati_remote2_urb_cleanup(ar2);
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
@@ -715,6 +888,78 @@ static int ati_remote2_resume(struct usb_interface *interface)
return r;
}
+static int ati_remote2_reset_resume(struct usb_interface *interface)
+{
+ struct ati_remote2 *ar2;
+ struct usb_host_interface *alt = interface->cur_altsetting;
+ int r = 0;
+
+ if (alt->desc.bInterfaceNumber)
+ return 0;
+
+ ar2 = usb_get_intfdata(interface);
+
+ dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__);
+
+ mutex_lock(&ati_remote2_mutex);
+
+ r = ati_remote2_setup(ar2, ar2->channel_mask);
+ if (r)
+ goto out;
+
+ if (ar2->flags & ATI_REMOTE2_OPENED)
+ r = ati_remote2_submit_urbs(ar2);
+
+ if (!r)
+ ar2->flags &= ~ATI_REMOTE2_SUSPENDED;
+
+ out:
+ mutex_unlock(&ati_remote2_mutex);
+
+ return r;
+}
+
+static int ati_remote2_pre_reset(struct usb_interface *interface)
+{
+ struct ati_remote2 *ar2;
+ struct usb_host_interface *alt = interface->cur_altsetting;
+
+ if (alt->desc.bInterfaceNumber)
+ return 0;
+
+ ar2 = usb_get_intfdata(interface);
+
+ dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__);
+
+ mutex_lock(&ati_remote2_mutex);
+
+ if (ar2->flags == ATI_REMOTE2_OPENED)
+ ati_remote2_kill_urbs(ar2);
+
+ return 0;
+}
+
+static int ati_remote2_post_reset(struct usb_interface *interface)
+{
+ struct ati_remote2 *ar2;
+ struct usb_host_interface *alt = interface->cur_altsetting;
+ int r = 0;
+
+ if (alt->desc.bInterfaceNumber)
+ return 0;
+
+ ar2 = usb_get_intfdata(interface);
+
+ dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__);
+
+ if (ar2->flags == ATI_REMOTE2_OPENED)
+ r = ati_remote2_submit_urbs(ar2);
+
+ mutex_unlock(&ati_remote2_mutex);
+
+ return r;
+}
+
static int __init ati_remote2_init(void)
{
int r;
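
The module_param(channel_mask, channel_mask, 0644) construction above relies on token pasting: module_param(name, type, perm) expands to references to param_check_##type, param_set_##type and param_get_##type, so the three #defines route the expansion to the driver's validating accessors. Roughly, and glossing over __MODULE_PARM_TYPE, the preprocessor output is equivalent to:

	__param_check(channel_mask, &channel_mask, unsigned int);
	module_param_call(channel_mask,
			  ati_remote2_set_channel_mask,	/* rejects bits above 0xFFFF */
			  ati_remote2_get_channel_mask,
			  &channel_mask, 0644);
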
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
new file mode 100644
index 000000000000..e2c7f622a0b5
--- /dev/null
+++ b/drivers/input/misc/rb532_button.c
@@ -0,0 +1,120 @@
+/*
+ * Support for the S1 button on Routerboard 532
+ *
+ * Copyright (C) 2009 Phil Sutter <n0-1@freewrt.org>
+ */
+
+#include <linux/input-polldev.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-rc32434/gpio.h>
+#include <asm/mach-rc32434/rb.h>
+
+#define DRV_NAME "rb532-button"
+
+#define RB532_BTN_RATE 100 /* msec */
+#define RB532_BTN_KSYM BTN_0
+
+/* The S1 button state is provided by GPIO pin 1. But as this
+ * pin is also used for uart input as alternate function, the
+ * operational modes must be switched first:
+ * 1) disable uart using set_latch_u5()
+ * 2) turn off alternate function implicitly through
+ * gpio_direction_input()
+ * 3) read the GPIO's current value
+ * 4) undo step 2 by enabling alternate function (in this
+ * mode the GPIO direction is fixed, so no change needed)
+ * 5) turn on uart again
+ * The GPIO value happens to be inverted, so pin high means
+ * button is not pressed.
+ */
+static bool rb532_button_pressed(void)
+{
+ int val;
+
+ set_latch_u5(0, LO_FOFF);
+ gpio_direction_input(GPIO_BTN_S1);
+
+ val = gpio_get_value(GPIO_BTN_S1);
+
+ rb532_gpio_set_func(GPIO_BTN_S1);
+ set_latch_u5(LO_FOFF, 0);
+
+ return !val;
+}
+
+static void rb532_button_poll(struct input_polled_dev *poll_dev)
+{
+ input_report_key(poll_dev->input, RB532_BTN_KSYM,
+ rb532_button_pressed());
+ input_sync(poll_dev->input);
+}
+
+static int __devinit rb532_button_probe(struct platform_device *pdev)
+{
+ struct input_polled_dev *poll_dev;
+ int error;
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev)
+ return -ENOMEM;
+
+ poll_dev->poll = rb532_button_poll;
+ poll_dev->poll_interval = RB532_BTN_RATE;
+
+ poll_dev->input->name = "rb532 button";
+ poll_dev->input->phys = "rb532/button0";
+ poll_dev->input->id.bustype = BUS_HOST;
+ poll_dev->input->dev.parent = &pdev->dev;
+
+ dev_set_drvdata(&pdev->dev, poll_dev);
+
+ input_set_capability(poll_dev->input, EV_KEY, RB532_BTN_KSYM);
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ input_free_polled_device(poll_dev);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __devexit rb532_button_remove(struct platform_device *pdev)
+{
+ struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev);
+
+ input_unregister_polled_device(poll_dev);
+ input_free_polled_device(poll_dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver rb532_button_driver = {
+ .probe = rb532_button_probe,
+ .remove = __devexit_p(rb532_button_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rb532_button_init(void)
+{
+ return platform_driver_register(&rb532_button_driver);
+}
+
+static void __exit rb532_button_exit(void)
+{
+ platform_driver_unregister(&rb532_button_driver);
+}
+
+module_init(rb532_button_init);
+module_exit(rb532_button_exit);
+
+MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Support for S1 button on Routerboard 532");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
new file mode 100644
index 000000000000..5bb3ab51b8c6
--- /dev/null
+++ b/drivers/input/misc/rotary_encoder.c
@@ -0,0 +1,221 @@
+/*
+ * rotary_encoder.c
+ *
+ * (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * state machine code inspired by code from Tim Ruetz
+ *
+ * A generic driver for rotary encoders connected to GPIO lines.
+ * See file:Documentation/input/rotary-encoder.txt for more information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/rotary_encoder.h>
+
+#define DRV_NAME "rotary-encoder"
+
+struct rotary_encoder {
+ unsigned int irq_a;
+ unsigned int irq_b;
+ unsigned int pos;
+ unsigned int armed;
+ unsigned int dir;
+ struct input_dev *input;
+ struct rotary_encoder_platform_data *pdata;
+};
+
+static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
+{
+ struct rotary_encoder *encoder = dev_id;
+ struct rotary_encoder_platform_data *pdata = encoder->pdata;
+ int a = !!gpio_get_value(pdata->gpio_a);
+ int b = !!gpio_get_value(pdata->gpio_b);
+ int state;
+
+ a ^= pdata->inverted_a;
+ b ^= pdata->inverted_b;
+ state = (a << 1) | b;
+
+ switch (state) {
+
+ case 0x0:
+ if (!encoder->armed)
+ break;
+
+ if (encoder->dir) {
+ /* turning counter-clockwise */
+ encoder->pos += pdata->steps;
+ encoder->pos--;
+ encoder->pos %= pdata->steps;
+ } else {
+ /* turning clockwise */
+ encoder->pos++;
+ encoder->pos %= pdata->steps;
+ }
+
+ input_report_abs(encoder->input, pdata->axis, encoder->pos);
+ input_sync(encoder->input);
+
+ encoder->armed = 0;
+ break;
+
+ case 0x1:
+ case 0x2:
+ if (encoder->armed)
+ encoder->dir = state - 1;
+ break;
+
+ case 0x3:
+ encoder->armed = 1;
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit rotary_encoder_probe(struct platform_device *pdev)
+{
+ struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data;
+ struct rotary_encoder *encoder;
+ struct input_dev *input;
+ int err;
+
+ if (!pdata || !pdata->steps) {
+ dev_err(&pdev->dev, "invalid platform data\n");
+ return -ENOENT;
+ }
+
+ encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!encoder || !input) {
+ dev_err(&pdev->dev, "failed to allocate memory for device\n");
+ err = -ENOMEM;
+ goto exit_free_mem;
+ }
+
+ encoder->input = input;
+ encoder->pdata = pdata;
+ encoder->irq_a = gpio_to_irq(pdata->gpio_a);
+ encoder->irq_b = gpio_to_irq(pdata->gpio_b);
+
+ /* create and register the input driver */
+ input->name = pdev->name;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &pdev->dev;
+ input->evbit[0] = BIT_MASK(EV_ABS);
+ input_set_abs_params(encoder->input,
+ pdata->axis, 0, pdata->steps, 0, 1);
+
+ err = input_register_device(input);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ goto exit_free_mem;
+ }
+
+ /* request the GPIOs */
+ err = gpio_request(pdata->gpio_a, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request GPIO %d\n",
+ pdata->gpio_a);
+ goto exit_unregister_input;
+ }
+
+ err = gpio_request(pdata->gpio_b, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request GPIO %d\n",
+ pdata->gpio_b);
+ goto exit_free_gpio_a;
+ }
+
+ /* request the IRQs */
+ err = request_irq(encoder->irq_a, &rotary_encoder_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ DRV_NAME, encoder);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request IRQ %d\n",
+ encoder->irq_a);
+ goto exit_free_gpio_b;
+ }
+
+ err = request_irq(encoder->irq_b, &rotary_encoder_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ DRV_NAME, encoder);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request IRQ %d\n",
+ encoder->irq_b);
+ goto exit_free_irq_a;
+ }
+
+ platform_set_drvdata(pdev, encoder);
+
+ return 0;
+
+exit_free_irq_a:
+ free_irq(encoder->irq_a, encoder);
+exit_free_gpio_b:
+ gpio_free(pdata->gpio_b);
+exit_free_gpio_a:
+ gpio_free(pdata->gpio_a);
+exit_unregister_input:
+ input_unregister_device(input);
+ input = NULL; /* so we don't try to free it */
+exit_free_mem:
+ input_free_device(input);
+ kfree(encoder);
+ return err;
+}
+
+static int __devexit rotary_encoder_remove(struct platform_device *pdev)
+{
+ struct rotary_encoder *encoder = platform_get_drvdata(pdev);
+ struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data;
+
+ free_irq(encoder->irq_a, encoder);
+ free_irq(encoder->irq_b, encoder);
+ gpio_free(pdata->gpio_a);
+ gpio_free(pdata->gpio_b);
+ input_unregister_device(encoder->input);
+ platform_set_drvdata(pdev, NULL);
+ kfree(encoder);
+
+ return 0;
+}
+
+static struct platform_driver rotary_encoder_driver = {
+ .probe = rotary_encoder_probe,
+ .remove = __devexit_p(rotary_encoder_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init rotary_encoder_init(void)
+{
+ return platform_driver_register(&rotary_encoder_driver);
+}
+
+static void __exit rotary_encoder_exit(void)
+{
+ platform_driver_unregister(&rotary_encoder_driver);
+}
+
+module_init(rotary_encoder_init);
+module_exit(rotary_encoder_exit);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DESCRIPTION("GPIO rotary encoder driver");
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_LICENSE("GPL v2");
+
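
The IRQ handler above decodes standard quadrature output: channels A and B are 90 degrees out of phase, so the (a << 1) | b state walks 0x3 -> 0x1 -> 0x0 for one detent in one direction and 0x3 -> 0x2 -> 0x0 in the other; 0x3 arms the detent and 0x0 reports it. Hooking the driver up from board code is just platform data plus a platform device. A sketch with placeholder GPIO numbers:

	#include <linux/input.h>
	#include <linux/platform_device.h>
	#include <linux/rotary_encoder.h>

	static struct rotary_encoder_platform_data example_encoder_info = {
		.steps		= 24,	/* detents per revolution */
		.axis		= ABS_X,
		.gpio_a		= 114,
		.gpio_b		= 115,
		.inverted_a	= 0,
		.inverted_b	= 0,
	};

	static struct platform_device example_encoder_device = {
		.name	= "rotary-encoder",
		.id	= 0,
		.dev	= {
			.platform_data = &example_encoder_info,
		},
	};
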
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 4f38e6f7dfdd..c66cc3d08c2f 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -292,4 +292,15 @@ config MOUSE_PXA930_TRKBALL
help
Say Y here to support PXA930 Trackball mouse.
+config MOUSE_MAPLE
+ tristate "Maple mouse (for the Dreamcast)"
+ depends on MAPLE
+ help
+ This driver supports the Maple mouse on the SEGA Dreamcast.
+
+ Most Dreamcast users who have a mouse will say Y here.
+
+ To compile this driver as a module choose M here: the module will be
+ called maplemouse.
+
endif
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 8c8a1f236e28..472189468d67 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -6,18 +6,19 @@
obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
-obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
-obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
+obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
+obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
+obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
+obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o
obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o
obj-$(CONFIG_MOUSE_PS2) += psmouse.o
obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o
+obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o
-obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o
obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
-obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
psmouse-objs := psmouse-base.o synaptics.o
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 55cd0fa68339..a1ad2f1a7bb3 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -472,7 +472,7 @@ static enum hgpk_model_t hgpk_get_model(struct psmouse *psmouse)
return -EIO;
}
- hgpk_dbg(psmouse, "ID: %02x %02x %02x", param[0], param[1], param[2]);
+ hgpk_dbg(psmouse, "ID: %02x %02x %02x\n", param[0], param[1], param[2]);
/* HGPK signature: 0x67, 0x00, 0x<model> */
if (param[0] != 0x67 || param[1] != 0x00)
diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c
new file mode 100644
index 000000000000..d196abfb68bc
--- /dev/null
+++ b/drivers/input/mouse/maplemouse.c
@@ -0,0 +1,147 @@
+/*
+ * SEGA Dreamcast mouse driver
+ * Based on drivers/usb/usbmouse.c
+ *
+ * Copyright Yaegashi Takeshi, 2001
+ * Adrian McMenamin, 2008
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/maple.h>
+
+MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
+MODULE_DESCRIPTION("SEGA Dreamcast mouse driver");
+MODULE_LICENSE("GPL");
+
+struct dc_mouse {
+ struct input_dev *dev;
+ struct maple_device *mdev;
+};
+
+static void dc_mouse_callback(struct mapleq *mq)
+{
+ int buttons, relx, rely, relz;
+ struct maple_device *mapledev = mq->dev;
+ struct dc_mouse *mse = maple_get_drvdata(mapledev);
+ struct input_dev *dev = mse->dev;
+ unsigned char *res = mq->recvbuf;
+
+ buttons = ~res[8];
+ relx = *(unsigned short *)(res + 12) - 512;
+ rely = *(unsigned short *)(res + 14) - 512;
+ relz = *(unsigned short *)(res + 16) - 512;
+
+ input_report_key(dev, BTN_LEFT, buttons & 4);
+ input_report_key(dev, BTN_MIDDLE, buttons & 9);
+ input_report_key(dev, BTN_RIGHT, buttons & 2);
+ input_report_rel(dev, REL_X, relx);
+ input_report_rel(dev, REL_Y, rely);
+ input_report_rel(dev, REL_WHEEL, relz);
+ input_sync(dev);
+}
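+
+/*
+ * Frame layout used above: res[8] holds the button bits inverted (hence
+ * the ~), and each axis word is centred on 512, so a raw X word of 520
+ * reports REL_X = +8 and one of 504 reports REL_X = -8.
+ */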
+
+static int dc_mouse_open(struct input_dev *dev)
+{
+ struct dc_mouse *mse = dev->dev.platform_data;
+
+ maple_getcond_callback(mse->mdev, dc_mouse_callback, HZ/50,
+ MAPLE_FUNC_MOUSE);
+
+ return 0;
+}
+
+static void dc_mouse_close(struct input_dev *dev)
+{
+ struct dc_mouse *mse = dev->dev.platform_data;
+
+ maple_getcond_callback(mse->mdev, dc_mouse_callback, 0,
+ MAPLE_FUNC_MOUSE);
+}
+
+
+static int __devinit probe_maple_mouse(struct device *dev)
+{
+ struct maple_device *mdev = to_maple_dev(dev);
+ struct maple_driver *mdrv = to_maple_driver(dev->driver);
+ struct input_dev *input_dev;
+ struct dc_mouse *mse;
+ int error;
+
+ mse = kzalloc(sizeof(struct dc_mouse), GFP_KERNEL);
+ input_dev = input_allocate_device();
+
+ if (!mse || !input_dev) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ mse->dev = input_dev;
+ mse->mdev = mdev;
+
+ input_set_drvdata(input_dev, mse);
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+ input_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
+ BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
+ input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) |
+ BIT_MASK(REL_WHEEL);
+ input_dev->name = mdev->product_name;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->open = dc_mouse_open;
+ input_dev->close = dc_mouse_close;
+
+ mdev->driver = mdrv;
+ maple_set_drvdata(mdev, mse);
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto fail;
+
+ return 0;
+
+fail:
+ input_free_device(input_dev);
+ maple_set_drvdata(mdev, NULL);
+ kfree(mse);
+ mdev->driver = NULL;
+ return error;
+}
+
+static int __devexit remove_maple_mouse(struct device *dev)
+{
+ struct maple_device *mdev = to_maple_dev(dev);
+ struct dc_mouse *mse = maple_get_drvdata(mdev);
+
+ mdev->callback = NULL;
+ input_unregister_device(mse->dev);
+ maple_set_drvdata(mdev, NULL);
+ kfree(mse);
+
+ return 0;
+}
+
+static struct maple_driver dc_mouse_driver = {
+ .function = MAPLE_FUNC_MOUSE,
+ .drv = {
+ .name = "Dreamcast_mouse",
+ .probe = probe_maple_mouse,
+ .remove = __devexit_p(remove_maple_mouse),
+ },
+};
+
+static int __init dc_mouse_init(void)
+{
+ return maple_driver_register(&dc_mouse_driver);
+}
+
+static void __exit dc_mouse_exit(void)
+{
+ maple_driver_unregister(&dc_mouse_driver);
+}
+
+module_init(dc_mouse_init);
+module_exit(dc_mouse_exit);
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index fd09c8df81f2..f63995f854ff 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -111,11 +111,7 @@ static int __init pc110pad_init(void)
- struct pci_dev *dev;
int err;
- dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- if (dev) {
- pci_dev_put(dev);
+ if (!no_pci_devices())
return -ENODEV;
- }
if (!request_region(pc110pad_io, 4, "pc110pad")) {
printk(KERN_ERR "pc110pad: I/O area %#x-%#x in use.\n",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 6fa2deff7446..83ed2d56b924 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -151,6 +151,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
},
},
+ {
+ .ident = "HP DV9700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ },
+ },
{ }
};
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index bb6486a8c070..b01fd61dadcc 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -29,6 +29,51 @@ config TOUCHSCREEN_ADS7846
To compile this driver as a module, choose M here: the
module will be called ads7846.
+config TOUCHSCREEN_AD7877
+ tristate "AD7877 based touchscreens"
+ depends on SPI_MASTER
+ help
+ Say Y here if you have a touchscreen interface using the
+ AD7877 controller, and your board-specific initialization
+ code includes that in its table of SPI devices.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7877.
+
+config TOUCHSCREEN_AD7879_I2C
+ tristate "AD7879 based touchscreens: AD7879-1 I2C Interface"
+ depends on I2C
+ select TOUCHSCREEN_AD7879
+ help
+ Say Y here if you have a touchscreen interface using the
+ AD7879-1 controller, and your board-specific initialization
+ code includes that in its table of I2C devices.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7879.
+
+config TOUCHSCREEN_AD7879_SPI
+ tristate "AD7879 based touchscreens: AD7879 SPI Interface"
+ depends on SPI_MASTER && TOUCHSCREEN_AD7879_I2C = n
+ select TOUCHSCREEN_AD7879
+ help
+ Say Y here if you have a touchscreen interface using the
+ AD7879 controller, and your board-specific initialization
+ code includes that in its table of SPI devices.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7879.
+
+config TOUCHSCREEN_AD7879
+ tristate
+ default n
+
config TOUCHSCREEN_BITSY
tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
depends on SA1100_BITSY
@@ -308,6 +353,19 @@ config TOUCHSCREEN_WM97XX_MAINSTONE
To compile this driver as a module, choose M here: the
module will be called mainstone-wm97xx.
+config TOUCHSCREEN_WM97XX_ZYLONITE
+ tristate "Zylonite accelerated touch"
+ depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE
+ select TOUCHSCREEN_WM9713
+ help
+ Say Y here for support for streaming mode with the touchscreen
+ on Zylonite systems.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zylonite-wm97xx.
+
config TOUCHSCREEN_USB_COMPOSITE
tristate "USB Touchscreen Driver"
depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index d3375aff46fe..6700f7b9d165 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,6 +6,8 @@
wm97xx-ts-y := wm97xx-core.o
+obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
+obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
@@ -34,3 +36,4 @@ wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
+obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
new file mode 100644
index 000000000000..e4728a28f492
--- /dev/null
+++ b/drivers/input/touchscreen/ad7877.c
@@ -0,0 +1,844 @@
+/*
+ * Copyright (C) 2006-2008 Michael Hennerich, Analog Devices Inc.
+ *
+ * Description: AD7877 based touchscreen, sensor (ADCs), DAC and GPIO driver
+ * Based on: ads7846.c
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * History:
+ * Copyright (c) 2005 David Brownell
+ * Copyright (c) 2006 Nokia Corporation
+ * Various changes: Imre Deak <imre.deak@nokia.com>
+ *
+ * Using code from:
+ * - corgi_ts.c
+ * Copyright (C) 2004-2005 Richard Purdie
+ * - omap_ts.[hc], ads7846.h, ts_osk.c
+ * Copyright (C) 2002 MontaVista Software
+ * Copyright (C) 2004 Texas Instruments
+ * Copyright (C) 2005 Dirk Behme
+ */
+
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/ad7877.h>
+#include <asm/irq.h>
+
+#define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50)
+
+#define MAX_SPI_FREQ_HZ 20000000
+#define MAX_12BIT ((1<<12)-1)
+
+#define AD7877_REG_ZEROS 0
+#define AD7877_REG_CTRL1 1
+#define AD7877_REG_CTRL2 2
+#define AD7877_REG_ALERT 3
+#define AD7877_REG_AUX1HIGH 4
+#define AD7877_REG_AUX1LOW 5
+#define AD7877_REG_BAT1HIGH 6
+#define AD7877_REG_BAT1LOW 7
+#define AD7877_REG_BAT2HIGH 8
+#define AD7877_REG_BAT2LOW 9
+#define AD7877_REG_TEMP1HIGH 10
+#define AD7877_REG_TEMP1LOW 11
+#define AD7877_REG_SEQ0 12
+#define AD7877_REG_SEQ1 13
+#define AD7877_REG_DAC 14
+#define AD7877_REG_NONE1 15
+#define AD7877_REG_EXTWRITE 15
+#define AD7877_REG_XPLUS 16
+#define AD7877_REG_YPLUS 17
+#define AD7877_REG_Z2 18
+#define AD7877_REG_aux1 19
+#define AD7877_REG_aux2 20
+#define AD7877_REG_aux3 21
+#define AD7877_REG_bat1 22
+#define AD7877_REG_bat2 23
+#define AD7877_REG_temp1 24
+#define AD7877_REG_temp2 25
+#define AD7877_REG_Z1 26
+#define AD7877_REG_GPIOCTRL1 27
+#define AD7877_REG_GPIOCTRL2 28
+#define AD7877_REG_GPIODATA 29
+#define AD7877_REG_NONE2 30
+#define AD7877_REG_NONE3 31
+
+#define AD7877_SEQ_YPLUS_BIT (1<<11)
+#define AD7877_SEQ_XPLUS_BIT (1<<10)
+#define AD7877_SEQ_Z2_BIT (1<<9)
+#define AD7877_SEQ_AUX1_BIT (1<<8)
+#define AD7877_SEQ_AUX2_BIT (1<<7)
+#define AD7877_SEQ_AUX3_BIT (1<<6)
+#define AD7877_SEQ_BAT1_BIT (1<<5)
+#define AD7877_SEQ_BAT2_BIT (1<<4)
+#define AD7877_SEQ_TEMP1_BIT (1<<3)
+#define AD7877_SEQ_TEMP2_BIT (1<<2)
+#define AD7877_SEQ_Z1_BIT (1<<1)
+
+enum {
+ AD7877_SEQ_YPOS = 0,
+ AD7877_SEQ_XPOS = 1,
+ AD7877_SEQ_Z2 = 2,
+ AD7877_SEQ_AUX1 = 3,
+ AD7877_SEQ_AUX2 = 4,
+ AD7877_SEQ_AUX3 = 5,
+ AD7877_SEQ_BAT1 = 6,
+ AD7877_SEQ_BAT2 = 7,
+ AD7877_SEQ_TEMP1 = 8,
+ AD7877_SEQ_TEMP2 = 9,
+ AD7877_SEQ_Z1 = 10,
+ AD7877_NR_SENSE = 11,
+};
+
+/* DAC Register Default RANGE 0 to Vcc, Voltage Mode, DAC On */
+#define AD7877_DAC_CONF 0x1
+
+/* If gpio3 is set AUX3/GPIO3 acts as GPIO Output */
+#define AD7877_EXTW_GPIO_3_CONF 0x1C4
+#define AD7877_EXTW_GPIO_DATA 0x200
+
+/* Control REG 2 */
+#define AD7877_TMR(x) ((x & 0x3) << 0)
+#define AD7877_REF(x) ((x & 0x1) << 2)
+#define AD7877_POL(x) ((x & 0x1) << 3)
+#define AD7877_FCD(x) ((x & 0x3) << 4)
+#define AD7877_PM(x) ((x & 0x3) << 6)
+#define AD7877_ACQ(x) ((x & 0x3) << 8)
+#define AD7877_AVG(x) ((x & 0x3) << 10)
+
+/* Control REG 1 */
+#define AD7877_SER (1 << 11) /* non-differential */
+#define AD7877_DFR (0 << 11) /* differential */
+
+#define AD7877_MODE_NOC (0) /* Do not convert */
+#define AD7877_MODE_SCC (1) /* Single channel conversion */
+#define AD7877_MODE_SEQ0 (2) /* Sequence 0 in Slave Mode */
+#define AD7877_MODE_SEQ1 (3) /* Sequence 1 in Master Mode */
+
+#define AD7877_CHANADD(x) ((x&0xF)<<7)
+#define AD7877_READADD(x) ((x)<<2)
+#define AD7877_WRITEADD(x) ((x)<<12)
+
+#define AD7877_READ_CHAN(x) (AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_SER | \
+ AD7877_MODE_SCC | AD7877_CHANADD(AD7877_REG_ ## x) | \
+ AD7877_READADD(AD7877_REG_ ## x))
+
+#define AD7877_MM_SEQUENCE (AD7877_SEQ_YPLUS_BIT | AD7877_SEQ_XPLUS_BIT | \
+ AD7877_SEQ_Z2_BIT | AD7877_SEQ_Z1_BIT)
+
+/*
+ * Non-touchscreen sensors only use single-ended conversions.
+ */
+
+struct ser_req {
+ u16 reset;
+ u16 ref_on;
+ u16 command;
+ u16 sample;
+ struct spi_message msg;
+ struct spi_transfer xfer[6];
+};
+
+struct ad7877 {
+ struct input_dev *input;
+ char phys[32];
+
+ struct spi_device *spi;
+ u16 model;
+ u16 vref_delay_usecs;
+ u16 x_plate_ohms;
+ u16 pressure_max;
+
+ u16 cmd_crtl1;
+ u16 cmd_crtl2;
+ u16 cmd_dummy;
+ u16 dac;
+
+ u8 stopacq_polarity;
+ u8 first_conversion_delay;
+ u8 acquisition_time;
+ u8 averaging;
+ u8 pen_down_acc_interval;
+
+ u16 conversion_data[AD7877_NR_SENSE];
+
+ struct spi_transfer xfer[AD7877_NR_SENSE + 2];
+ struct spi_message msg;
+
+ struct mutex mutex;
+ unsigned disabled:1; /* P: mutex */
+ unsigned gpio3:1; /* P: mutex */
+ unsigned gpio4:1; /* P: mutex */
+
+ spinlock_t lock;
+ struct timer_list timer; /* P: lock */
+ unsigned pending:1; /* P: lock */
+};
+
+static int gpio3;
+module_param(gpio3, int, 0);
+MODULE_PARM_DESC(gpio3, "If gpio3 is set to 1 AUX3 acts as GPIO3");
+
+/*
+ * ad7877_read/write are only used for initial setup and for sysfs controls.
+ * The main traffic is done using spi_async() in the interrupt handler.
+ */
+
+static int ad7877_read(struct spi_device *spi, u16 reg)
+{
+ struct ser_req *req;
+ int status, ret;
+
+ req = kzalloc(sizeof *req, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ spi_message_init(&req->msg);
+
+ req->command = (u16) (AD7877_WRITEADD(AD7877_REG_CTRL1) |
+ AD7877_READADD(reg));
+ req->xfer[0].tx_buf = &req->command;
+ req->xfer[0].len = 2;
+
+ req->xfer[1].rx_buf = &req->sample;
+ req->xfer[1].len = 2;
+
+ spi_message_add_tail(&req->xfer[0], &req->msg);
+ spi_message_add_tail(&req->xfer[1], &req->msg);
+
+ status = spi_sync(spi, &req->msg);
+ ret = status ? : req->sample;
+
+ kfree(req);
+
+ return ret;
+}
+
+static int ad7877_write(struct spi_device *spi, u16 reg, u16 val)
+{
+ struct ser_req *req;
+ int status;
+
+ req = kzalloc(sizeof *req, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ spi_message_init(&req->msg);
+
+ req->command = (u16) (AD7877_WRITEADD(reg) | (val & MAX_12BIT));
+ req->xfer[0].tx_buf = &req->command;
+ req->xfer[0].len = 2;
+
+ spi_message_add_tail(&req->xfer[0], &req->msg);
+
+ status = spi_sync(spi, &req->msg);
+
+ kfree(req);
+
+ return status;
+}
+
+static int ad7877_read_adc(struct spi_device *spi, unsigned command)
+{
+ struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+ struct ser_req *req;
+ int status;
+ int sample;
+ int i;
+
+ req = kzalloc(sizeof *req, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ spi_message_init(&req->msg);
+
+ /* activate reference, so it has time to settle; */
+ req->ref_on = AD7877_WRITEADD(AD7877_REG_CTRL2) |
+ AD7877_POL(ts->stopacq_polarity) |
+ AD7877_AVG(0) | AD7877_PM(2) | AD7877_TMR(0) |
+ AD7877_ACQ(ts->acquisition_time) | AD7877_FCD(0);
+
+ req->reset = AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_MODE_NOC;
+
+ req->command = (u16) command;
+
+ req->xfer[0].tx_buf = &req->reset;
+ req->xfer[0].len = 2;
+
+ req->xfer[1].tx_buf = &req->ref_on;
+ req->xfer[1].len = 2;
+ req->xfer[1].delay_usecs = ts->vref_delay_usecs;
+
+ req->xfer[2].tx_buf = &req->command;
+ req->xfer[2].len = 2;
+ req->xfer[2].delay_usecs = ts->vref_delay_usecs;
+
+ req->xfer[3].rx_buf = &req->sample;
+ req->xfer[3].len = 2;
+
+ req->xfer[4].tx_buf = &ts->cmd_crtl2; /*REF OFF*/
+ req->xfer[4].len = 2;
+
+ req->xfer[5].tx_buf = &ts->cmd_crtl1; /*DEFAULT*/
+ req->xfer[5].len = 2;
+
+ /*
+ * Group all the transfers together so nothing can interfere with
+ * reading the touchscreen state; disable penirq while sampling.
+ */
+ for (i = 0; i < 6; i++)
+ spi_message_add_tail(&req->xfer[i], &req->msg);
+
+ status = spi_sync(spi, &req->msg);
+ sample = req->sample;
+
+ kfree(req);
+
+ return status ? : sample;
+}
+
+static void ad7877_rx(struct ad7877 *ts)
+{
+ struct input_dev *input_dev = ts->input;
+ unsigned Rt;
+ u16 x, y, z1, z2;
+
+ x = ts->conversion_data[AD7877_SEQ_XPOS] & MAX_12BIT;
+ y = ts->conversion_data[AD7877_SEQ_YPOS] & MAX_12BIT;
+ z1 = ts->conversion_data[AD7877_SEQ_Z1] & MAX_12BIT;
+ z2 = ts->conversion_data[AD7877_SEQ_Z2] & MAX_12BIT;
+
+ /*
+ * The samples processed here are already preprocessed by the AD7877.
+ * The preprocessing function consists of an averaging filter. The
+ * combination of 'first conversion delay' and averaging provides a
+ * robust solution, discarding the spurious noise in the signal and
+ * keeping only the data of interest. The size of the averaging filter
+ * is programmable (dev.platform_data, see linux/spi/ad7877.h). Other
+ * user-programmable conversion controls include variable acquisition
+ * time and first conversion delay. Up to 16 averages can be taken per
+ * conversion.
+ */
+
+ if (likely(x && z1)) {
+ /* compute touch pressure resistance using equation #1 */
+ Rt = (z2 - z1) * x * ts->x_plate_ohms;
+ Rt /= z1;
+ Rt = (Rt + 2047) >> 12;
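+
+ /*
+ * Worked example (illustrative values, not from a real panel):
+ * x_plate_ohms = 400, x = 2048, z1 = 400, z2 = 600 gives
+ * Rt = (600 - 400) * 2048 * 400 / 400 = 409600, and
+ * (409600 + 2047) >> 12 = 100 ohms.
+ */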
+
+ input_report_abs(input_dev, ABS_X, x);
+ input_report_abs(input_dev, ABS_Y, y);
+ input_report_abs(input_dev, ABS_PRESSURE, Rt);
+ input_sync(input_dev);
+ }
+}
+
+static inline void ad7877_ts_event_release(struct ad7877 *ts)
+{
+ struct input_dev *input_dev = ts->input;
+
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+}
+
+static void ad7877_timer(unsigned long handle)
+{
+ struct ad7877 *ts = (void *)handle;
+
+ ad7877_ts_event_release(ts);
+}
+
+static irqreturn_t ad7877_irq(int irq, void *handle)
+{
+ struct ad7877 *ts = handle;
+ unsigned long flags;
+ int status;
+
+ /*
+ * The repeated conversion sequencer controlled by TMR kicked off
+ * too fast. We ignore the last and process the sample sequence
+ * currently in the queue. It can't be older than 9.4ms, and we
+ * need to ensure that ts->msg isn't issued twice while still in
+ * flight.
+ */
+
+ spin_lock_irqsave(&ts->lock, flags);
+ if (!ts->pending) {
+ ts->pending = 1;
+
+ status = spi_async(ts->spi, &ts->msg);
+ if (status)
+ dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
+ }
+ spin_unlock_irqrestore(&ts->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void ad7877_callback(void *_ts)
+{
+ struct ad7877 *ts = _ts;
+
+ spin_lock_irq(&ts->lock);
+
+ ad7877_rx(ts);
+ ts->pending = 0;
+ mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT);
+
+ spin_unlock_irq(&ts->lock);
+}
+
+static void ad7877_disable(struct ad7877 *ts)
+{
+ mutex_lock(&ts->mutex);
+
+ if (!ts->disabled) {
+ ts->disabled = 1;
+ disable_irq(ts->spi->irq);
+
+ /* Wait for spi_async callback */
+ while (ts->pending)
+ msleep(1);
+
+ if (del_timer_sync(&ts->timer))
+ ad7877_ts_event_release(ts);
+ }
+
+ /*
+ * We know the chip is in low-power mode, since we always
+ * leave it that way after every request.
+ */
+
+ mutex_unlock(&ts->mutex);
+}
+
+static void ad7877_enable(struct ad7877 *ts)
+{
+ mutex_lock(&ts->mutex);
+
+ if (ts->disabled) {
+ ts->disabled = 0;
+ enable_irq(ts->spi->irq);
+ }
+
+ mutex_unlock(&ts->mutex);
+}
+
+#define SHOW(name) static ssize_t \
+name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct ad7877 *ts = dev_get_drvdata(dev); \
+ ssize_t v = ad7877_read_adc(ts->spi, \
+ AD7877_READ_CHAN(name)); \
+ if (v < 0) \
+ return v; \
+ return sprintf(buf, "%u\n", (unsigned) v); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, name ## _show, NULL);
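+
+/*
+ * e.g. SHOW(aux1) expands to an aux1_show() helper that samples the AUX1
+ * channel via ad7877_read_adc() plus a read-only 'aux1' device attribute.
+ */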
+
+SHOW(aux1)
+SHOW(aux2)
+SHOW(aux3)
+SHOW(bat1)
+SHOW(bat2)
+SHOW(temp1)
+SHOW(temp2)
+
+static ssize_t ad7877_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->disabled);
+}
+
+static ssize_t ad7877_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (val)
+ ad7877_disable(ts);
+ else
+ ad7877_enable(ts);
+
+ return count;
+}
+
+static DEVICE_ATTR(disable, 0664, ad7877_disable_show, ad7877_disable_store);
+
+static ssize_t ad7877_dac_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->dac);
+}
+
+static ssize_t ad7877_dac_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 10, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ts->mutex);
+ ts->dac = val & 0xFF;
+ ad7877_write(ts->spi, AD7877_REG_DAC, (ts->dac << 4) | AD7877_DAC_CONF);
+ mutex_unlock(&ts->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(dac, 0664, ad7877_dac_show, ad7877_dac_store);
+
+static ssize_t ad7877_gpio3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->gpio3);
+}
+
+static ssize_t ad7877_gpio3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 10, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ts->mutex);
+ ts->gpio3 = !!val;
+ ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA |
+ (ts->gpio4 << 4) | (ts->gpio3 << 5));
+ mutex_unlock(&ts->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(gpio3, 0664, ad7877_gpio3_show, ad7877_gpio3_store);
+
+static ssize_t ad7877_gpio4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->gpio4);
+}
+
+static ssize_t ad7877_gpio4_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ad7877 *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 10, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ts->mutex);
+ ts->gpio4 = !!val;
+ ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA |
+ (ts->gpio4 << 4) | (ts->gpio3 << 5));
+ mutex_unlock(&ts->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(gpio4, 0664, ad7877_gpio4_show, ad7877_gpio4_store);
+
+static struct attribute *ad7877_attributes[] = {
+ &dev_attr_temp1.attr,
+ &dev_attr_temp2.attr,
+ &dev_attr_aux1.attr,
+ &dev_attr_aux2.attr,
+ &dev_attr_bat1.attr,
+ &dev_attr_bat2.attr,
+ &dev_attr_disable.attr,
+ &dev_attr_dac.attr,
+ &dev_attr_gpio4.attr,
+ NULL
+};
+
+static const struct attribute_group ad7877_attr_group = {
+ .attrs = ad7877_attributes,
+};
+
+static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
+{
+ struct spi_message *m;
+ int i;
+
+ ts->cmd_crtl2 = AD7877_WRITEADD(AD7877_REG_CTRL2) |
+ AD7877_POL(ts->stopacq_polarity) |
+ AD7877_AVG(ts->averaging) | AD7877_PM(1) |
+ AD7877_TMR(ts->pen_down_acc_interval) |
+ AD7877_ACQ(ts->acquisition_time) |
+ AD7877_FCD(ts->first_conversion_delay);
+
+ ad7877_write(spi, AD7877_REG_CTRL2, ts->cmd_crtl2);
+
+ ts->cmd_crtl1 = AD7877_WRITEADD(AD7877_REG_CTRL1) |
+ AD7877_READADD(AD7877_REG_XPLUS-1) |
+ AD7877_MODE_SEQ1 | AD7877_DFR;
+
+ ad7877_write(spi, AD7877_REG_CTRL1, ts->cmd_crtl1);
+
+ ts->cmd_dummy = 0;
+
+ m = &ts->msg;
+
+ spi_message_init(m);
+
+ m->complete = ad7877_callback;
+ m->context = ts;
+
+ ts->xfer[0].tx_buf = &ts->cmd_crtl1;
+ ts->xfer[0].len = 2;
+
+ spi_message_add_tail(&ts->xfer[0], m);
+
+ ts->xfer[1].tx_buf = &ts->cmd_dummy; /* Send ZERO */
+ ts->xfer[1].len = 2;
+
+ spi_message_add_tail(&ts->xfer[1], m);
+
+ for (i = 0; i < 11; i++) {
+ ts->xfer[i + 2].rx_buf = &ts->conversion_data[AD7877_SEQ_YPOS + i];
+ ts->xfer[i + 2].len = 2;
+ spi_message_add_tail(&ts->xfer[i + 2], m);
+ }
+}
+
+static int __devinit ad7877_probe(struct spi_device *spi)
+{
+ struct ad7877 *ts;
+ struct input_dev *input_dev;
+ struct ad7877_platform_data *pdata = spi->dev.platform_data;
+ int err;
+ u16 verify;
+
+ if (!spi->irq) {
+ dev_dbg(&spi->dev, "no IRQ?\n");
+ return -ENODEV;
+ }
+
+ if (!pdata) {
+ dev_dbg(&spi->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ /* don't exceed max specified SPI CLK frequency */
+ if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) {
+ dev_dbg(&spi->dev, "SPI CLK %d Hz?\n",spi->max_speed_hz);
+ return -EINVAL;
+ }
+
+ ts = kzalloc(sizeof(struct ad7877), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ts || !input_dev) {
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ dev_set_drvdata(&spi->dev, ts);
+ ts->spi = spi;
+ ts->input = input_dev;
+
+ setup_timer(&ts->timer, ad7877_timer, (unsigned long) ts);
+ mutex_init(&ts->mutex);
+ spin_lock_init(&ts->lock);
+
+ ts->model = pdata->model ? : 7877;
+ ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
+ ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
+ ts->pressure_max = pdata->pressure_max ? : ~0;
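+
+ /*
+ * The "?:" GNU C extension keeps the left-hand operand when it is
+ * non-zero, so unset platform-data fields fall back to the defaults
+ * above (e.g. 400 ohms for the X plate resistance).
+ */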
+
+ ts->stopacq_polarity = pdata->stopacq_polarity;
+ ts->first_conversion_delay = pdata->first_conversion_delay;
+ ts->acquisition_time = pdata->acquisition_time;
+ ts->averaging = pdata->averaging;
+ ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
+
+ snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev));
+
+ input_dev->name = "AD7877 Touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->dev.parent = &spi->dev;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(ABS_X, input_dev->absbit);
+ __set_bit(ABS_Y, input_dev->absbit);
+ __set_bit(ABS_PRESSURE, input_dev->absbit);
+
+ input_set_abs_params(input_dev, ABS_X,
+ pdata->x_min ? : 0,
+ pdata->x_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ pdata->y_min ? : 0,
+ pdata->y_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ pdata->pressure_min, pdata->pressure_max, 0, 0);
+
+ ad7877_write(spi, AD7877_REG_SEQ1, AD7877_MM_SEQUENCE);
+
+ verify = ad7877_read(spi, AD7877_REG_SEQ1);
+
+ if (verify != AD7877_MM_SEQUENCE) {
+ dev_err(&spi->dev, "%s: Failed to probe %s\n",
+ dev_name(&spi->dev), input_dev->name);
+ err = -ENODEV;
+ goto err_free_mem;
+ }
+
+ if (gpio3)
+ ad7877_write(spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_3_CONF);
+
+ ad7877_setup_ts_def_msg(spi, ts);
+
+ /* Request AD7877 /DAV GPIO interrupt */
+
+ err = request_irq(spi->irq, ad7877_irq, IRQF_TRIGGER_FALLING |
+ IRQF_SAMPLE_RANDOM, spi->dev.driver->name, ts);
+ if (err) {
+ dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
+ goto err_free_mem;
+ }
+
+ err = sysfs_create_group(&spi->dev.kobj, &ad7877_attr_group);
+ if (err)
+ goto err_free_irq;
+
+ err = device_create_file(&spi->dev,
+ gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
+ if (err)
+ goto err_remove_attr_group;
+
+ err = input_register_device(input_dev);
+ if (err)
+ goto err_remove_attr;
+
+ return 0;
+
+err_remove_attr:
+ device_remove_file(&spi->dev,
+ gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
+err_remove_attr_group:
+ sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group);
+err_free_irq:
+ free_irq(spi->irq, ts);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(ts);
+ dev_set_drvdata(&spi->dev, NULL);
+ return err;
+}
+
+static int __devexit ad7877_remove(struct spi_device *spi)
+{
+ struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+
+ sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group);
+ device_remove_file(&spi->dev,
+ gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
+
+ ad7877_disable(ts);
+ free_irq(ts->spi->irq, ts);
+
+ input_unregister_device(ts->input);
+ kfree(ts);
+
+ dev_dbg(&spi->dev, "unregistered touchscreen\n");
+ dev_set_drvdata(&spi->dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ad7877_suspend(struct spi_device *spi, pm_message_t message)
+{
+ struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+
+ ad7877_disable(ts);
+
+ return 0;
+}
+
+static int ad7877_resume(struct spi_device *spi)
+{
+ struct ad7877 *ts = dev_get_drvdata(&spi->dev);
+
+ ad7877_enable(ts);
+
+ return 0;
+}
+#else
+#define ad7877_suspend NULL
+#define ad7877_resume NULL
+#endif
+
+static struct spi_driver ad7877_driver = {
+ .driver = {
+ .name = "ad7877",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7877_probe,
+ .remove = __devexit_p(ad7877_remove),
+ .suspend = ad7877_suspend,
+ .resume = ad7877_resume,
+};
+
+static int __init ad7877_init(void)
+{
+ return spi_register_driver(&ad7877_driver);
+}
+module_init(ad7877_init);
+
+static void __exit ad7877_exit(void)
+{
+ spi_unregister_driver(&ad7877_driver);
+}
+module_exit(ad7877_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("AD7877 touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
new file mode 100644
index 000000000000..ea4c61d68683
--- /dev/null
+++ b/drivers/input/touchscreen/ad7879.c
@@ -0,0 +1,782 @@
+/*
+ * Copyright (C) 2008 Michael Hennerich, Analog Devices Inc.
+ *
+ * Description: AD7879 based touchscreen, and GPIO driver (I2C/SPI Interface)
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * History:
+ * Copyright (c) 2005 David Brownell
+ * Copyright (c) 2006 Nokia Corporation
+ * Various changes: Imre Deak <imre.deak@nokia.com>
+ *
+ * Using code from:
+ * - corgi_ts.c
+ * Copyright (C) 2004-2005 Richard Purdie
+ * - omap_ts.[hc], ads7846.h, ts_osk.c
+ * Copyright (C) 2002 MontaVista Software
+ * Copyright (C) 2004 Texas Instruments
+ * Copyright (C) 2005 Dirk Behme
+ * - ad7877.c
+ * Copyright (C) 2006-2008 Analog Devices Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+
+#include <linux/spi/ad7879.h>
+
+#define AD7879_REG_ZEROS 0
+#define AD7879_REG_CTRL1 1
+#define AD7879_REG_CTRL2 2
+#define AD7879_REG_CTRL3 3
+#define AD7879_REG_AUX1HIGH 4
+#define AD7879_REG_AUX1LOW 5
+#define AD7879_REG_TEMP1HIGH 6
+#define AD7879_REG_TEMP1LOW 7
+#define AD7879_REG_XPLUS 8
+#define AD7879_REG_YPLUS 9
+#define AD7879_REG_Z1 10
+#define AD7879_REG_Z2 11
+#define AD7879_REG_AUXVBAT 12
+#define AD7879_REG_TEMP 13
+#define AD7879_REG_REVID 14
+
+/* Control REG 1 */
+#define AD7879_TMR(x) ((x & 0xFF) << 0)
+#define AD7879_ACQ(x) ((x & 0x3) << 8)
+#define AD7879_MODE_NOC (0 << 10) /* Do not convert */
+#define AD7879_MODE_SCC (1 << 10) /* Single channel conversion */
+#define AD7879_MODE_SEQ0 (2 << 10) /* Sequence 0 in Slave Mode */
+#define AD7879_MODE_SEQ1 (3 << 10) /* Sequence 1 in Master Mode */
+#define AD7879_MODE_INT (1 << 15) /* PENIRQ disabled INT enabled */
+
+/* Control REG 2 */
+#define AD7879_FCD(x) ((x & 0x3) << 0)
+#define AD7879_RESET (1 << 4)
+#define AD7879_MFS(x) ((x & 0x3) << 5)
+#define AD7879_AVG(x) ((x & 0x3) << 7)
+#define AD7879_SER (1 << 9) /* non-differential */
+#define AD7879_DFR (0 << 9) /* differential */
+#define AD7879_GPIOPOL (1 << 10)
+#define AD7879_GPIODIR (1 << 11)
+#define AD7879_GPIO_DATA (1 << 12)
+#define AD7879_GPIO_EN (1 << 13)
+#define AD7879_PM(x) ((x & 0x3) << 14)
+#define AD7879_PM_SHUTDOWN (0)
+#define AD7879_PM_DYN (1)
+#define AD7879_PM_FULLON (2)
+
+/* Control REG 3 */
+#define AD7879_TEMPMASK_BIT (1<<15)
+#define AD7879_AUXVBATMASK_BIT (1<<14)
+#define AD7879_INTMODE_BIT (1<<13)
+#define AD7879_GPIOALERTMASK_BIT (1<<12)
+#define AD7879_AUXLOW_BIT (1<<11)
+#define AD7879_AUXHIGH_BIT (1<<10)
+#define AD7879_TEMPLOW_BIT (1<<9)
+#define AD7879_TEMPHIGH_BIT (1<<8)
+#define AD7879_YPLUS_BIT (1<<7)
+#define AD7879_XPLUS_BIT (1<<6)
+#define AD7879_Z1_BIT (1<<5)
+#define AD7879_Z2_BIT (1<<4)
+#define AD7879_AUX_BIT (1<<3)
+#define AD7879_VBAT_BIT (1<<2)
+#define AD7879_TEMP_BIT (1<<1)
+
+enum {
+ AD7879_SEQ_XPOS = 0,
+ AD7879_SEQ_YPOS = 1,
+ AD7879_SEQ_Z1 = 2,
+ AD7879_SEQ_Z2 = 3,
+ AD7879_NR_SENSE = 4,
+};
+
+#define MAX_12BIT ((1<<12)-1)
+#define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50)
+
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+#define AD7879_DEVID 0x7A
+typedef struct spi_device bus_device;
+#elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
+#define AD7879_DEVID 0x79
+typedef struct i2c_client bus_device;
+#endif
+
+struct ad7879 {
+ bus_device *bus;
+ struct input_dev *input;
+ struct work_struct work;
+ struct timer_list timer;
+
+ struct mutex mutex;
+ unsigned disabled:1; /* P: mutex */
+
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+ struct spi_message msg;
+ struct spi_transfer xfer[AD7879_NR_SENSE + 1];
+ u16 cmd;
+#endif
+ u16 conversion_data[AD7879_NR_SENSE];
+ char phys[32];
+ u8 first_conversion_delay;
+ u8 acquisition_time;
+ u8 averaging;
+ u8 pen_down_acc_interval;
+ u8 median;
+ u16 x_plate_ohms;
+ u16 pressure_max;
+ u16 gpio_init;
+ u16 cmd_crtl1;
+ u16 cmd_crtl2;
+ u16 cmd_crtl3;
+ unsigned gpio:1;
+};
+
+static int ad7879_read(bus_device *, u8);
+static int ad7879_write(bus_device *, u8, u16);
+static void ad7879_collect(struct ad7879 *);
+
+static void ad7879_report(struct ad7879 *ts)
+{
+ struct input_dev *input_dev = ts->input;
+ unsigned Rt;
+ u16 x, y, z1, z2;
+
+ x = ts->conversion_data[AD7879_SEQ_XPOS] & MAX_12BIT;
+ y = ts->conversion_data[AD7879_SEQ_YPOS] & MAX_12BIT;
+ z1 = ts->conversion_data[AD7879_SEQ_Z1] & MAX_12BIT;
+ z2 = ts->conversion_data[AD7879_SEQ_Z2] & MAX_12BIT;
+
+ /*
+ * The samples processed here are already preprocessed by the AD7879.
+ * The preprocessing function consists of a median and an averaging
+ * filter. The combination of these two techniques provides a robust
+ * solution, discarding the spurious noise in the signal and keeping
+ * only the data of interest. The size of both filters is programmable
+ * (dev.platform_data, see linux/spi/ad7879.h). Other user-programmable
+ * conversion controls include variable acquisition time and first
+ * conversion delay. Up to 16 averages can be taken per conversion.
+ */
+
+ if (likely(x && z1)) {
+ /* compute touch pressure resistance using equation #1 */
+ Rt = (z2 - z1) * x * ts->x_plate_ohms;
+ Rt /= z1;
+ Rt = (Rt + 2047) >> 12;
+
+ input_report_abs(input_dev, ABS_X, x);
+ input_report_abs(input_dev, ABS_Y, y);
+ input_report_abs(input_dev, ABS_PRESSURE, Rt);
+ input_sync(input_dev);
+ }
+}
+
+static void ad7879_work(struct work_struct *work)
+{
+ struct ad7879 *ts = container_of(work, struct ad7879, work);
+
+ /* use keventd context to read the result registers */
+ ad7879_collect(ts);
+ ad7879_report(ts);
+ mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT);
+}
+
+static void ad7879_ts_event_release(struct ad7879 *ts)
+{
+ struct input_dev *input_dev = ts->input;
+
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+}
+
+static void ad7879_timer(unsigned long handle)
+{
+ struct ad7879 *ts = (void *)handle;
+
+ ad7879_ts_event_release(ts);
+}
+
+static irqreturn_t ad7879_irq(int irq, void *handle)
+{
+ struct ad7879 *ts = handle;
+
+ /*
+ * The repeated conversion sequencer controlled by TMR kicked off
+ * too fast. We ignore the last and process the sample sequence
+ * currently in the queue. It can't be older than 9.4ms.
+ */
+
+ if (!work_pending(&ts->work))
+ schedule_work(&ts->work);
+
+ return IRQ_HANDLED;
+}
+
+static void ad7879_setup(struct ad7879 *ts)
+{
+ ts->cmd_crtl3 = AD7879_YPLUS_BIT |
+ AD7879_XPLUS_BIT |
+ AD7879_Z2_BIT |
+ AD7879_Z1_BIT |
+ AD7879_TEMPMASK_BIT |
+ AD7879_AUXVBATMASK_BIT |
+ AD7879_GPIOALERTMASK_BIT;
+
+ ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR |
+ AD7879_AVG(ts->averaging) |
+ AD7879_MFS(ts->median) |
+ AD7879_FCD(ts->first_conversion_delay) |
+ ts->gpio_init;
+
+ ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 |
+ AD7879_ACQ(ts->acquisition_time) |
+ AD7879_TMR(ts->pen_down_acc_interval);
+
+ ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
+ ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3);
+ ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1);
+}
+
+static void ad7879_disable(struct ad7879 *ts)
+{
+ mutex_lock(&ts->mutex);
+
+ if (!ts->disabled) {
+ ts->disabled = 1;
+ disable_irq(ts->bus->irq);
+
+ cancel_work_sync(&ts->work);
+
+ if (del_timer_sync(&ts->timer))
+ ad7879_ts_event_release(ts);
+
+ ad7879_write(ts->bus, AD7879_REG_CTRL2,
+ AD7879_PM(AD7879_PM_SHUTDOWN));
+ }
+
+ mutex_unlock(&ts->mutex);
+}
+
+static void ad7879_enable(struct ad7879 *ts)
+{
+ mutex_lock(&ts->mutex);
+
+ if (ts->disabled) {
+ ad7879_setup(ts);
+ ts->disabled = 0;
+ enable_irq(ts->bus->irq);
+ }
+
+ mutex_unlock(&ts->mutex);
+}
+
+static ssize_t ad7879_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->disabled);
+}
+
+static ssize_t ad7879_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (val)
+ ad7879_disable(ts);
+ else
+ ad7879_enable(ts);
+
+ return count;
+}
+
+static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store);
+
+static ssize_t ad7879_gpio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->gpio);
+}
+
+static ssize_t ad7879_gpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 10, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ts->mutex);
+ ts->gpio = !!val;
+ error = ad7879_write(ts->bus, AD7879_REG_CTRL2,
+ ts->gpio ?
+ ts->cmd_crtl2 & ~AD7879_GPIO_DATA :
+ ts->cmd_crtl2 | AD7879_GPIO_DATA);
+ mutex_unlock(&ts->mutex);
+
+ return error ? : count;
+}
+
+static DEVICE_ATTR(gpio, 0664, ad7879_gpio_show, ad7879_gpio_store);
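+
+/*
+ * Note the inversion in ad7879_gpio_store() above: writing 1 clears
+ * AD7879_GPIO_DATA in CTRL2 and writing 0 sets it, i.e. the data bit
+ * appears to be driven active-low.
+ */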
+
+static struct attribute *ad7879_attributes[] = {
+ &dev_attr_disable.attr,
+ &dev_attr_gpio.attr,
+ NULL
+};
+
+static const struct attribute_group ad7879_attr_group = {
+ .attrs = ad7879_attributes,
+};
+
+static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
+{
+ struct input_dev *input_dev;
+ struct ad7879_platform_data *pdata = bus->dev.platform_data;
+ int err;
+ u16 revid;
+
+ if (!bus->irq) {
+ dev_err(&bus->dev, "no IRQ?\n");
+ return -ENODEV;
+ }
+
+ if (!pdata) {
+ dev_err(&bus->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ ts->input = input_dev;
+
+ setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts);
+ INIT_WORK(&ts->work, ad7879_work);
+ mutex_init(&ts->mutex);
+
+ ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
+ ts->pressure_max = pdata->pressure_max ? : ~0;
+
+ ts->first_conversion_delay = pdata->first_conversion_delay;
+ ts->acquisition_time = pdata->acquisition_time;
+ ts->averaging = pdata->averaging;
+ ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
+ ts->median = pdata->median;
+
+ if (pdata->gpio_output)
+ ts->gpio_init = AD7879_GPIO_EN |
+ (pdata->gpio_default ? 0 : AD7879_GPIO_DATA);
+ else
+ ts->gpio_init = AD7879_GPIO_EN | AD7879_GPIODIR;
+
+ snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev));
+
+ input_dev->name = "AD7879 Touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->dev.parent = &bus->dev;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(ABS_X, input_dev->absbit);
+ __set_bit(ABS_Y, input_dev->absbit);
+ __set_bit(ABS_PRESSURE, input_dev->absbit);
+
+ input_set_abs_params(input_dev, ABS_X,
+ pdata->x_min ? : 0,
+ pdata->x_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ pdata->y_min ? : 0,
+ pdata->y_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ pdata->pressure_min, pdata->pressure_max, 0, 0);
+
+ err = ad7879_write(bus, AD7879_REG_CTRL2, AD7879_RESET);
+
+ if (err < 0) {
+ dev_err(&bus->dev, "Failed to write %s\n", input_dev->name);
+ goto err_free_mem;
+ }
+
+ revid = ad7879_read(bus, AD7879_REG_REVID);
+
+ if ((revid & 0xFF) != AD7879_DEVID) {
+ dev_err(&bus->dev, "Failed to probe %s\n", input_dev->name);
+ err = -ENODEV;
+ goto err_free_mem;
+ }
+
+ ad7879_setup(ts);
+
+ err = request_irq(bus->irq, ad7879_irq,
+ IRQF_TRIGGER_FALLING | IRQF_SAMPLE_RANDOM,
+ bus->dev.driver->name, ts);
+
+ if (err) {
+ dev_err(&bus->dev, "irq %d busy?\n", bus->irq);
+ goto err_free_mem;
+ }
+
+ err = sysfs_create_group(&bus->dev.kobj, &ad7879_attr_group);
+ if (err)
+ goto err_free_irq;
+
+ err = input_register_device(input_dev);
+ if (err)
+ goto err_remove_attr;
+
+ dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n",
+ revid >> 8, bus->irq);
+
+ return 0;
+
+err_remove_attr:
+ sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group);
+err_free_irq:
+ free_irq(bus->irq, ts);
+err_free_mem:
+ input_free_device(input_dev);
+
+ return err;
+}
+
+static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts)
+{
+ ad7879_disable(ts);
+ sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group);
+ free_irq(ts->bus->irq, ts);
+ input_unregister_device(ts->input);
+ dev_dbg(&bus->dev, "unregistered touchscreen\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ad7879_suspend(bus_device *bus, pm_message_t message)
+{
+ struct ad7879 *ts = dev_get_drvdata(&bus->dev);
+
+ ad7879_disable(ts);
+
+ return 0;
+}
+
+static int ad7879_resume(bus_device *bus)
+{
+ struct ad7879 *ts = dev_get_drvdata(&bus->dev);
+
+ ad7879_enable(ts);
+
+ return 0;
+}
+#else
+#define ad7879_suspend NULL
+#define ad7879_resume NULL
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+#define MAX_SPI_FREQ_HZ 5000000
+#define AD7879_CMD_MAGIC 0xE000
+#define AD7879_CMD_READ (1 << 10)
+#define AD7879_WRITECMD(reg) (AD7879_CMD_MAGIC | (reg & 0xF))
+#define AD7879_READCMD(reg) (AD7879_CMD_MAGIC | AD7879_CMD_READ | (reg & 0xF))
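+/*
+ * e.g. AD7879_READCMD(AD7879_REG_XPLUS) = 0xE000 | (1 << 10) | 8 = 0xE408:
+ * command magic, read bit, register number 8.
+ */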
+
+struct ser_req {
+ u16 command;
+ u16 data;
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+};
+
+/*
+ * ad7879_read/write are only used for initial setup and for sysfs controls.
+ * The main traffic is done in ad7879_collect().
+ */
+
+static int ad7879_read(struct spi_device *spi, u8 reg)
+{
+ struct ser_req *req;
+ int status, ret;
+
+ req = kzalloc(sizeof *req, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ spi_message_init(&req->msg);
+
+ req->command = (u16) AD7879_READCMD(reg);
+ req->xfer[0].tx_buf = &req->command;
+ req->xfer[0].len = 2;
+
+ req->xfer[1].rx_buf = &req->data;
+ req->xfer[1].len = 2;
+
+ spi_message_add_tail(&req->xfer[0], &req->msg);
+ spi_message_add_tail(&req->xfer[1], &req->msg);
+
+ status = spi_sync(spi, &req->msg);
+ ret = status ? : req->data;
+
+ kfree(req);
+
+ return ret;
+}
+
+static int ad7879_write(struct spi_device *spi, u8 reg, u16 val)
+{
+ struct ser_req *req;
+ int status;
+
+ req = kzalloc(sizeof *req, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ spi_message_init(&req->msg);
+
+ req->command = (u16) AD7879_WRITECMD(reg);
+ req->xfer[0].tx_buf = &req->command;
+ req->xfer[0].len = 2;
+
+ req->data = val;
+ req->xfer[1].tx_buf = &req->data;
+ req->xfer[1].len = 2;
+
+ spi_message_add_tail(&req->xfer[0], &req->msg);
+ spi_message_add_tail(&req->xfer[1], &req->msg);
+
+ status = spi_sync(spi, &req->msg);
+
+ kfree(req);
+
+ return status;
+}
+
+static void ad7879_collect(struct ad7879 *ts)
+{
+ int status = spi_sync(ts->bus, &ts->msg);
+
+ if (status)
+ dev_err(&ts->bus->dev, "spi_sync --> %d\n", status);
+}
+
+static void ad7879_setup_ts_def_msg(struct ad7879 *ts)
+{
+ struct spi_message *m;
+ int i;
+
+ ts->cmd = (u16) AD7879_READCMD(AD7879_REG_XPLUS);
+
+ m = &ts->msg;
+ spi_message_init(m);
+ ts->xfer[0].tx_buf = &ts->cmd;
+ ts->xfer[0].len = 2;
+
+ spi_message_add_tail(&ts->xfer[0], m);
+
+ for (i = 0; i < AD7879_NR_SENSE; i++) {
+ ts->xfer[i + 1].rx_buf = &ts->conversion_data[i];
+ ts->xfer[i + 1].len = 2;
+ spi_message_add_tail(&ts->xfer[i + 1], m);
+ }
+}
+
+static int __devinit ad7879_probe(struct spi_device *spi)
+{
+ struct ad7879 *ts;
+ int error;
+
+ /* don't exceed max specified SPI CLK frequency */
+ if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) {
+ dev_err(&spi->dev, "SPI CLK %d Hz?\n", spi->max_speed_hz);
+ return -EINVAL;
+ }
+
+ ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, ts);
+ ts->bus = spi;
+
+ ad7879_setup_ts_def_msg(ts);
+
+ error = ad7879_construct(spi, ts);
+ if (error) {
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ts);
+ }
+
+ return error;
+}
+
+static int __devexit ad7879_remove(struct spi_device *spi)
+{
+ struct ad7879 *ts = dev_get_drvdata(&spi->dev);
+
+ ad7879_destroy(spi, ts);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ts);
+
+ return 0;
+}
+
+static struct spi_driver ad7879_driver = {
+ .driver = {
+ .name = "ad7879",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7879_probe,
+ .remove = __devexit_p(ad7879_remove),
+ .suspend = ad7879_suspend,
+ .resume = ad7879_resume,
+};
+
+static int __init ad7879_init(void)
+{
+ return spi_register_driver(&ad7879_driver);
+}
+module_init(ad7879_init);
+
+static void __exit ad7879_exit(void)
+{
+ spi_unregister_driver(&ad7879_driver);
+}
+module_exit(ad7879_exit);
+
+#elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
+
+/* All registers are word-sized.
+ * AD7879 uses a high-byte first convention.
+ */
+static int ad7879_read(struct i2c_client *client, u8 reg)
+{
+ return swab16(i2c_smbus_read_word_data(client, reg));
+}
+
+static int ad7879_write(struct i2c_client *client, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(val));
+}
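+
+/*
+ * e.g. writing 0x1234: swab16() yields 0x3412, and SMBus sends the low
+ * byte first, so 0x12 then 0x34 go out on the wire -- high byte first,
+ * as the AD7879 expects.
+ */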
+
+static void ad7879_collect(struct ad7879 *ts)
+{
+ int i;
+
+ for (i = 0; i < AD7879_NR_SENSE; i++)
+ ts->conversion_data[i] = ad7879_read(ts->bus,
+ AD7879_REG_XPLUS + i);
+}
+
+static int __devinit ad7879_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad7879 *ts;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+ return -EIO;
+ }
+
+ ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ts);
+ ts->bus = client;
+
+ error = ad7879_construct(client, ts);
+ if (error) {
+ i2c_set_clientdata(client, NULL);
+ kfree(ts);
+ }
+
+ return error;
+}
+
+static int __devexit ad7879_remove(struct i2c_client *client)
+{
+ struct ad7879 *ts = dev_get_drvdata(&client->dev);
+
+ ad7879_destroy(client, ts);
+ i2c_set_clientdata(client, NULL);
+ kfree(ts);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7879_id[] = {
+ { "ad7879", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad7879_id);
+
+static struct i2c_driver ad7879_driver = {
+ .driver = {
+ .name = "ad7879",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7879_probe,
+ .remove = __devexit_p(ad7879_remove),
+ .suspend = ad7879_suspend,
+ .resume = ad7879_resume,
+ .id_table = ad7879_id,
+};
+
+static int __init ad7879_init(void)
+{
+ return i2c_add_driver(&ad7879_driver);
+}
+module_init(ad7879_init);
+
+static void __exit ad7879_exit(void)
+{
+ i2c_del_driver(&ad7879_driver);
+}
+module_exit(ad7879_exit);
+#endif
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("AD7879(-1) touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 1d11e2be9ef8..dfa6a84ab50a 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -162,6 +162,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
+ input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
input_sync(wm->input_dev);
reads++;
} while (reads < cinfo[sp_idx].reads);
@@ -245,7 +246,7 @@ static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
if (enable)
enable_irq(wm->pen_irq);
else
- disable_irq(wm->pen_irq);
+ disable_irq_nosync(wm->pen_irq);
}
static struct wm97xx_mach_ops mainstone_mach_ops = {
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 54986627def0..e868264fe799 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -151,12 +151,14 @@ static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
input_report_abs(idev, ABS_PRESSURE, pressure);
+ input_report_key(idev, BTN_TOUCH, 1);
input_sync(idev);
}
static void ucb1400_ts_event_release(struct input_dev *idev)
{
input_report_abs(idev, ABS_PRESSURE, 0);
+ input_report_key(idev, BTN_TOUCH, 0);
input_sync(idev);
}
@@ -377,7 +379,8 @@ static int ucb1400_ts_probe(struct platform_device *dev)
ucb->ts_idev->id.product = ucb->id;
ucb->ts_idev->open = ucb1400_ts_open;
ucb->ts_idev->close = ucb1400_ts_close;
- ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS);
+ ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
+ ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
ucb1400_adc_enable(ucb->ac97);
x_res = ucb1400_ts_read_xres(ucb);
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index d15aa11d7056..cec480bffe38 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -409,6 +409,7 @@ static int wm97xx_read_samples(struct wm97xx *wm)
wm->pen_is_down = 0;
dev_dbg(wm->dev, "pen up\n");
input_report_abs(wm->input_dev, ABS_PRESSURE, 0);
+ input_report_key(wm->input_dev, BTN_TOUCH, 0);
input_sync(wm->input_dev);
} else if (!(rc & RC_AGAIN)) {
/* We need high frequency updates only while
@@ -433,6 +434,7 @@ static int wm97xx_read_samples(struct wm97xx *wm)
input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff);
input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff);
input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff);
+ input_report_key(wm->input_dev, BTN_TOUCH, 1);
input_sync(wm->input_dev);
wm->pen_is_down = 1;
wm->ts_reader_interval = wm->ts_reader_min_interval;
@@ -628,18 +630,21 @@ static int wm97xx_probe(struct device *dev)
wm->input_dev->phys = "wm97xx";
wm->input_dev->open = wm97xx_ts_input_open;
wm->input_dev->close = wm97xx_ts_input_close;
- set_bit(EV_ABS, wm->input_dev->evbit);
- set_bit(ABS_X, wm->input_dev->absbit);
- set_bit(ABS_Y, wm->input_dev->absbit);
- set_bit(ABS_PRESSURE, wm->input_dev->absbit);
+
+ __set_bit(EV_ABS, wm->input_dev->evbit);
+ __set_bit(EV_KEY, wm->input_dev->evbit);
+ __set_bit(BTN_TOUCH, wm->input_dev->keybit);
+
input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
abs_x[2], 0);
input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
abs_y[2], 0);
input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
abs_p[2], 0);
+
input_set_drvdata(wm->input_dev, wm);
wm->input_dev->dev.parent = dev;
+
ret = input_register_device(wm->input_dev);
if (ret < 0)
goto dev_alloc_err;
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
new file mode 100644
index 000000000000..41e4359c277c
--- /dev/null
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -0,0 +1,240 @@
+/*
+ * zylonite-wm97xx.c -- Zylonite Continuous Touch screen driver
+ *
+ * Copyright 2004, 2007, 2008 Wolfson Microelectronics PLC.
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ * Parts Copyright : Ian Molton <spyro@f2s.com>
+ * Andrew Zabolotny <zap@homelink.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Notes:
+ * This is a wm97xx extended touch driver supporting interrupt driven
+ * and continuous operation on Marvell Zylonite development systems
+ * (which have a WM9713 on board).
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/wm97xx.h>
+
+#include <mach/hardware.h>
+#include <mach/mfp.h>
+#include <mach/regs-ac97.h>
+
+struct continuous {
+ u16 id; /* codec id */
+ u8 code; /* continuous code */
+ u8 reads; /* number of coord reads per read cycle */
+ u32 speed; /* number of coords per second */
+};
+
+#define WM_READS(sp) ((sp / HZ) + 1)
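+/*
+ * e.g. with HZ = 100, the 188 coords/sec entry gives WM_READS(188) =
+ * (188 / 100) + 1 = 2 coordinate reads per read cycle (the exact value
+ * depends on the kernel's HZ configuration).
+ */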
+
+static const struct continuous cinfo[] = {
+ { WM9713_ID2, 0, WM_READS(94), 94 },
+ { WM9713_ID2, 1, WM_READS(120), 120 },
+ { WM9713_ID2, 2, WM_READS(154), 154 },
+ { WM9713_ID2, 3, WM_READS(188), 188 },
+};
+
+/* continuous speed index */
+static int sp_idx;
+
+/*
+ * Pen sampling frequency (Hz) in continuous mode.
+ */
+static int cont_rate = 200;
+module_param(cont_rate, int, 0);
+MODULE_PARM_DESC(cont_rate, "Sampling rate in continuous mode (Hz)");
+
+/*
+ * Pressure readback.
+ *
+ * Set to 1 to read back pen down pressure
+ */
+static int pressure;
+module_param(pressure, int, 0);
+MODULE_PARM_DESC(pressure, "Pressure readback (1 = pressure, 0 = no pressure)");
+
+/*
+ * AC97 touch data slot.
+ *
+ * Touch screen readback data ac97 slot
+ */
+static int ac97_touch_slot = 5;
+module_param(ac97_touch_slot, int, 0);
+MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");
+
+
+/* flush AC97 slot 5 FIFO machines */
+static void wm97xx_acc_pen_up(struct wm97xx *wm)
+{
+ int i;
+
+ msleep(1);
+
+ for (i = 0; i < 16; i++)
+ MODR;
+}
+
+static int wm97xx_acc_pen_down(struct wm97xx *wm)
+{
+ u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
+ int reads = 0;
+ static u16 last, tries;
+
+ /* When the AC97 queue has been drained we need to allow time
+ * to buffer up samples, otherwise we end up spinning and
+ * polling for samples. The controller can't have a suitably
+ * low threshold set to use the notifications it gives.
+ */
+ msleep(1);
+
+ if (tries > 5) {
+ tries = 0;
+ return RC_PENUP;
+ }
+
+ x = MODR;
+ if (x == last) {
+ tries++;
+ return RC_AGAIN;
+ }
+ last = x;
+ do {
+ if (reads)
+ x = MODR;
+ y = MODR;
+ if (pressure)
+ p = MODR;
+
+ /* are samples valid */
+ if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X ||
+ (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y ||
+ (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES)
+ goto up;
+
+ /* coordinate is good */
+ tries = 0;
+ input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
+ input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
+ input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
+ input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
+ input_sync(wm->input_dev);
+ reads++;
+ } while (reads < cinfo[sp_idx].reads);
+up:
+ return RC_PENDOWN | RC_AGAIN;
+}
+
+static int wm97xx_acc_startup(struct wm97xx *wm)
+{
+ int idx;
+
+ /* check we have a codec */
+ if (wm->ac97 == NULL)
+ return -ENODEV;
+
+ /* Go you big red fire engine */
+ for (idx = 0; idx < ARRAY_SIZE(cinfo); idx++) {
+ if (wm->id != cinfo[idx].id)
+ continue;
+ sp_idx = idx;
+ if (cont_rate <= cinfo[idx].speed)
+ break;
+ }
+ wm->acc_rate = cinfo[sp_idx].code;
+ wm->acc_slot = ac97_touch_slot;
+ dev_info(wm->dev,
+ "zylonite accelerated touchscreen driver, %d samples/sec\n",
+ cinfo[sp_idx].speed);
+
+ return 0;
+}
+
+static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
+{
+ if (enable)
+ enable_irq(wm->pen_irq);
+ else
+ disable_irq_nosync(wm->pen_irq);
+}
+
+static struct wm97xx_mach_ops zylonite_mach_ops = {
+ .acc_enabled = 1,
+ .acc_pen_up = wm97xx_acc_pen_up,
+ .acc_pen_down = wm97xx_acc_pen_down,
+ .acc_startup = wm97xx_acc_startup,
+ .irq_enable = wm97xx_irq_enable,
+ .irq_gpio = WM97XX_GPIO_2,
+};
+
+static int zylonite_wm97xx_probe(struct platform_device *pdev)
+{
+ struct wm97xx *wm = platform_get_drvdata(pdev);
+ int gpio_touch_irq;
+
+ if (cpu_is_pxa320())
+ gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO15);
+ else
+ gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
+
+ wm->pen_irq = IRQ_GPIO(gpio_touch_irq);
+ set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
+
+ wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
+ WM97XX_GPIO_POL_HIGH,
+ WM97XX_GPIO_STICKY,
+ WM97XX_GPIO_WAKE);
+ wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_OUT,
+ WM97XX_GPIO_POL_HIGH,
+ WM97XX_GPIO_NOTSTICKY,
+ WM97XX_GPIO_NOWAKE);
+
+ return wm97xx_register_mach_ops(wm, &zylonite_mach_ops);
+}
+
+static int zylonite_wm97xx_remove(struct platform_device *pdev)
+{
+ struct wm97xx *wm = platform_get_drvdata(pdev);
+
+ wm97xx_unregister_mach_ops(wm);
+
+ return 0;
+}
+
+static struct platform_driver zylonite_wm97xx_driver = {
+ .probe = zylonite_wm97xx_probe,
+ .remove = zylonite_wm97xx_remove,
+ .driver = {
+ .name = "wm97xx-touch",
+ },
+};
+
+static int __init zylonite_wm97xx_init(void)
+{
+ return platform_driver_register(&zylonite_wm97xx_driver);
+}
+
+static void __exit zylonite_wm97xx_exit(void)
+{
+ platform_driver_unregister(&zylonite_wm97xx_driver);
+}
+
+module_init(zylonite_wm97xx_init);
+module_exit(zylonite_wm97xx_exit);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("wm97xx continuous touch driver for Zylonite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index d184dfab9631..db39f4a52f53 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -278,7 +278,7 @@ static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
* We only use page mode writes; the alternative is sloooow. This routine
* writes at most one page.
*/
-static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf,
+static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
unsigned offset, size_t count)
{
struct i2c_client *client;
@@ -347,8 +347,8 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf,
return -ETIMEDOUT;
}
-static ssize_t at24_write(struct at24_data *at24,
- char *buf, loff_t off, size_t count)
+static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
+ size_t count)
{
ssize_t retval = 0;
@@ -406,7 +406,7 @@ static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
return at24_read(at24, buf, offset, count);
}
-static ssize_t at24_macc_write(struct memory_accessor *macc, char *buf,
+static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
off_t offset, size_t count)
{
struct at24_data *at24 = container_of(macc, struct at24_data, macc);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 6bc0dac5c1e8..b34cb5f79eea 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -140,7 +140,8 @@ at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
static ssize_t
-at25_ee_write(struct at25_data *at25, char *buf, loff_t off, size_t count)
+at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
+ size_t count)
{
ssize_t status = 0;
unsigned written = 0;
@@ -276,7 +277,7 @@ static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
return at25_ee_read(at25, buf, offset, count);
}
-static ssize_t at25_mem_write(struct memory_accessor *mem, char *buf,
+static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
off_t offset, size_t count)
{
struct at25_data *at25 = container_of(mem, struct at25_data, mem);
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 114444cfd496..b94d5f767703 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -90,18 +90,21 @@ struct xpc_rsvd_page {
short max_npartitions; /* value of XPC_MAX_PARTITIONS */
u8 version;
u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
+ unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
union {
- unsigned long vars_pa; /* phys address of struct xpc_vars */
- unsigned long activate_gru_mq_desc_gpa; /* phys addr of */
- /* activate mq's */
- /* gru mq descriptor */
+ struct {
+ unsigned long vars_pa; /* phys addr */
+ } sn2;
+ struct {
+ unsigned long heartbeat_gpa; /* phys addr */
+ unsigned long activate_gru_mq_desc_gpa; /* phys addr */
+ } uv;
} sn;
- unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
- u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */
+ u64 pad2[9]; /* align to last u64 in 2nd 64-byte cacheline */
u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
};
-#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */
/*
* Define the structures by which XPC variables can be exported to other
@@ -182,6 +185,17 @@ struct xpc_vars_part_sn2 {
(XPC_RP_MACH_NASIDS(_rp) + \
xpc_nasid_mask_nlongs))
+
+/*
+ * The following structure describes the partition's heartbeat info which
+ * will be periodically read by other partitions to determine whether this
+ * XPC is still 'alive'.
+ */
+struct xpc_heartbeat_uv {
+ unsigned long value;
+ unsigned long offline; /* if 0, heartbeat should be changing */
+};
+
/*
* Info pertinent to a GRU message queue using a watch list for irq generation.
*/
@@ -198,7 +212,7 @@ struct xpc_gru_mq_uv {
/*
* The activate_mq is used to send/receive GRU messages that affect XPC's
- * heartbeat, partition active state, and channel state. This is UV only.
+ * partition active state and channel state. This is uv only.
*/
struct xpc_activate_mq_msghdr_uv {
unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
@@ -210,33 +224,27 @@ struct xpc_activate_mq_msghdr_uv {
/* activate_mq defined message types */
#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0
-#define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1
-#define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2
-#define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3
-#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4
-#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5
+#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 1
+#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 2
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 3
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 4
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 5
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 6
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV 7
-#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10
-#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11
+#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 8
+#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 9
struct xpc_activate_mq_msg_uv {
struct xpc_activate_mq_msghdr_uv hdr;
};
-struct xpc_activate_mq_msg_heartbeat_req_uv {
- struct xpc_activate_mq_msghdr_uv hdr;
- u64 heartbeat;
-};
-
struct xpc_activate_mq_msg_activate_req_uv {
struct xpc_activate_mq_msghdr_uv hdr;
unsigned long rp_gpa;
+ unsigned long heartbeat_gpa;
unsigned long activate_gru_mq_desc_gpa;
};
@@ -271,6 +279,11 @@ struct xpc_activate_mq_msg_chctl_openreply_uv {
unsigned long notify_gru_mq_desc_gpa;
};
+struct xpc_activate_mq_msg_chctl_opencomplete_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ short ch_number;
+};
+
/*
* Functions registered by add_timer() or called by kernel_thread() only
* allow for a single 64-bit argument. The following macros can be used to
@@ -576,30 +589,32 @@ struct xpc_channel {
#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
-#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
-#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
-#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
-#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
+#define XPC_C_ROPENCOMPLETE 0x00000002 /* remote open channel complete */
+#define XPC_C_OPENCOMPLETE 0x00000004 /* local open channel complete */
+#define XPC_C_ROPENREPLY 0x00000008 /* remote open channel reply */
+#define XPC_C_OPENREPLY 0x00000010 /* local open channel reply */
+#define XPC_C_ROPENREQUEST 0x00000020 /* remote open channel request */
+#define XPC_C_OPENREQUEST 0x00000040 /* local open channel request */
-#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
-#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
+#define XPC_C_SETUP 0x00000080 /* channel's msgqueues are alloc'd */
+#define XPC_C_CONNECTEDCALLOUT 0x00000100 /* connected callout initiated */
#define XPC_C_CONNECTEDCALLOUT_MADE \
- 0x00000080 /* connected callout completed */
-#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
-#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
+ 0x00000200 /* connected callout completed */
+#define XPC_C_CONNECTED 0x00000400 /* local channel is connected */
+#define XPC_C_CONNECTING 0x00000800 /* channel is being connected */
-#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
-#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
-#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
-#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
+#define XPC_C_RCLOSEREPLY 0x00001000 /* remote close channel reply */
+#define XPC_C_CLOSEREPLY 0x00002000 /* local close channel reply */
+#define XPC_C_RCLOSEREQUEST 0x00004000 /* remote close channel request */
+#define XPC_C_CLOSEREQUEST 0x00008000 /* local close channel request */
-#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
-#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
+#define XPC_C_DISCONNECTED 0x00010000 /* channel is disconnected */
+#define XPC_C_DISCONNECTING 0x00020000 /* channel is being disconnected */
#define XPC_C_DISCONNECTINGCALLOUT \
- 0x00010000 /* disconnecting callout initiated */
+ 0x00040000 /* disconnecting callout initiated */
#define XPC_C_DISCONNECTINGCALLOUT_MADE \
- 0x00020000 /* disconnecting callout completed */
-#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
+ 0x00080000 /* disconnecting callout completed */
+#define XPC_C_WDISCONNECT 0x00100000 /* waiting for channel disconnect */
/*
* The channel control flags (chctl) union consists of a 64-bit variable which
@@ -618,11 +633,13 @@ union xpc_channel_ctl_flags {
#define XPC_CHCTL_CLOSEREPLY 0x02
#define XPC_CHCTL_OPENREQUEST 0x04
#define XPC_CHCTL_OPENREPLY 0x08
-#define XPC_CHCTL_MSGREQUEST 0x10
+#define XPC_CHCTL_OPENCOMPLETE 0x10
+#define XPC_CHCTL_MSGREQUEST 0x20
#define XPC_OPENCLOSE_CHCTL_FLAGS \
(XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \
- XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY)
+ XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | \
+ XPC_CHCTL_OPENCOMPLETE)
#define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST
static inline int
@@ -687,6 +704,9 @@ struct xpc_partition_sn2 {
};
struct xpc_partition_uv {
+ unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */
+ struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */
+ /* partition's heartbeat */
unsigned long activate_gru_mq_desc_gpa; /* phys addr of parititon's */
/* activate mq's gru mq */
/* descriptor */
@@ -698,14 +718,12 @@ struct xpc_partition_uv {
u8 remote_act_state; /* remote partition's act_state */
u8 act_state_req; /* act_state request from remote partition */
enum xp_retval reason; /* reason for deactivate act_state request */
- u64 heartbeat; /* incremented by remote partition */
};
/* struct xpc_partition_uv flags */
-#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001
+#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000001
#define XPC_P_ENGAGED_UV 0x00000002
-#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000004
/* struct xpc_partition_uv act_state change requests */
@@ -762,6 +780,62 @@ struct xpc_partition {
} ____cacheline_aligned;
+struct xpc_arch_operations {
+ int (*setup_partitions) (void);
+ void (*teardown_partitions) (void);
+ void (*process_activate_IRQ_rcvd) (void);
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *);
+ int (*setup_rsvd_page) (struct xpc_rsvd_page *);
+
+ void (*allow_hb) (short);
+ void (*disallow_hb) (short);
+ void (*disallow_all_hbs) (void);
+ void (*increment_heartbeat) (void);
+ void (*offline_heartbeat) (void);
+ void (*online_heartbeat) (void);
+ void (*heartbeat_init) (void);
+ void (*heartbeat_exit) (void);
+ enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *);
+
+ void (*request_partition_activation) (struct xpc_rsvd_page *,
+ unsigned long, int);
+ void (*request_partition_reactivation) (struct xpc_partition *);
+ void (*request_partition_deactivation) (struct xpc_partition *);
+ void (*cancel_partition_deactivation_request) (struct xpc_partition *);
+ enum xp_retval (*setup_ch_structures) (struct xpc_partition *);
+ void (*teardown_ch_structures) (struct xpc_partition *);
+
+ enum xp_retval (*make_first_contact) (struct xpc_partition *);
+
+ u64 (*get_chctl_all_flags) (struct xpc_partition *);
+ void (*send_chctl_closerequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_closereply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openrequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openreply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_opencomplete) (struct xpc_channel *, unsigned long *);
+ void (*process_msg_chctl_flags) (struct xpc_partition *, int);
+
+ enum xp_retval (*save_remote_msgqueue_pa) (struct xpc_channel *,
+ unsigned long);
+
+ enum xp_retval (*setup_msg_structures) (struct xpc_channel *);
+ void (*teardown_msg_structures) (struct xpc_channel *);
+
+ void (*indicate_partition_engaged) (struct xpc_partition *);
+ void (*indicate_partition_disengaged) (struct xpc_partition *);
+ void (*assume_partition_disengaged) (short);
+ int (*partition_engaged) (short);
+ int (*any_partition_engaged) (void);
+
+ int (*n_of_deliverable_payloads) (struct xpc_channel *);
+ enum xp_retval (*send_payload) (struct xpc_channel *, u32, void *,
+ u16, u8, xpc_notify_func, void *);
+ void *(*get_deliverable_payload) (struct xpc_channel *);
+ void (*received_payload) (struct xpc_channel *, void *);
+ void (*notify_senders_of_disconnect) (struct xpc_channel *);
+};
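+
+/*
+ * Callers reach the active (sn2 or uv) implementation through the table
+ * above at runtime, e.g. xpc_arch_ops.increment_heartbeat(), replacing
+ * the per-function pointers removed from this header below.
+ */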
+
/* struct xpc_partition act_state values (for XPC HB) */
#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
@@ -802,67 +876,17 @@ extern struct xpc_registration xpc_registrations[];
/* found in xpc_main.c */
extern struct device *xpc_part;
extern struct device *xpc_chan;
+extern struct xpc_arch_operations xpc_arch_ops;
extern int xpc_disengage_timelimit;
extern int xpc_disengage_timedout;
extern int xpc_activate_IRQ_rcvd;
extern spinlock_t xpc_activate_IRQ_rcvd_lock;
extern wait_queue_head_t xpc_activate_IRQ_wq;
-extern void *xpc_heartbeating_to_mask;
extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
extern void xpc_activate_partition(struct xpc_partition *);
extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int);
-extern int (*xpc_setup_partitions_sn) (void);
-extern void (*xpc_teardown_partitions_sn) (void);
-extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
- unsigned long *,
- size_t *);
-extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *);
-extern void (*xpc_heartbeat_init) (void);
-extern void (*xpc_heartbeat_exit) (void);
-extern void (*xpc_increment_heartbeat) (void);
-extern void (*xpc_offline_heartbeat) (void);
-extern void (*xpc_online_heartbeat) (void);
-extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *);
-extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
-extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *);
-extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *);
-extern void (*xpc_teardown_msg_structures) (struct xpc_channel *);
-extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
-extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int);
-extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *);
-extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *);
-extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *,
- unsigned long, int);
-extern void (*xpc_request_partition_reactivation) (struct xpc_partition *);
-extern void (*xpc_request_partition_deactivation) (struct xpc_partition *);
-extern void (*xpc_cancel_partition_deactivation_request) (
- struct xpc_partition *);
-extern void (*xpc_process_activate_IRQ_rcvd) (void);
-extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *);
-extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *);
-
-extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *);
-extern int (*xpc_partition_engaged) (short);
-extern int (*xpc_any_partition_engaged) (void);
-extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *);
-extern void (*xpc_assume_partition_disengaged) (short);
-
-extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *,
- unsigned long *);
-extern void (*xpc_send_chctl_closereply) (struct xpc_channel *,
- unsigned long *);
-extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *,
- unsigned long *);
-extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *);
-
-extern enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *,
- unsigned long);
-
-extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *,
- u16, u8, xpc_notify_func, void *);
-extern void (*xpc_received_payload) (struct xpc_channel *, void *);
/* found in xpc_sn2.c */
extern int xpc_init_sn2(void);
@@ -909,40 +933,6 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *,
extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
-static inline int
-xpc_hb_allowed(short partid, void *heartbeating_to_mask)
-{
- return test_bit(partid, heartbeating_to_mask);
-}
-
-static inline int
-xpc_any_hbs_allowed(void)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions);
-}
-
-static inline void
-xpc_allow_hb(short partid)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- set_bit(partid, xpc_heartbeating_to_mask);
-}
-
-static inline void
-xpc_disallow_hb(short partid)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- clear_bit(partid, xpc_heartbeating_to_mask);
-}
-
-static inline void
-xpc_disallow_all_hbs(void)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions);
-}
-
static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
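
The core of this refactor replaces the long list of exported per-function hooks with a single struct xpc_arch_operations that each backend fills in once at init. A minimal userspace sketch of that pattern, using hypothetical names (demo_ops, demo_init_fast) rather than the XPC symbols:

/*
 * Minimal, self-contained sketch of the ops-table pattern this patch
 * adopts: each backend defines one static table and init installs it
 * with a single struct copy. All names here are illustrative only.
 */
#include <stdio.h>

struct demo_ops {
	int  (*setup)(void);
	void (*heartbeat)(void);
};

static int  setup_fast(void)     { return 0; }
static void heartbeat_fast(void) { puts("fast heartbeat"); }

static struct demo_ops demo_ops_fast = {
	.setup     = setup_fast,
	.heartbeat = heartbeat_fast,
};

static struct demo_ops demo_ops;	/* single global, like xpc_arch_ops */

static int demo_init_fast(void)
{
	demo_ops = demo_ops_fast;	/* one copy replaces dozens of pointer assignments */
	return demo_ops.setup();
}

int main(void)
{
	if (demo_init_fast() == 0)
		demo_ops.heartbeat();	/* all callers go through the table */
	return 0;
}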
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 99a2534c38a1..652593fc486d 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
@@ -39,34 +39,38 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
if (!(ch->flags & XPC_C_SETUP)) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
- ret = xpc_setup_msg_structures(ch);
+ ret = xpc_arch_ops.setup_msg_structures(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
if (ret != xpSuccess)
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
+ else
+ ch->flags |= XPC_C_SETUP;
- ch->flags |= XPC_C_SETUP;
-
- if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
+ if (ch->flags & XPC_C_DISCONNECTING)
return;
}
if (!(ch->flags & XPC_C_OPENREPLY)) {
ch->flags |= XPC_C_OPENREPLY;
- xpc_send_chctl_openreply(ch, irq_flags);
+ xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_ROPENREPLY))
return;
- ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
+ if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
+ ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
+ xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
+ }
+
+ if (!(ch->flags & XPC_C_ROPENCOMPLETE))
+ return;
dev_info(xpc_chan, "channel %d to partition %d connected\n",
ch->number, ch->partid);
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- xpc_create_kthreads(ch, 1, 0);
- spin_lock_irqsave(&ch->lock, *irq_flags);
+ ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
}
/*
@@ -96,7 +100,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
- if (xpc_partition_engaged(ch->partid))
+ if (xpc_arch_ops.partition_engaged(ch->partid))
return;
} else {
@@ -108,7 +112,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
if (!(ch->flags & XPC_C_CLOSEREPLY)) {
ch->flags |= XPC_C_CLOSEREPLY;
- xpc_send_chctl_closereply(ch, irq_flags);
+ xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_RCLOSEREPLY))
@@ -118,7 +122,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
/* wake those waiting for notify completion */
if (atomic_read(&ch->n_to_notify) > 0) {
/* we do callout while holding ch->lock, callout can't block */
- xpc_notify_senders_of_disconnect(ch);
+ xpc_arch_ops.notify_senders_of_disconnect(ch);
}
/* both sides are disconnected now */
@@ -132,7 +136,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
/* it's now safe to free the channel's message queues */
- xpc_teardown_msg_structures(ch);
+ xpc_arch_ops.teardown_msg_structures(ch);
ch->func = NULL;
ch->key = NULL;
@@ -144,8 +148,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
/*
* Mark the channel disconnected and clear all other flags, including
- * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
- * not including XPC_C_WDISCONNECT (if it was set).
+ * XPC_C_SETUP (because of call to
+ * xpc_arch_ops.teardown_msg_structures()) but not including
+ * XPC_C_WDISCONNECT (if it was set).
*/
ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
@@ -184,6 +189,7 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
struct xpc_channel *ch = &part->channels[ch_number];
enum xp_retval reason;
enum xp_retval ret;
+ int create_kthread = 0;
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -196,8 +202,7 @@ again:
* has had a chance to see that the channel is disconnected.
*/
ch->delayed_chctl_flags |= chctl_flags;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {
@@ -239,8 +244,7 @@ again:
XPC_CHCTL_CLOSEREQUEST;
spin_unlock(&part->chctl_lock);
}
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
XPC_SET_REASON(ch, 0, 0);
@@ -250,7 +254,8 @@ again:
ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
}
- chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);
+ chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
+ XPC_CHCTL_OPENCOMPLETE);
/*
* The meaningful CLOSEREQUEST connection state fields are:
@@ -269,8 +274,7 @@ again:
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
xpc_process_disconnect(ch, &irq_flags);
@@ -283,8 +287,7 @@ again:
if (ch->flags & XPC_C_DISCONNECTED) {
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
@@ -299,8 +302,7 @@ again:
XPC_CHCTL_CLOSEREPLY;
spin_unlock(&part->chctl_lock);
}
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
ch->flags |= XPC_C_RCLOSEREPLY;
@@ -320,14 +322,12 @@ again:
if (part->act_state == XPC_P_AS_DEACTIVATING ||
(ch->flags & XPC_C_ROPENREQUEST)) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
XPC_C_OPENREQUEST)));
@@ -341,8 +341,7 @@ again:
*/
if (args->entry_size == 0 || args->local_nentries == 0) {
/* assume OPENREQUEST was delayed by mistake */
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
@@ -352,8 +351,7 @@ again:
if (args->entry_size != ch->entry_size) {
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
} else {
ch->entry_size = args->entry_size;
@@ -375,15 +373,13 @@ again:
args->local_msgqueue_pa, args->local_nentries,
args->remote_nentries, ch->partid, ch->number);
- if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
+ goto out;
+
if (!(ch->flags & XPC_C_OPENREQUEST)) {
XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
@@ -400,11 +396,11 @@ again:
DBUG_ON(args->local_nentries == 0);
DBUG_ON(args->remote_nentries == 0);
- ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
+ ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
+ args->local_msgqueue_pa);
if (ret != xpSuccess) {
XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
ch->flags |= XPC_C_ROPENREPLY;
@@ -430,7 +426,36 @@ again:
xpc_process_connect(ch, &irq_flags);
}
+ if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {
+
+ dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
+ goto out;
+
+ if (!(ch->flags & XPC_C_OPENREQUEST) ||
+ !(ch->flags & XPC_C_OPENREPLY)) {
+ XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
+ &irq_flags);
+ goto out;
+ }
+
+ DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
+ DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
+ DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
+
+ ch->flags |= XPC_C_ROPENCOMPLETE;
+
+ xpc_process_connect(ch, &irq_flags);
+ create_kthread = 1;
+ }
+
+out:
spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ if (create_kthread)
+ xpc_create_kthreads(ch, 1, 0);
}
/*
@@ -508,7 +533,7 @@ xpc_connect_channel(struct xpc_channel *ch)
/* initiate the connection */
ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
- xpc_send_chctl_openrequest(ch, &irq_flags);
+ xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);
xpc_process_connect(ch, &irq_flags);
@@ -526,7 +551,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part)
int ch_number;
u32 ch_flags;
- chctl.all_flags = xpc_get_chctl_all_flags(part);
+ chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);
/*
* Initiate channel connections for registered channels.
@@ -564,10 +589,6 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part)
if (!(ch_flags & XPC_C_OPENREQUEST)) {
DBUG_ON(ch_flags & XPC_C_SETUP);
(void)xpc_connect_channel(ch);
- } else {
- spin_lock_irqsave(&ch->lock, irq_flags);
- xpc_process_connect(ch, &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
}
continue;
}
@@ -579,7 +600,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part)
*/
if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
- xpc_process_msg_chctl_flags(part, ch_number);
+ xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
}
}
@@ -755,7 +776,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_CONNECTING | XPC_C_CONNECTED);
- xpc_send_chctl_closerequest(ch, irq_flags);
+ xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);
if (channel_was_connected)
ch->flags |= XPC_C_WASCONNECTED;
@@ -862,8 +883,8 @@ xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
DBUG_ON(payload == NULL);
if (xpc_part_ref(part)) {
- ret = xpc_send_payload(&part->channels[ch_number], flags,
- payload, payload_size, 0, NULL, NULL);
+ ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
+ flags, payload, payload_size, 0, NULL, NULL);
xpc_part_deref(part);
}
@@ -914,9 +935,8 @@ xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
DBUG_ON(func == NULL);
if (xpc_part_ref(part)) {
- ret = xpc_send_payload(&part->channels[ch_number], flags,
- payload, payload_size, XPC_N_CALL, func,
- key);
+ ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
+ flags, payload, payload_size, XPC_N_CALL, func, key);
xpc_part_deref(part);
}
return ret;
@@ -930,7 +950,7 @@ xpc_deliver_payload(struct xpc_channel *ch)
{
void *payload;
- payload = xpc_get_deliverable_payload(ch);
+ payload = xpc_arch_ops.get_deliverable_payload(ch);
if (payload != NULL) {
/*
@@ -984,7 +1004,7 @@ xpc_initiate_received(short partid, int ch_number, void *payload)
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number];
- xpc_received_payload(ch, payload);
+ xpc_arch_ops.received_payload(ch, payload);
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
xpc_msgqueue_deref(ch);
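
With the OPENCOMPLETE step added above, opening a channel becomes a three-step handshake: OPENREQUEST, OPENREPLY, then OPENCOMPLETE, and xpc_process_connect() declares the channel connected only after the remote side's OPENCOMPLETE (ROPENCOMPLETE) has also arrived. A simplified model of that flag gating; the flag values are illustrative, not the driver's actual XPC_C_* bit assignments:

/*
 * Simplified model of the three-step open handshake introduced above.
 * Flag values are illustrative only; no locking or remote messaging
 * is modeled.
 */
#include <stdio.h>

#define C_OPENREQUEST   0x01	/* we sent OPENREQUEST */
#define C_ROPENREQUEST  0x02	/* remote's OPENREQUEST arrived */
#define C_OPENREPLY     0x04	/* we sent OPENREPLY */
#define C_ROPENREPLY    0x08	/* remote's OPENREPLY arrived */
#define C_OPENCOMPLETE  0x10	/* we sent OPENCOMPLETE */
#define C_ROPENCOMPLETE 0x20	/* remote's OPENCOMPLETE arrived */

static int connect_done(unsigned flags)
{
	/* the "connected" message fires only after all six steps */
	return flags == (C_OPENREQUEST | C_ROPENREQUEST | C_OPENREPLY |
			 C_ROPENREPLY | C_OPENCOMPLETE | C_ROPENCOMPLETE);
}

int main(void)
{
	unsigned flags = C_OPENREQUEST | C_ROPENREQUEST |
			 C_OPENREPLY | C_ROPENREPLY;

	flags |= C_OPENCOMPLETE;	/* local side completes ... */
	printf("connected: %d\n", connect_done(flags));	/* 0: still waiting */
	flags |= C_ROPENCOMPLETE;	/* ... then the remote's arrives */
	printf("connected: %d\n", connect_done(flags));	/* 1 */
	return 0;
}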
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 1ab9fda87fab..fd3688a3e23f 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
@@ -150,7 +150,6 @@ DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
-void *xpc_heartbeating_to_mask;
/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);
@@ -170,62 +169,7 @@ static struct notifier_block xpc_die_notifier = {
.notifier_call = xpc_system_die,
};
-int (*xpc_setup_partitions_sn) (void);
-void (*xpc_teardown_partitions_sn) (void);
-enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
- unsigned long *rp_pa,
- size_t *len);
-int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);
-void (*xpc_heartbeat_init) (void);
-void (*xpc_heartbeat_exit) (void);
-void (*xpc_increment_heartbeat) (void);
-void (*xpc_offline_heartbeat) (void);
-void (*xpc_online_heartbeat) (void);
-enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);
-
-enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
-void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
-u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
-enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
-void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
-void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
-int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
-void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);
-
-void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
- unsigned long remote_rp_pa,
- int nasid);
-void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
-void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
-void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
-
-void (*xpc_process_activate_IRQ_rcvd) (void);
-enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
-void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);
-
-void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
-int (*xpc_partition_engaged) (short partid);
-int (*xpc_any_partition_engaged) (void);
-void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
-void (*xpc_assume_partition_disengaged) (short partid);
-
-void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-
-enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
- unsigned long msgqueue_pa);
-
-enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
- void *payload, u16 payload_size,
- u8 notify_type, xpc_notify_func func,
- void *key);
-void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);
+struct xpc_arch_operations xpc_arch_ops;
/*
* Timer function to enforce the timelimit on the partition disengage.
@@ -240,7 +184,7 @@ xpc_timeout_partition_disengage(unsigned long data)
(void)xpc_partition_disengaged(part);
DBUG_ON(part->disengage_timeout != 0);
- DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
+ DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}
/*
@@ -251,7 +195,7 @@ xpc_timeout_partition_disengage(unsigned long data)
static void
xpc_hb_beater(unsigned long dummy)
{
- xpc_increment_heartbeat();
+ xpc_arch_ops.increment_heartbeat();
if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
wake_up_interruptible(&xpc_activate_IRQ_wq);
@@ -263,7 +207,7 @@ xpc_hb_beater(unsigned long dummy)
static void
xpc_start_hb_beater(void)
{
- xpc_heartbeat_init();
+ xpc_arch_ops.heartbeat_init();
init_timer(&xpc_hb_timer);
xpc_hb_timer.function = xpc_hb_beater;
xpc_hb_beater(0);
@@ -273,7 +217,7 @@ static void
xpc_stop_hb_beater(void)
{
del_timer_sync(&xpc_hb_timer);
- xpc_heartbeat_exit();
+ xpc_arch_ops.heartbeat_exit();
}
/*
@@ -302,7 +246,7 @@ xpc_check_remote_hb(void)
continue;
}
- ret = xpc_get_remote_heartbeat(part);
+ ret = xpc_arch_ops.get_remote_heartbeat(part);
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(part, ret);
}
@@ -353,7 +297,7 @@ xpc_hb_checker(void *ignore)
force_IRQ = 0;
dev_dbg(xpc_part, "processing activate IRQs "
"received\n");
- xpc_process_activate_IRQ_rcvd();
+ xpc_arch_ops.process_activate_IRQ_rcvd();
}
/* wait for IRQ or timeout */
@@ -528,7 +472,7 @@ xpc_setup_ch_structures(struct xpc_partition *part)
init_waitqueue_head(&ch->idle_wq);
}
- ret = xpc_setup_ch_structures_sn(part);
+ ret = xpc_arch_ops.setup_ch_structures(part);
if (ret != xpSuccess)
goto out_2;
@@ -572,7 +516,7 @@ xpc_teardown_ch_structures(struct xpc_partition *part)
/* now we can begin tearing down the infrastructure */
- xpc_teardown_ch_structures_sn(part);
+ xpc_arch_ops.teardown_ch_structures(part);
kfree(part->remote_openclose_args_base);
part->remote_openclose_args = NULL;
@@ -620,12 +564,12 @@ xpc_activating(void *__partid)
dev_dbg(xpc_part, "activating partition %d\n", partid);
- xpc_allow_hb(partid);
+ xpc_arch_ops.allow_hb(partid);
if (xpc_setup_ch_structures(part) == xpSuccess) {
(void)xpc_part_ref(part); /* this will always succeed */
- if (xpc_make_first_contact(part) == xpSuccess) {
+ if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
xpc_mark_partition_active(part);
xpc_channel_mgr(part);
/* won't return until partition is deactivating */
@@ -635,12 +579,12 @@ xpc_activating(void *__partid)
xpc_teardown_ch_structures(part);
}
- xpc_disallow_hb(partid);
+ xpc_arch_ops.disallow_hb(partid);
xpc_mark_partition_inactive(part);
if (part->reason == xpReactivating) {
/* interrupting ourselves results in activating partition */
- xpc_request_partition_reactivation(part);
+ xpc_arch_ops.request_partition_reactivation(part);
}
return 0;
@@ -713,10 +657,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
+ int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+ xpc_arch_ops.n_of_deliverable_payloads;
+
do {
/* deliver messages to their intended recipients */
- while (xpc_n_of_deliverable_payloads(ch) > 0 &&
+ while (n_of_deliverable_payloads(ch) > 0 &&
!(ch->flags & XPC_C_DISCONNECTING)) {
xpc_deliver_payload(ch);
}
@@ -732,7 +679,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
"wait_event_interruptible_exclusive()\n");
(void)wait_event_interruptible_exclusive(ch->idle_wq,
- (xpc_n_of_deliverable_payloads(ch) > 0 ||
+ (n_of_deliverable_payloads(ch) > 0 ||
(ch->flags & XPC_C_DISCONNECTING)));
atomic_dec(&ch->kthreads_idle);
@@ -749,6 +696,8 @@ xpc_kthread_start(void *args)
struct xpc_channel *ch;
int n_needed;
unsigned long irq_flags;
+ int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+ xpc_arch_ops.n_of_deliverable_payloads;
dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
partid, ch_number);
@@ -777,7 +726,7 @@ xpc_kthread_start(void *args)
* additional kthreads to help deliver them. We only
* need one less than total #of messages to deliver.
*/
- n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
+ n_needed = n_of_deliverable_payloads(ch) - 1;
if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
xpc_activate_kthreads(ch, n_needed);
@@ -805,7 +754,7 @@ xpc_kthread_start(void *args)
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
- xpc_indicate_partition_disengaged(part);
+ xpc_arch_ops.indicate_partition_disengaged(part);
}
xpc_msgqueue_deref(ch);
@@ -837,6 +786,8 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
struct xpc_partition *part = &xpc_partitions[ch->partid];
struct task_struct *kthread;
+ void (*indicate_partition_disengaged) (struct xpc_partition *) =
+ xpc_arch_ops.indicate_partition_disengaged;
while (needed-- > 0) {
@@ -858,7 +809,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
atomic_inc_return(&part->nchannels_engaged) == 1) {
- xpc_indicate_partition_engaged(part);
+ xpc_arch_ops.indicate_partition_engaged(part);
}
(void)xpc_part_ref(part);
xpc_msgqueue_ref(ch);
@@ -880,7 +831,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
- xpc_indicate_partition_disengaged(part);
+ indicate_partition_disengaged(part);
}
xpc_msgqueue_deref(ch);
xpc_part_deref(part);
@@ -993,13 +944,13 @@ xpc_setup_partitions(void)
atomic_set(&part->references, 0);
}
- return xpc_setup_partitions_sn();
+ return xpc_arch_ops.setup_partitions();
}
static void
xpc_teardown_partitions(void)
{
- xpc_teardown_partitions_sn();
+ xpc_arch_ops.teardown_partitions();
kfree(xpc_partitions);
}
@@ -1055,7 +1006,7 @@ xpc_do_exit(enum xp_retval reason)
disengage_timeout = part->disengage_timeout;
}
- if (xpc_any_partition_engaged()) {
+ if (xpc_arch_ops.any_partition_engaged()) {
if (time_is_before_jiffies(printmsg_time)) {
dev_info(xpc_part, "waiting for remote "
"partitions to deactivate, timeout in "
@@ -1086,8 +1037,7 @@ xpc_do_exit(enum xp_retval reason)
} while (1);
- DBUG_ON(xpc_any_partition_engaged());
- DBUG_ON(xpc_any_hbs_allowed() != 0);
+ DBUG_ON(xpc_arch_ops.any_partition_engaged());
xpc_teardown_rsvd_page();
@@ -1152,15 +1102,15 @@ xpc_die_deactivate(void)
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
- xpc_disallow_all_hbs(); /*indicate we're deactivated */
+ xpc_arch_ops.disallow_all_hbs(); /* indicate we're deactivated */
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
- if (xpc_partition_engaged(partid) ||
+ if (xpc_arch_ops.partition_engaged(partid) ||
part->act_state != XPC_P_AS_INACTIVE) {
- xpc_request_partition_deactivation(part);
- xpc_indicate_partition_disengaged(part);
+ xpc_arch_ops.request_partition_deactivation(part);
+ xpc_arch_ops.indicate_partition_disengaged(part);
}
}
@@ -1177,7 +1127,7 @@ xpc_die_deactivate(void)
wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
while (1) {
- any_engaged = xpc_any_partition_engaged();
+ any_engaged = xpc_arch_ops.any_partition_engaged();
if (!any_engaged) {
dev_info(xpc_part, "all partitions have deactivated\n");
break;
@@ -1186,7 +1136,7 @@ xpc_die_deactivate(void)
if (!keep_waiting--) {
for (partid = 0; partid < xp_max_npartitions;
partid++) {
- if (xpc_partition_engaged(partid)) {
+ if (xpc_arch_ops.partition_engaged(partid)) {
dev_info(xpc_part, "deactivate from "
"remote partition %d timed "
"out\n", partid);
@@ -1233,7 +1183,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
/* fall through */
case DIE_MCA_MONARCH_ENTER:
case DIE_INIT_MONARCH_ENTER:
- xpc_offline_heartbeat();
+ xpc_arch_ops.offline_heartbeat();
break;
case DIE_KDEBUG_LEAVE:
@@ -1244,7 +1194,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
/* fall through */
case DIE_MCA_MONARCH_LEAVE:
case DIE_INIT_MONARCH_LEAVE:
- xpc_online_heartbeat();
+ xpc_arch_ops.online_heartbeat();
break;
}
#else
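
xpc_kthread_waitmsgs() and xpc_kthread_start() above hoist the ops-table member into a local function pointer so the struct is dereferenced once per call rather than once per loop iteration. A small self-contained sketch of that idiom, with stand-in names:

/*
 * Sketch of the hoisted-indirection idiom used in the kthread paths:
 * load the ops member into a local function pointer once, then call
 * through the local inside the loop. All names here are stand-ins.
 */
#include <stdio.h>

struct ops {
	int (*work_left)(int *queue);
};

static int work_left_impl(int *queue) { return *queue; }

static struct ops arch_ops = { .work_left = work_left_impl };

static void drain(int *queue)
{
	int (*work_left)(int *) = arch_ops.work_left;	/* one table load */

	while (work_left(queue) > 0)	/* no per-iteration struct deref */
		(*queue)--;		/* stand-in for delivering a payload */
}

int main(void)
{
	int pending = 3;

	drain(&pending);
	printf("pending = %d\n", pending);	/* prints pending = 0 */
	return 0;
}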
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 6722f6fe4dc7..65877bc5edaa 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -70,6 +70,9 @@ xpc_get_rsvd_page_pa(int nasid)
size_t buf_len = 0;
void *buf = buf;
void *buf_base = NULL;
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *) =
+ xpc_arch_ops.get_partition_rsvd_page_pa;
while (1) {
@@ -79,8 +82,7 @@ xpc_get_rsvd_page_pa(int nasid)
* ??? function or have two versions? Rename rp_pa for UV to
* ??? rp_gpa?
*/
- ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa,
- &len);
+ ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
"address=0x%016lx, len=0x%016lx\n", ret,
@@ -172,7 +174,7 @@ xpc_setup_rsvd_page(void)
xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
- ret = xpc_setup_rsvd_page_sn(rp);
+ ret = xpc_arch_ops.setup_rsvd_page(rp);
if (ret != 0)
return ret;
@@ -264,7 +266,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
short partid = XPC_PARTID(part);
int disengaged;
- disengaged = !xpc_partition_engaged(partid);
+ disengaged = !xpc_arch_ops.partition_engaged(partid);
if (part->disengage_timeout) {
if (!disengaged) {
if (time_is_after_jiffies(part->disengage_timeout)) {
@@ -280,7 +282,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
dev_info(xpc_part, "deactivate request to remote "
"partition %d timed out\n", partid);
xpc_disengage_timedout = 1;
- xpc_assume_partition_disengaged(partid);
+ xpc_arch_ops.assume_partition_disengaged(partid);
disengaged = 1;
}
part->disengage_timeout = 0;
@@ -294,7 +296,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
if (part->act_state != XPC_P_AS_INACTIVE)
xpc_wakeup_channel_mgr(part);
- xpc_cancel_partition_deactivation_request(part);
+ xpc_arch_ops.cancel_partition_deactivation_request(part);
}
return disengaged;
}
@@ -339,7 +341,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
spin_unlock_irqrestore(&part->act_lock, irq_flags);
if (reason == xpReactivating) {
/* we interrupt ourselves to reactivate partition */
- xpc_request_partition_reactivation(part);
+ xpc_arch_ops.request_partition_reactivation(part);
}
return;
}
@@ -358,7 +360,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
spin_unlock_irqrestore(&part->act_lock, irq_flags);
/* ask remote partition to deactivate with regard to us */
- xpc_request_partition_deactivation(part);
+ xpc_arch_ops.request_partition_deactivation(part);
/* set a timelimit on the disengage phase of the deactivation request */
part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
@@ -496,7 +498,7 @@ xpc_discovery(void)
continue;
}
- xpc_request_partition_activation(remote_rp,
+ xpc_arch_ops.request_partition_activation(remote_rp,
remote_rp_pa, nasid);
}
}
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index eaaa964942de..915a3b495da5 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
@@ -60,14 +60,14 @@ static struct xpc_vars_sn2 *xpc_vars_sn2;
static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
static int
-xpc_setup_partitions_sn_sn2(void)
+xpc_setup_partitions_sn2(void)
{
/* nothing needs to be done */
return 0;
}
static void
-xpc_teardown_partitions_sn_sn2(void)
+xpc_teardown_partitions_sn2(void)
{
/* nothing needs to be done */
}
@@ -431,6 +431,13 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
}
static void
+xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch,
+ unsigned long *irq_flags)
+{
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags);
+}
+
+static void
xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
{
XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
@@ -621,7 +628,7 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
static int
-xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
+xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp)
{
struct amo *amos_page;
int i;
@@ -629,7 +636,7 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
xpc_vars_sn2 = XPC_RP_VARS(rp);
- rp->sn.vars_pa = xp_pa(xpc_vars_sn2);
+ rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2);
/* vars_part array follows immediately after vars */
xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
@@ -693,6 +700,33 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
return 0;
}
+static int
+xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask)
+{
+ return test_bit(partid, heartbeating_to_mask);
+}
+
+static void
+xpc_allow_hb_sn2(short partid)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ set_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
+}
+
+static void
+xpc_disallow_hb_sn2(short partid)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
+}
+
+static void
+xpc_disallow_all_hbs_sn2(void)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions);
+}
+
static void
xpc_increment_heartbeat_sn2(void)
{
@@ -719,7 +753,6 @@ xpc_heartbeat_init_sn2(void)
DBUG_ON(xpc_vars_sn2 == NULL);
bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
- xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0];
xpc_online_heartbeat_sn2();
}
@@ -751,9 +784,9 @@ xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
remote_vars->heartbeating_to_mask[0]);
if ((remote_vars->heartbeat == part->last_heartbeat &&
- remote_vars->heartbeat_offline == 0) ||
- !xpc_hb_allowed(sn_partition_id,
- &remote_vars->heartbeating_to_mask)) {
+ !remote_vars->heartbeat_offline) ||
+ !xpc_hb_allowed_sn2(sn_partition_id,
+ remote_vars->heartbeating_to_mask)) {
ret = xpNoHeartbeat;
} else {
part->last_heartbeat = remote_vars->heartbeat;
@@ -972,7 +1005,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
return;
}
- remote_vars_pa = remote_rp->sn.vars_pa;
+ remote_vars_pa = remote_rp->sn.sn2.vars_pa;
remote_rp_version = remote_rp->version;
remote_rp_ts_jiffies = remote_rp->ts_jiffies;
@@ -1129,7 +1162,7 @@ xpc_process_activate_IRQ_rcvd_sn2(void)
* Setup the channel structures that are sn2 specific.
*/
static enum xp_retval
-xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part)
+xpc_setup_ch_structures_sn2(struct xpc_partition *part)
{
struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
struct xpc_channel_sn2 *ch_sn2;
@@ -1251,7 +1284,7 @@ out_1:
* Teardown the channel structures that are sn2 specific.
*/
static void
-xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part)
+xpc_teardown_ch_structures_sn2(struct xpc_partition *part)
{
struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
short partid = XPC_PARTID(part);
@@ -2315,61 +2348,70 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
}
+static struct xpc_arch_operations xpc_arch_ops_sn2 = {
+ .setup_partitions = xpc_setup_partitions_sn2,
+ .teardown_partitions = xpc_teardown_partitions_sn2,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
+ .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2,
+ .setup_rsvd_page = xpc_setup_rsvd_page_sn2,
+
+ .allow_hb = xpc_allow_hb_sn2,
+ .disallow_hb = xpc_disallow_hb_sn2,
+ .disallow_all_hbs = xpc_disallow_all_hbs_sn2,
+ .increment_heartbeat = xpc_increment_heartbeat_sn2,
+ .offline_heartbeat = xpc_offline_heartbeat_sn2,
+ .online_heartbeat = xpc_online_heartbeat_sn2,
+ .heartbeat_init = xpc_heartbeat_init_sn2,
+ .heartbeat_exit = xpc_heartbeat_exit_sn2,
+ .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2,
+
+ .request_partition_activation =
+ xpc_request_partition_activation_sn2,
+ .request_partition_reactivation =
+ xpc_request_partition_reactivation_sn2,
+ .request_partition_deactivation =
+ xpc_request_partition_deactivation_sn2,
+ .cancel_partition_deactivation_request =
+ xpc_cancel_partition_deactivation_request_sn2,
+
+ .setup_ch_structures = xpc_setup_ch_structures_sn2,
+ .teardown_ch_structures = xpc_teardown_ch_structures_sn2,
+
+ .make_first_contact = xpc_make_first_contact_sn2,
+
+ .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2,
+ .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2,
+ .send_chctl_closereply = xpc_send_chctl_closereply_sn2,
+ .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2,
+ .send_chctl_openreply = xpc_send_chctl_openreply_sn2,
+ .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2,
+ .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2,
+
+ .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2,
+
+ .setup_msg_structures = xpc_setup_msg_structures_sn2,
+ .teardown_msg_structures = xpc_teardown_msg_structures_sn2,
+
+ .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2,
+ .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2,
+ .partition_engaged = xpc_partition_engaged_sn2,
+ .any_partition_engaged = xpc_any_partition_engaged_sn2,
+ .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2,
+
+ .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2,
+ .send_payload = xpc_send_payload_sn2,
+ .get_deliverable_payload = xpc_get_deliverable_payload_sn2,
+ .received_payload = xpc_received_payload_sn2,
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2,
+};
+
int
xpc_init_sn2(void)
{
int ret;
size_t buf_size;
- xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2;
- xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2;
- xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2;
- xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2;
- xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
- xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
- xpc_online_heartbeat = xpc_online_heartbeat_sn2;
- xpc_heartbeat_init = xpc_heartbeat_init_sn2;
- xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
- xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2;
-
- xpc_request_partition_activation = xpc_request_partition_activation_sn2;
- xpc_request_partition_reactivation =
- xpc_request_partition_reactivation_sn2;
- xpc_request_partition_deactivation =
- xpc_request_partition_deactivation_sn2;
- xpc_cancel_partition_deactivation_request =
- xpc_cancel_partition_deactivation_request_sn2;
-
- xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
- xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2;
- xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2;
- xpc_make_first_contact = xpc_make_first_contact_sn2;
-
- xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
- xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
- xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
- xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
- xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;
-
- xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2;
-
- xpc_setup_msg_structures = xpc_setup_msg_structures_sn2;
- xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2;
-
- xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
- xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
- xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2;
- xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2;
-
- xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
- xpc_indicate_partition_disengaged =
- xpc_indicate_partition_disengaged_sn2;
- xpc_partition_engaged = xpc_partition_engaged_sn2;
- xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
- xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;
-
- xpc_send_payload = xpc_send_payload_sn2;
- xpc_received_payload = xpc_received_payload_sn2;
+ xpc_arch_ops = xpc_arch_ops_sn2;
if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
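
The sn2 backend now keeps heartbeating_to_mask private, with the allow/disallow helpers operating directly on xpc_vars_sn2 rather than through the removed xpc_heartbeating_to_mask global. A userspace sketch of the per-partition bitmask bookkeeping, using plain bit operations in place of the kernel's set_bit/clear_bit/test_bit:

/*
 * Userspace sketch of the per-partition heartbeat-allow bitmask the
 * sn2 backend now keeps private. Plain bit operations stand in for
 * the kernel's set_bit/clear_bit/test_bit; partids are assumed to
 * fit in one word here.
 */
#include <stdio.h>

static unsigned long hb_mask;	/* stands in for heartbeating_to_mask */

static void allow_hb(int partid)    { hb_mask |=  1UL << partid; }
static void disallow_hb(int partid) { hb_mask &= ~(1UL << partid); }
static int  hb_allowed(int partid)  { return !!(hb_mask & (1UL << partid)); }
static void disallow_all_hbs(void)  { hb_mask = 0; }

int main(void)
{
	allow_hb(3);
	allow_hb(7);
	printf("3 allowed: %d\n", hb_allowed(3));	/* 1 */
	disallow_hb(3);
	printf("3 allowed: %d\n", hb_allowed(3));	/* 0 */
	disallow_all_hbs();
	printf("7 allowed: %d\n", hb_allowed(7));	/* 0 */
	return 0;
}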
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index f7fff4727edb..9172fcdee4e2 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -46,8 +46,7 @@ struct uv_IO_APIC_route_entry {
};
#endif
-static atomic64_t xpc_heartbeat_uv;
-static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
+static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
@@ -63,7 +62,7 @@ static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
-xpc_setup_partitions_sn_uv(void)
+xpc_setup_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
@@ -79,7 +78,7 @@ xpc_setup_partitions_sn_uv(void)
}
static void
-xpc_teardown_partitions_sn_uv(void)
+xpc_teardown_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
@@ -423,41 +422,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
/* syncing of remote_act_state was just done above */
break;
- case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
- struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
- msg = container_of(msg_hdr,
- struct xpc_activate_mq_msg_heartbeat_req_uv,
- hdr);
- part_uv->heartbeat = msg->heartbeat;
- break;
- }
- case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
- struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
- msg = container_of(msg_hdr,
- struct xpc_activate_mq_msg_heartbeat_req_uv,
- hdr);
- part_uv->heartbeat = msg->heartbeat;
-
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
- part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
- break;
- }
- case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
- struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
- msg = container_of(msg_hdr,
- struct xpc_activate_mq_msg_heartbeat_req_uv,
- hdr);
- part_uv->heartbeat = msg->heartbeat;
-
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
- part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
- break;
- }
case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
struct xpc_activate_mq_msg_activate_req_uv *msg;
@@ -475,6 +439,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
+ part_uv->heartbeat_gpa = msg->heartbeat_gpa;
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
@@ -569,6 +534,17 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
xpc_wakeup_channel_mgr(part);
break;
}
+ case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
+ struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+ break;
+ }
case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_ENGAGED_UV;
@@ -759,7 +735,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
/*
* !!! Make our side think that the remote partition sent an activate
- * !!! message our way by doing what the activate IRQ handler would
+ * !!! mq message our way by doing what the activate IRQ handler would
* !!! do had one really been sent.
*/
@@ -806,90 +782,82 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
}
static int
-xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
+xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
- rp->sn.activate_gru_mq_desc_gpa =
+ xpc_heartbeat_uv =
+ &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
+ rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
+ rp->sn.uv.activate_gru_mq_desc_gpa =
uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
return 0;
}
static void
-xpc_send_heartbeat_uv(int msg_type)
+xpc_allow_hb_uv(short partid)
{
- short partid;
- struct xpc_partition *part;
- struct xpc_activate_mq_msg_heartbeat_req_uv msg;
-
- /*
- * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
- * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
- * !!! seconds. This is an increase in numalink traffic.
- * ??? Is this good?
- */
-
- msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);
-
- partid = find_first_bit(xpc_heartbeating_to_mask_uv,
- XP_MAX_NPARTITIONS_UV);
-
- while (partid < XP_MAX_NPARTITIONS_UV) {
- part = &xpc_partitions[partid];
+}
- xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
- msg_type);
+static void
+xpc_disallow_hb_uv(short partid)
+{
+}
- partid = find_next_bit(xpc_heartbeating_to_mask_uv,
- XP_MAX_NPARTITIONS_UV, partid + 1);
- }
+static void
+xpc_disallow_all_hbs_uv(void)
+{
}
static void
xpc_increment_heartbeat_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
+ xpc_heartbeat_uv->value++;
}
static void
xpc_offline_heartbeat_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
+ xpc_increment_heartbeat_uv();
+ xpc_heartbeat_uv->offline = 1;
}
static void
xpc_online_heartbeat_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
+ xpc_increment_heartbeat_uv();
+ xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_init_uv(void)
{
- atomic64_set(&xpc_heartbeat_uv, 0);
- bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
- xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
+ xpc_heartbeat_uv->value = 1;
+ xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_exit_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
+ xpc_offline_heartbeat_uv();
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
struct xpc_partition_uv *part_uv = &part->sn.uv;
- enum xp_retval ret = xpNoHeartbeat;
+ enum xp_retval ret;
- if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
- part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {
+ ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
+ part_uv->heartbeat_gpa,
+ sizeof(struct xpc_heartbeat_uv));
+ if (ret != xpSuccess)
+ return ret;
- if (part_uv->heartbeat != part->last_heartbeat ||
- (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {
+ if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
+ !part_uv->cached_heartbeat.offline) {
- part->last_heartbeat = part_uv->heartbeat;
- ret = xpSuccess;
- }
+ ret = xpNoHeartbeat;
+ } else {
+ part->last_heartbeat = part_uv->cached_heartbeat.value;
}
return ret;
}
@@ -904,8 +872,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
+ part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
part->sn.uv.activate_gru_mq_desc_gpa =
- remote_rp->sn.activate_gru_mq_desc_gpa;
+ remote_rp->sn.uv.activate_gru_mq_desc_gpa;
/*
* ??? Is it a good idea to make this conditional on what is
@@ -913,8 +882,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
*/
if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
msg.rp_gpa = uv_gpa(xpc_rsvd_page);
+ msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
msg.activate_gru_mq_desc_gpa =
- xpc_rsvd_page->sn.activate_gru_mq_desc_gpa;
+ xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
}
@@ -1010,7 +980,7 @@ xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
* Setup the channel structures that are uv specific.
*/
static enum xp_retval
-xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
+xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
struct xpc_channel_uv *ch_uv;
int ch_number;
@@ -1029,7 +999,7 @@ xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
* Teardown the channel structures that are uv specific.
*/
static void
-xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
+xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
/* nothing needs to be done */
return;
@@ -1243,6 +1213,16 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
}
static void
+xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;
+
+ msg.ch_number = ch->number;
+ xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
+}
+
+static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
unsigned long irq_flags;
@@ -1669,58 +1649,67 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
msg->hdr.msg_slot_number += ch->remote_nentries;
}
+static struct xpc_arch_operations xpc_arch_ops_uv = {
+ .setup_partitions = xpc_setup_partitions_uv,
+ .teardown_partitions = xpc_teardown_partitions_uv,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
+ .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
+ .setup_rsvd_page = xpc_setup_rsvd_page_uv,
+
+ .allow_hb = xpc_allow_hb_uv,
+ .disallow_hb = xpc_disallow_hb_uv,
+ .disallow_all_hbs = xpc_disallow_all_hbs_uv,
+ .increment_heartbeat = xpc_increment_heartbeat_uv,
+ .offline_heartbeat = xpc_offline_heartbeat_uv,
+ .online_heartbeat = xpc_online_heartbeat_uv,
+ .heartbeat_init = xpc_heartbeat_init_uv,
+ .heartbeat_exit = xpc_heartbeat_exit_uv,
+ .get_remote_heartbeat = xpc_get_remote_heartbeat_uv,
+
+ .request_partition_activation =
+ xpc_request_partition_activation_uv,
+ .request_partition_reactivation =
+ xpc_request_partition_reactivation_uv,
+ .request_partition_deactivation =
+ xpc_request_partition_deactivation_uv,
+ .cancel_partition_deactivation_request =
+ xpc_cancel_partition_deactivation_request_uv,
+
+ .setup_ch_structures = xpc_setup_ch_structures_uv,
+ .teardown_ch_structures = xpc_teardown_ch_structures_uv,
+
+ .make_first_contact = xpc_make_first_contact_uv,
+
+ .get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
+ .send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
+ .send_chctl_closereply = xpc_send_chctl_closereply_uv,
+ .send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
+ .send_chctl_openreply = xpc_send_chctl_openreply_uv,
+ .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
+ .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,
+
+ .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,
+
+ .setup_msg_structures = xpc_setup_msg_structures_uv,
+ .teardown_msg_structures = xpc_teardown_msg_structures_uv,
+
+ .indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
+ .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
+ .assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
+ .partition_engaged = xpc_partition_engaged_uv,
+ .any_partition_engaged = xpc_any_partition_engaged_uv,
+
+ .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
+ .send_payload = xpc_send_payload_uv,
+ .get_deliverable_payload = xpc_get_deliverable_payload_uv,
+ .received_payload = xpc_received_payload_uv,
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
+};
+
int
xpc_init_uv(void)
{
- xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
- xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv;
- xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
- xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
- xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
- xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
- xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
- xpc_online_heartbeat = xpc_online_heartbeat_uv;
- xpc_heartbeat_init = xpc_heartbeat_init_uv;
- xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
- xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;
-
- xpc_request_partition_activation = xpc_request_partition_activation_uv;
- xpc_request_partition_reactivation =
- xpc_request_partition_reactivation_uv;
- xpc_request_partition_deactivation =
- xpc_request_partition_deactivation_uv;
- xpc_cancel_partition_deactivation_request =
- xpc_cancel_partition_deactivation_request_uv;
-
- xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
- xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;
-
- xpc_make_first_contact = xpc_make_first_contact_uv;
-
- xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
- xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
- xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
- xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
- xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;
-
- xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;
-
- xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
- xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;
-
- xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
- xpc_indicate_partition_disengaged =
- xpc_indicate_partition_disengaged_uv;
- xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
- xpc_partition_engaged = xpc_partition_engaged_uv;
- xpc_any_partition_engaged = xpc_any_partition_engaged_uv;
-
- xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv;
- xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv;
- xpc_send_payload = xpc_send_payload_uv;
- xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv;
- xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv;
- xpc_received_payload = xpc_received_payload_uv;
+ xpc_arch_ops = xpc_arch_ops_uv;
if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
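
The UV heartbeat moves from a push model (broadcasting an activate-mq message to every partition on each beat) to a pull model: each partition publishes a { value, offline } pair at a global address advertised in its reserved page, and the watcher copies it with xp_remote_memcpy and compares it against the last value seen. A sketch of that liveness check, with memcpy standing in for the remote copy:

/*
 * Sketch of the pull-model liveness check adopted for UV: copy the
 * peer's { value, offline } pair and compare with the last value
 * seen. memcpy stands in for xp_remote_memcpy; "peer" stands in for
 * the remote partition's published heartbeat.
 */
#include <stdio.h>
#include <string.h>

struct heartbeat { unsigned long value; unsigned long offline; };

static struct heartbeat peer;		/* stands in for remote memory */

static int peer_alive(struct heartbeat *cached, unsigned long *last)
{
	memcpy(cached, &peer, sizeof(*cached));	/* "remote" copy */
	if (cached->value == *last && !cached->offline)
		return 0;			/* no heartbeat */
	*last = cached->value;
	return 1;
}

int main(void)
{
	struct heartbeat cached;
	unsigned long last = 0;

	peer.value = 1;				/* peer beats once */
	printf("alive: %d\n", peer_alive(&cached, &last));	/* 1 */
	printf("alive: %d\n", peer_alive(&cached, &last));	/* 0: unchanged */
	peer.offline = 1;			/* deliberate offline is OK */
	printf("alive: %d\n", peer_alive(&cached, &last));	/* 1 */
	return 0;
}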
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index deb7b53167ee..83a12125b94e 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2532,8 +2532,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 9b75aa630062..30d0c81c989e 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1821,11 +1821,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_msix_enable(adapter);
- status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!status) {
netdev->features |= NETIF_F_HIGHDMA;
} else {
- status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index ece35040288c..621a7c0c46ba 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2591,13 +2591,13 @@ static int
jme_pci_dma64(struct pci_dev *pdev)
{
if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
- !pci_set_dma_mask(pdev, DMA_64BIT_MASK))
- if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
return 1;
if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
- !pci_set_dma_mask(pdev, DMA_40BIT_MASK))
- if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
return 1;
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
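
The atl1c, be, and jme changes above (and the ath9k, p54, and SCSI ones below) all replace the fixed DMA_nnBIT_MASK constants with the generic DMA_BIT_MASK(n) macro, which the kernel's dma-mapping header defines as ((1ULL << n) - 1) with a special case for n == 64, where the shift would be undefined. A standalone illustration of the macro and the widest-first fallback chain jme uses, with set_mask() standing in for pci_set_dma_mask():

/*
 * Illustration of DMA_BIT_MASK(n) (definition matches the kernel's
 * dma-mapping header) and the widest-first fallback idiom used by
 * jme above. set_mask() is a stand-in for pci_set_dma_mask(),
 * pretending the device supports at most 40-bit addressing.
 */
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int set_mask(unsigned long long mask)
{
	return mask <= DMA_BIT_MASK(40) ? 0 : -1;
}

int main(void)
{
	static const int widths[] = { 64, 40, 32 };

	for (int i = 0; i < 3; i++) {
		if (set_mask(DMA_BIT_MASK(widths[i])) == 0) {
			printf("using %d-bit DMA mask 0x%llx\n",
			       widths[i], DMA_BIT_MASK(widths[i]));
			break;	/* first mask the device accepts wins */
		}
	}
	return 0;
}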
diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c
index 6dbc58580abb..168411d322a2 100644
--- a/drivers/net/wireless/ath9k/pci.c
+++ b/drivers/net/wireless/ath9k/pci.c
@@ -93,14 +93,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev))
return -EIO;
- ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
goto bad;
}
- ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA consistent "
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index e3569a0a952d..b1610ea4bb3d 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -492,8 +492,8 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
goto err_disable_dev;
}
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
- pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto err_free_reg;
}
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 4fa3bb2ddfe4..33e5ade774ca 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -434,7 +434,8 @@ static void __init superio_parport_init(void)
0 /*base_hi*/,
PAR_IRQ,
PARPORT_DMA_NONE /* dma */,
- NULL /*struct pci_dev* */) )
+ NULL /*struct pci_dev* */,
+ 0 /* shared irq flags */) )
printk(KERN_WARNING PFX "Probing parallel port failed.\n");
#endif /* CONFIG_PARPORT_PC */
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 25a00ce4f24d..fa3a11365ec3 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -173,12 +173,21 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
struct dmar_drhd_unit *dmaru;
int ret = 0;
+ drhd = (struct acpi_dmar_hardware_unit *)header;
+ if (!drhd->address) {
+ /* Promote an attitude of violence to a BIOS engineer today */
+ WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ return -ENODEV;
+ }
dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
if (!dmaru)
return -ENOMEM;
dmaru->hdr = header;
- drhd = (struct acpi_dmar_hardware_unit *)header;
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index fb3a3f3fca7a..001b328adf80 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -733,8 +733,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
start &= (((u64)1) << addr_width) - 1;
end &= (((u64)1) << addr_width) - 1;
/* in case it's partial page */
- start = PAGE_ALIGN(start);
- end &= PAGE_MASK;
+ start &= PAGE_MASK;
+ end = PAGE_ALIGN(end);
npages = (end - start) / VTD_PAGE_SIZE;
/* we don't need lock here, nobody else touches the iova range */
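
The one-line intel-iommu fix reverses the rounding direction: clearing PTEs for [start, end) must widen the range to whole pages rather than shrink it, otherwise partially covered pages at either end are skipped. A self-contained sketch of the arithmetic (PAGE_SIZE assumed 4096 for the example):

    /* Page-rounding arithmetic behind the dma_pte_clear_range() fix.
     * PAGE_SIZE is assumed to be 4096 for this example. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        uint64_t start = 0x1234, end = 0x5678;

        /* The old code shrank the range (start=0x2000, end=0x5000) and
         * so never cleared the partially covered boundary pages. */
        start &= PAGE_MASK;    /* round down: 0x1000 */
        end = PAGE_ALIGN(end); /* round up:   0x6000 */
        printf("npages = %llu\n",
               (unsigned long long)((end - start) / PAGE_SIZE));
        return 0;
    }
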
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index fdb14ec4fd47..8b7983aba8f7 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2234,10 +2234,10 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
- || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
- || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 280261c451d6..2a889853a106 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1378,7 +1378,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (dev->nondasd_support && !dev->in_reset)
printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
- if (dma_get_required_mask(&dev->pdev->dev) > DMA_32BIT_MASK)
+ if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
dev->needs_dac = 1;
dev->dac_support = 0;
if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b1bd3fc7bae8..36fd2e75da1c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1394,7 +1394,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
*/
cmd->sense_buffer[8] = 0; /* Information */
cmd->sense_buffer[9] = 0xa; /* Add. length */
- do_div(bghm, cmd->device->sector_size);
+ bghm /= cmd->device->sector_size;
failing_sector = scsi_get_lba(cmd);
failing_sector += bghm;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 52427a8324f5..a91f5143ceac 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -855,9 +855,9 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask =
dma_get_required_mask(&pdev->dev);
- if ((required_mask > DMA_32BIT_MASK) && !pci_set_dma_mask(pdev,
- DMA_64BIT_MASK) && !pci_set_consistent_dma_mask(pdev,
- DMA_64BIT_MASK)) {
+ if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
+ DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(64))) {
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
desc = "64";
@@ -865,8 +865,8 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
}
}
- if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)
- && !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
ioc->base_add_sg_single = &_base_add_sg_single_32;
ioc->sge_size = sizeof(Mpi2SGESimple32_t);
desc = "32";
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e1850904ff73..fbc83bebdd8e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -38,9 +38,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{ };
#endif
-/* scsi_scan.c */
-int scsi_complete_async_scans(void);
-
/* scsi_devinfo.c */
extern int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 8a636103083d..2f21af21269a 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -11,7 +11,7 @@
*/
#include <linux/module.h>
-#include "scsi_priv.h"
+#include <scsi/scsi_scan.h>
static int __init wait_scan_init(void)
{
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 7fb9b5c4669a..12d13d99b6f0 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -44,6 +44,7 @@ struct intc_handle_int {
struct intc_desc_int {
struct list_head list;
struct sys_device sysdev;
+ pm_message_t state;
unsigned long *reg;
#ifdef CONFIG_SMP
unsigned long *smp;
@@ -786,18 +787,44 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
/* get intc controller associated with this sysdev */
d = container_of(dev, struct intc_desc_int, sysdev);
- /* enable wakeup irqs belonging to this intc controller */
- for_each_irq_desc(irq, desc) {
- if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
- intc_enable(irq);
+ switch (state.event) {
+ case PM_EVENT_ON:
+ if (d->state.event != PM_EVENT_FREEZE)
+ break;
+ for_each_irq_desc(irq, desc) {
+ if (desc->chip != &d->chip)
+ continue;
+ if (desc->status & IRQ_DISABLED)
+ intc_disable(irq);
+ else
+ intc_enable(irq);
+ }
+ break;
+ case PM_EVENT_FREEZE:
+ /* nothing has to be done */
+ break;
+ case PM_EVENT_SUSPEND:
+ /* enable wakeup irqs belonging to this intc controller */
+ for_each_irq_desc(irq, desc) {
+ if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
+ intc_enable(irq);
+ }
+ break;
}
+ d->state = state;
return 0;
}
+static int intc_resume(struct sys_device *dev)
+{
+ return intc_suspend(dev, PMSG_ON);
+}
+
static struct sysdev_class intc_sysdev_class = {
.name = "intc",
.suspend = intc_suspend,
+ .resume = intc_resume,
};
/* register this intc as sysdev to allow suspend/resume */
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 643908b74bc0..8eba98c8ed1e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -658,7 +658,7 @@ int spi_write_then_read(struct spi_device *spi,
int status;
struct spi_message message;
- struct spi_transfer x;
+ struct spi_transfer x[2];
u8 *local_buf;
/* Use preallocated DMA-safe buffer. We can't avoid copying here,
@@ -669,9 +669,15 @@ int spi_write_then_read(struct spi_device *spi,
return -EINVAL;
spi_message_init(&message);
- memset(&x, 0, sizeof x);
- x.len = n_tx + n_rx;
- spi_message_add_tail(&x, &message);
+ memset(x, 0, sizeof x);
+ if (n_tx) {
+ x[0].len = n_tx;
+ spi_message_add_tail(&x[0], &message);
+ }
+ if (n_rx) {
+ x[1].len = n_rx;
+ spi_message_add_tail(&x[1], &message);
+ }
/* ... unless someone else is using the pre-allocated buffer */
if (!mutex_trylock(&lock)) {
@@ -682,15 +688,15 @@ int spi_write_then_read(struct spi_device *spi,
local_buf = buf;
memcpy(local_buf, txbuf, n_tx);
- x.tx_buf = local_buf;
- x.rx_buf = local_buf;
+ x[0].tx_buf = local_buf;
+ x[1].rx_buf = local_buf + n_tx;
/* do the i/o */
status = spi_sync(spi, &message);
if (status == 0)
- memcpy(rxbuf, x.rx_buf + n_tx, n_rx);
+ memcpy(rxbuf, x[1].rx_buf, n_rx);
- if (x.tx_buf == buf)
+ if (x[0].tx_buf == buf)
mutex_unlock(&lock);
else
kfree(local_buf);
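
Splitting the combined transfer into x[0] (write) and x[1] (read) means the driver no longer reads n_tx bytes of dummy data into the shared buffer before the real reply, which also helps half-duplex controllers. The caller-visible interface is unchanged; a hedged usage sketch from inside a driver, where CMD_READ_ID and the 3-byte reply are hypothetical device details, not part of this patch:

    /* Usage sketch for spi_write_then_read(); the opcode and reply
     * length are hypothetical. Assumes a struct spi_device *spi. */
    u8 cmd = CMD_READ_ID; /* assumed device-specific opcode */
    u8 id[3];
    int status;

    status = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
    if (status < 0)
        dev_err(&spi->dev, "ID read failed: %d\n", status);
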
diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
index 0348072b3ab5..75ebe338c6f2 100644
--- a/drivers/staging/b3dfg/b3dfg.c
+++ b/drivers/staging/b3dfg/b3dfg.c
@@ -1000,7 +1000,7 @@ static int __devinit b3dfg_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- r = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (r) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_free_res;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 869d47cb6db3..0a69c0977e3f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -546,10 +546,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = acm;
acm->tty = tty;
- /* force low_latency on so that our tty_push actually forces the data through,
- otherwise it is scheduled, and with high data rates data can get lost. */
- tty->low_latency = 1;
-
if (usb_autopm_get_interface(acm->control) < 0)
goto early_bail;
else
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index 4b933f646f2e..c567168f89af 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -36,14 +36,14 @@ struct nop_usb_xceiv {
struct device *dev;
};
-static u64 nop_xceiv_dmamask = DMA_32BIT_MASK;
+static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32);
static struct platform_device nop_xceiv_device = {
.name = "nop_usb_xceiv",
.id = -1,
.dev = {
.dma_mask = &nop_xceiv_dmamask,
- .coherent_dma_mask = DMA_32BIT_MASK,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = NULL,
},
};
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 2620bf6fe5e1..9c4c700c7cc6 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb)
}
tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- usb_serial_debug_data(debug, dev, __func__,
- urb->actual_length, urb->transfer_buffer);
-
- if (!tport->tp_is_open)
- dbg("%s - port closed, dropping data", __func__);
- else
- ti_recv(&urb->dev->dev, tty,
+ if (tty) {
+ if (urb->actual_length) {
+ usb_serial_debug_data(debug, dev, __func__,
+ urb->actual_length, urb->transfer_buffer);
+
+ if (!tport->tp_is_open)
+ dbg("%s - port closed, dropping data",
+ __func__);
+ else
+ ti_recv(&urb->dev->dev, tty,
urb->transfer_buffer,
urb->actual_length);
-
- spin_lock(&tport->tp_lock);
- tport->tp_icount.rx += urb->actual_length;
- spin_unlock(&tport->tp_lock);
+ spin_lock(&tport->tp_lock);
+ tport->tp_icount.rx += urb->actual_length;
+ spin_unlock(&tport->tp_lock);
+ }
tty_kref_put(tty);
}
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 16bb7e3c0310..6c37e8ee5efe 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -698,8 +698,8 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
found:
/*
* Some methods fail to retrieve SCLK and MCLK values, we apply default
- * settings in this case (200Mhz). If that really happne often, we could
- * fetch from registers instead...
+ * settings in this case (200 MHz). If that really happens often, we
+ * could fetch from registers instead...
*/
if (rinfo->pll.mclk == 0)
rinfo->pll.mclk = 20000;
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index dd37cbcaf8ce..157057c79ca3 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -35,8 +35,6 @@ static int fb_notifier_callback(struct notifier_block *self,
return 0;
bd = container_of(self, struct backlight_device, fb_notif);
- if (!lock_fb_info(evdata->info))
- return -ENODEV;
mutex_lock(&bd->ops_lock);
if (bd->ops)
if (!bd->ops->check_fb ||
@@ -49,7 +47,6 @@ static int fb_notifier_callback(struct notifier_block *self,
backlight_update_status(bd);
}
mutex_unlock(&bd->ops_lock);
- unlock_fb_info(evdata->info);
return 0;
}
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 0bb13df0fa89..b6449470106c 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -40,8 +40,6 @@ static int fb_notifier_callback(struct notifier_block *self,
if (!ld->ops)
return 0;
- if (!lock_fb_info(evdata->info))
- return -ENODEV;
mutex_lock(&ld->ops_lock);
if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) {
if (event == FB_EVENT_BLANK) {
@@ -53,7 +51,6 @@ static int fb_notifier_callback(struct notifier_block *self,
}
}
mutex_unlock(&ld->ops_lock);
- unlock_fb_info(evdata->info);
return 0;
}
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index d42e385f091c..4c2bf923418c 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -567,9 +567,7 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
default:
dev_dbg(info->device,
"Unsupported bpp size: %d\n", var->bits_per_pixel);
- assert(false);
- /* should never occur */
- break;
+ return -EINVAL;
}
if (var->xres_virtual < var->xres)
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 2cd500a304f2..471a9a60376a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2263,9 +2263,12 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
}
+ if (!lock_fb_info(info))
+ return;
event.info = info;
event.data = &blank;
fb_notifier_call_chain(FB_EVENT_CONBLANK, &event);
+ unlock_fb_info(info);
}
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
@@ -2956,8 +2959,6 @@ static int fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
- if (!lock_fb_info(info))
- return -ENODEV;
idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] == idx)
@@ -2985,8 +2986,6 @@ static int fbcon_fb_unregistered(struct fb_info *info)
if (primary_device == idx)
primary_device = -1;
- unlock_fb_info(info);
-
if (!num_registered_fb)
unregister_con_driver(&fb_con);
@@ -3027,11 +3026,8 @@ static int fbcon_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
- if (!lock_fb_info(info))
- return -ENODEV;
idx = info->node;
fbcon_select_primary(info);
- unlock_fb_info(info);
if (info_idx == -1) {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -3152,53 +3148,23 @@ static int fbcon_event_notify(struct notifier_block *self,
switch(action) {
case FB_EVENT_SUSPEND:
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
fbcon_suspended(info);
- unlock_fb_info(info);
break;
case FB_EVENT_RESUME:
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
fbcon_resumed(info);
- unlock_fb_info(info);
break;
case FB_EVENT_MODE_CHANGE:
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
fbcon_modechanged(info);
- unlock_fb_info(info);
break;
case FB_EVENT_MODE_CHANGE_ALL:
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
fbcon_set_all_vcs(info);
- unlock_fb_info(info);
break;
case FB_EVENT_MODE_DELETE:
mode = event->data;
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
ret = fbcon_mode_deleted(info, mode);
- unlock_fb_info(info);
break;
case FB_EVENT_FB_UNBIND:
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
idx = info->node;
- unlock_fb_info(info);
ret = fbcon_fb_unbind(idx);
break;
case FB_EVENT_FB_REGISTERED:
@@ -3217,29 +3183,14 @@ static int fbcon_event_notify(struct notifier_block *self,
con2fb->framebuffer = con2fb_map[con2fb->console - 1];
break;
case FB_EVENT_BLANK:
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
fbcon_fb_blanked(info, *(int *)event->data);
- unlock_fb_info(info);
break;
case FB_EVENT_NEW_MODELIST:
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
fbcon_new_modelist(info);
- unlock_fb_info(info);
break;
case FB_EVENT_GET_REQ:
caps = event->data;
- if (!lock_fb_info(info)) {
- ret = -ENODEV;
- goto done;
- }
fbcon_get_requirement(info, caps);
- unlock_fb_info(info);
break;
}
done:
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 0c5b9a9fd56f..8dea2bc92705 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -210,12 +210,15 @@ static int __init efifb_probe(struct platform_device *dev)
unsigned int size_total;
int request_succeeded = 0;
- printk(KERN_INFO "efifb: probing for efifb\n");
-
if (!screen_info.lfb_depth)
screen_info.lfb_depth = 32;
if (!screen_info.pages)
screen_info.pages = 1;
+ if (!screen_info.lfb_base) {
+ printk(KERN_DEBUG "efifb: invalid framebuffer address\n");
+ return -ENODEV;
+ }
+ printk(KERN_INFO "efifb: probing for efifb\n");
/* just assume they're all unset if any are */
if (!screen_info.blue_size) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 2ac32e6b5953..d412a1ddc12f 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1097,8 +1097,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
return -EINVAL;
con2fb.framebuffer = -1;
event.data = &con2fb;
+ if (!lock_fb_info(info))
+ return -ENODEV;
event.info = info;
fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
+ unlock_fb_info(info);
ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
break;
case FBIOPUT_CON2FBMAP:
@@ -1115,8 +1118,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
break;
}
event.data = &con2fb;
+ if (!lock_fb_info(info))
+ return -ENODEV;
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
+ unlock_fb_info(info);
break;
case FBIOBLANK:
if (!lock_fb_info(info))
@@ -1521,7 +1527,10 @@ register_framebuffer(struct fb_info *fb_info)
registered_fb[i] = fb_info;
event.info = fb_info;
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
+ unlock_fb_info(fb_info);
return 0;
}
@@ -1555,8 +1564,12 @@ unregister_framebuffer(struct fb_info *fb_info)
goto done;
}
+
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+ unlock_fb_info(fb_info);
if (ret) {
ret = -EINVAL;
@@ -1590,6 +1603,8 @@ void fb_set_suspend(struct fb_info *info, int state)
{
struct fb_event event;
+ if (!lock_fb_info(info))
+ return;
event.info = info;
if (state) {
fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
@@ -1598,6 +1613,7 @@ void fb_set_suspend(struct fb_info *info, int state)
info->state = FBINFO_STATE_RUNNING;
fb_notifier_call_chain(FB_EVENT_RESUME, &event);
}
+ unlock_fb_info(info);
}
/**
@@ -1667,8 +1683,11 @@ int fb_new_modelist(struct fb_info *info)
err = 1;
if (!list_empty(&info->modelist)) {
+ if (!lock_fb_info(info))
+ return -ENODEV;
event.info = info;
err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+ unlock_fb_info(info);
}
return err;
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index a50bea614804..40984551c927 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -53,6 +53,7 @@
#define PCI_DEVICE_ID_INTEL_830M 0x3577
#define PCI_DEVICE_ID_INTEL_845G 0x2562
#define PCI_DEVICE_ID_INTEL_85XGM 0x3582
+#define PCI_DEVICE_ID_INTEL_854 0x358E
#define PCI_DEVICE_ID_INTEL_865G 0x2572
#define PCI_DEVICE_ID_INTEL_915G 0x2582
#define PCI_DEVICE_ID_INTEL_915GM 0x2592
@@ -154,6 +155,7 @@ enum intel_chips {
INTEL_85XGM,
INTEL_852GM,
INTEL_852GME,
+ INTEL_854,
INTEL_855GM,
INTEL_855GME,
INTEL_865G,
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
index b3065492bb20..487f2be47460 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/intelfb/intelfb_i2c.c
@@ -156,6 +156,7 @@ void intelfb_create_i2c_busses(struct intelfb_info *dinfo)
switch(dinfo->chipset) {
case INTEL_830M:
case INTEL_845G:
+ case INTEL_854:
case INTEL_855GM:
case INTEL_865G:
dinfo->output[i].type = INTELFB_OUTPUT_DVO;
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 6d8e5415c809..ace14fe02fc4 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -182,6 +182,7 @@ static struct pci_device_id intelfb_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_865G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_865G },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_854, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_854 },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915G },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
index 8b26b27c2db6..0689f97c5238 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/intelfb/intelfbhw.c
@@ -84,6 +84,11 @@ int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo)
dinfo->mobile = 0;
dinfo->pll_index = PLLS_I8xx;
return 0;
+ case PCI_DEVICE_ID_INTEL_854:
+ dinfo->mobile = 1;
+ dinfo->name = "Intel(R) 854";
+ dinfo->chipset = INTEL_854;
+ return 0;
case PCI_DEVICE_ID_INTEL_85XGM:
tmp = 0;
dinfo->mobile = 1;
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 4dcec48a1d78..c3fad34309ed 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -45,11 +45,11 @@ struct s3fb_info {
static const struct svga_fb_format s3fb_formats[] = {
{ 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 16},
- { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
- { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
+ { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 1,
FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
- { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ { 8, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 4, 8},
{16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 4},
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index fad58cf9ef73..10ddad8e17d6 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -199,16 +199,20 @@
extern void (*sa1100fb_backlight_power)(int on);
extern void (*sa1100fb_lcd_power)(int on);
-/*
- * IMHO this looks wrong. In 8BPP, length should be 8.
- */
-static struct sa1100fb_rgb rgb_8 = {
+static struct sa1100fb_rgb rgb_4 = {
.red = { .offset = 0, .length = 4, },
.green = { .offset = 0, .length = 4, },
.blue = { .offset = 0, .length = 4, },
.transp = { .offset = 0, .length = 0, },
};
+static struct sa1100fb_rgb rgb_8 = {
+ .red = { .offset = 0, .length = 8, },
+ .green = { .offset = 0, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
+ .transp = { .offset = 0, .length = 0, },
+};
+
static struct sa1100fb_rgb def_rgb_16 = {
.red = { .offset = 11, .length = 5, },
.green = { .offset = 5, .length = 6, },
@@ -613,7 +617,7 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
DPRINTK("var->bits_per_pixel=%d\n", var->bits_per_pixel);
switch (var->bits_per_pixel) {
case 4:
- rgbidx = RGB_8;
+ rgbidx = RGB_4;
break;
case 8:
rgbidx = RGB_8;
@@ -1382,6 +1386,7 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
fbi->fb.monspecs = monspecs;
fbi->fb.pseudo_palette = (fbi + 1);
+ fbi->rgb[RGB_4] = &rgb_4;
fbi->rgb[RGB_8] = &rgb_8;
fbi->rgb[RGB_16] = &def_rgb_16;
diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h
index 86831db9a042..1c3b459865d8 100644
--- a/drivers/video/sa1100fb.h
+++ b/drivers/video/sa1100fb.h
@@ -57,9 +57,10 @@ struct sa1100fb_lcd_reg {
unsigned long lccr3;
};
-#define RGB_8 (0)
-#define RGB_16 (1)
-#define NR_RGB 2
+#define RGB_4 (0)
+#define RGB_8 (1)
+#define RGB_16 (2)
+#define NR_RGB 3
struct sa1100fb_info {
struct fb_info fb;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 346d6458cf76..7e17ee95a97a 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1129,7 +1129,7 @@ sisfb_bpp_to_var(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
switch(var->bits_per_pixel) {
case 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
- var->red.length = var->green.length = var->blue.length = 6;
+ var->red.length = var->green.length = var->blue.length = 8;
break;
case 16:
var->red.offset = 11;
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index a439159204a8..89158bc71da2 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -308,9 +308,11 @@ static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
* color depth = SUM(var->{color}.length)
*
* Pseudocolor:
- * var->{color}.offset is 0
- * var->{color}.length contains width of DAC or the number of unique
- * colors available (color depth)
+ * var->{color}.offset is 0 unless the palette index takes less than
+ * bits_per_pixel bits and is stored in the upper
+ * bits of the pixel value
+ * var->{color}.length is set so that 1 << length is the number of
+ * available palette entries
* pseudo_palette is not used
* RAMDAC[X] is programmed to (red, green, blue)
* color depth = var->{color}.length
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 0b370aebdbfd..421770b5e6ab 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -55,6 +55,7 @@ static u16 maxvf __devinitdata; /* maximum vertical frequency */
static u16 maxhf __devinitdata; /* maximum horizontal frequency */
static u16 vbemode __devinitdata; /* force use of a specific VBE mode */
static char *mode_option __devinitdata;
+static u8 dac_width = 6;
static struct uvesafb_ktask *uvfb_tasks[UVESAFB_TASKS_MAX];
static DEFINE_MUTEX(uvfb_lock);
@@ -303,22 +304,10 @@ static void uvesafb_setup_var(struct fb_var_screeninfo *var,
var->blue.offset = 0;
var->transp.offset = 0;
- /*
- * We're assuming that we can switch the DAC to 8 bits. If
- * this proves to be incorrect, we'll update the fields
- * later in set_par().
- */
- if (par->vbe_ib.capabilities & VBE_CAP_CAN_SWITCH_DAC) {
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- } else {
- var->red.length = 6;
- var->green.length = 6;
- var->blue.length = 6;
- var->transp.length = 0;
- }
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
}
}
@@ -1006,7 +995,7 @@ static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
struct fb_info *info)
{
struct uvesafb_pal_entry entry;
- int shift = 16 - info->var.green.length;
+ int shift = 16 - dac_width;
int err = 0;
if (regno >= info->cmap.len)
@@ -1055,7 +1044,7 @@ static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct uvesafb_pal_entry *entries;
- int shift = 16 - info->var.green.length;
+ int shift = 16 - dac_width;
int i, err = 0;
if (info->var.bits_per_pixel == 8) {
@@ -1317,13 +1306,9 @@ setmode:
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f ||
((task->t.regs.ebx & 0xff00) >> 8) != 8) {
- /*
- * We've failed to set the DAC palette format -
- * time to correct var.
- */
- info->var.red.length = 6;
- info->var.green.length = 6;
- info->var.blue.length = 6;
+ dac_width = 6;
+ } else {
+ dac_width = 8;
}
}
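
uvesafb now records the negotiated DAC width in one place and derives the cmap shift from it, instead of inferring the width from var->green.length, which userspace can overwrite. Since fb colour components arrive as 16-bit values, the shift is simply 16 - dac_width; a worked example:

    /* Worked example: 16-bit fb colour component -> DAC register value. */
    #include <stdio.h>

    int main(void)
    {
        unsigned red = 0xffff;                       /* full intensity */
        printf("6-bit DAC: %#x\n", red >> (16 - 6)); /* 0x3f */
        printf("8-bit DAC: %#x\n", red >> (16 - 8)); /* 0xff */
        return 0;
    }
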
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index cc919ae46571..050d432c7d95 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -318,13 +318,16 @@ static int vfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* {hardwarespecific} contains width of RAMDAC
* cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset)
* RAMDAC[X] is programmed to (red, green, blue)
- *
+ *
* Pseudocolor:
- * uses offset = 0 && length = RAMDAC register width.
- * var->{color}.offset is 0
- * var->{color}.length contains widht of DAC
+ * var->{color}.offset is 0 unless the palette index takes less than
+ * bits_per_pixel bits and is stored in the upper
+ * bits of the pixel value
+ * var->{color}.length is set so that 1 << length is the number of available
+ * palette entries
* cmap is not used
* RAMDAC[X] is programmed to (red, green, blue)
+ *
* Truecolor:
* does not use DAC. Usually 3 are present.
* var->{color}.offset contains start of bitfield
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 5f54c01c1568..bdfd584ad853 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -21,29 +21,41 @@ static void disable_hotplug_cpu(int cpu)
set_cpu_present(cpu, false);
}
-static void vcpu_hotplug(unsigned int cpu)
+static int vcpu_online(unsigned int cpu)
{
int err;
char dir[32], state[32];
- if (!cpu_possible(cpu))
- return;
-
sprintf(dir, "cpu/%u", cpu);
err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
if (err != 1) {
printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
- return;
+ return err;
}
- if (strcmp(state, "online") == 0) {
+ if (strcmp(state, "online") == 0)
+ return 1;
+ else if (strcmp(state, "offline") == 0)
+ return 0;
+
+ printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", state, cpu);
+ return -EINVAL;
+}
+static void vcpu_hotplug(unsigned int cpu)
+{
+ if (!cpu_possible(cpu))
+ return;
+
+ switch (vcpu_online(cpu)) {
+ case 1:
enable_hotplug_cpu(cpu);
- } else if (strcmp(state, "offline") == 0) {
+ break;
+ case 0:
(void)cpu_down(cpu);
disable_hotplug_cpu(cpu);
- } else {
- printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
- state, cpu);
+ break;
+ default:
+ break;
}
}
@@ -64,12 +76,20 @@ static void handle_vcpu_hotplug_event(struct xenbus_watch *watch,
static int setup_cpu_watcher(struct notifier_block *notifier,
unsigned long event, void *data)
{
+ int cpu;
static struct xenbus_watch cpu_watch = {
.node = "cpu",
.callback = handle_vcpu_hotplug_event};
(void)register_xenbus_watch(&cpu_watch);
+ for_each_possible_cpu(cpu) {
+ if (vcpu_online(cpu) == 0) {
+ (void)cpu_down(cpu);
+ cpu_clear(cpu, cpu_present_map);
+ }
+ }
+
return NOTIFY_DONE;
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 0d61db1e7b49..4b5b84837ee1 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -62,14 +62,15 @@ static int xen_suspend(void *data)
gnttab_resume();
xen_mm_unpin_all();
- sysdev_resume();
-
if (!*cancelled) {
xen_irq_resume();
xen_console_resume();
xen_timer_resume();
}
+ sysdev_resume();
+ device_power_up(PMSG_RESUME);
+
return 0;
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index b43b95563663..acf678831103 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -590,9 +590,8 @@ static int ext2_get_blocks(struct inode *inode,
if (depth == 0)
return (err);
-reread:
- partial = ext2_get_branch(inode, depth, offsets, chain, &err);
+ partial = ext2_get_branch(inode, depth, offsets, chain, &err);
/* Simplest case - block found, no allocation needed */
if (!partial) {
first_block = le32_to_cpu(chain[depth - 1].key);
@@ -602,15 +601,16 @@ reread:
while (count < maxblocks && count <= blocks_to_boundary) {
ext2_fsblk_t blk;
- if (!verify_chain(chain, partial)) {
+ if (!verify_chain(chain, chain + depth - 1)) {
/*
* Indirect block might be removed by
* truncate while we were reading it.
* Handling of that case: forget what we've
* got now, go to reread.
*/
+ err = -EAGAIN;
count = 0;
- goto changed;
+ break;
}
blk = le32_to_cpu(*(chain[depth-1].p + count));
if (blk == first_block + count)
@@ -618,7 +618,8 @@ reread:
else
break;
}
- goto got_it;
+ if (err != -EAGAIN)
+ goto got_it;
}
/* Next simple case - plain lookup or failed read of indirect block */
@@ -626,6 +627,33 @@ reread:
goto cleanup;
mutex_lock(&ei->truncate_mutex);
+ /*
+ * If the indirect block is missing while we are reading
+ * the chain (ext2_get_branch() returns -EAGAIN err), or
+ * if the chain has been changed after we grab the semaphore,
+ * (either because another process truncated this branch, or
+ * another get_block allocated this branch) re-grab the chain to see if
+ * the requested block has been allocated or not.
+ *
+ * Since we already block the truncate/other get_block
+ * at this point, we will have the current copy of the chain when we
+ * splice the branch into the tree.
+ */
+ if (err == -EAGAIN || !verify_chain(chain, partial)) {
+ while (partial > chain) {
+ brelse(partial->bh);
+ partial--;
+ }
+ partial = ext2_get_branch(inode, depth, offsets, chain, &err);
+ if (!partial) {
+ count++;
+ mutex_unlock(&ei->truncate_mutex);
+ if (err)
+ goto cleanup;
+ clear_buffer_new(bh_result);
+ goto got_it;
+ }
+ }
/*
* Okay, we need to do block allocation. Lazily initialize the block
@@ -683,12 +711,6 @@ cleanup:
partial--;
}
return err;
-changed:
- while (partial > chain) {
- brelse(partial->bh);
- partial--;
- }
- goto reread;
}
int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 9435dda8f1e0..a1cbff2b4d99 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -70,6 +70,10 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
BUG();
return 0;
}
+
+ if (!tree)
+ return 0;
+
if (tree->node_size >= PAGE_CACHE_SIZE) {
nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
spin_lock(&tree->hash_lock);
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 36ca2e1a4fa3..7b6165f25fbe 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -349,6 +349,7 @@ void hfs_mdb_put(struct super_block *sb)
if (HFS_SB(sb)->nls_disk)
unload_nls(HFS_SB(sb)->nls_disk);
+ free_pages((unsigned long)HFS_SB(sb)->bitmap, PAGE_SIZE < 8192 ? 1 : 0);
kfree(HFS_SB(sb));
sb->s_fs_info = NULL;
}
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index c7bd649bbbdc..3e9afc2a91d2 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -55,6 +55,25 @@
* need do nothing.
* RevokeValid set, Revoked set:
* buffer has been revoked.
+ *
+ * Locking rules:
+ * We keep two hash tables of revoke records. One hashtable belongs to the
+ * running transaction (is pointed to by journal->j_revoke), the other one
+ * belongs to the committing transaction. Accesses to the second hash table
+ * happen only from the kjournald and no other thread touches this table. Also
+ * journal_switch_revoke_table() which switches which hashtable belongs to the
+ * running and which to the committing transaction is called only from
+ * kjournald. Therefore we need no locks when accessing the hashtable belonging
+ * to the committing transaction.
+ *
+ * All users operating on the hash table belonging to the running transaction
+ * have a handle to the transaction. Therefore they are safe from kjournald
+ * switching hash tables under them. For operations on the lists of entries in
+ * the hash table j_revoke_lock is used.
+ *
+ * Finally, also replay code uses the hash tables but at this moment no one else
+ * can touch them (filesystem isn't mounted yet) and hence no locking is
+ * needed.
*/
#ifndef __KERNEL__
@@ -402,8 +421,6 @@ int journal_revoke(handle_t *handle, unsigned long blocknr,
* the second time we would still have a pending revoke to cancel. So,
* do not trust the Revoked bit on buffers unless RevokeValid is also
* set.
- *
- * The caller must have the journal locked.
*/
int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
@@ -481,10 +498,7 @@ void journal_switch_revoke_table(journal_t *journal)
/*
* Write revoke records to the journal for all entries in the current
* revoke hash, deleting the entries as we go.
- *
- * Called with the journal lock held.
*/
-
void journal_write_revoke_records(journal_t *journal,
transaction_t *transaction)
{
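
The locking rules documented above hinge on the two-table scheme: journal->j_revoke always points at the running transaction's table, and only kjournald ever flips the pointer. For context, a sketch of journal_switch_revoke_table() (named in the hunk header above) reconstructed from fs/jbd/revoke.c; it is a pointer swap plus re-initialisation:

    /* Sketch of the revoke-table switch; called only from kjournald,
     * so the committing transaction's table needs no locking. */
    void journal_switch_revoke_table(journal_t *journal)
    {
        int i;

        if (journal->j_revoke == journal->j_revoke_table[0])
            journal->j_revoke = journal->j_revoke_table[1];
        else
            journal->j_revoke = journal->j_revoke_table[0];

        for (i = 0; i < journal->j_revoke->hash_size; i++)
            INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
    }
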
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c13f67300fe7..7ec89fc05b2b 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -153,23 +153,6 @@ xfs_find_bdev_for_inode(
}
/*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
- */
-STATIC void
-xfs_finish_ioend(
- xfs_ioend_t *ioend,
- int wait)
-{
- if (atomic_dec_and_test(&ioend->io_remaining)) {
- queue_work(xfsdatad_workqueue, &ioend->io_work);
- if (wait)
- flush_workqueue(xfsdatad_workqueue);
- }
-}
-
-/*
* We're now finished for good with this ioend structure.
* Update the page state via the associated buffer_heads,
* release holds on the inode and bio, and finally free
@@ -310,6 +293,27 @@ xfs_end_bio_read(
}
/*
+ * Schedule IO completion handling on a xfsdatad if this was
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
+ */
+STATIC void
+xfs_finish_ioend(
+ xfs_ioend_t *ioend,
+ int wait)
+{
+ if (atomic_dec_and_test(&ioend->io_remaining)) {
+ struct workqueue_struct *wq = xfsdatad_workqueue;
+ if (ioend->io_work.func == xfs_end_bio_unwritten)
+ wq = xfsconvertd_workqueue;
+
+ queue_work(wq, &ioend->io_work);
+ if (wait)
+ flush_workqueue(wq);
+ }
+}
+
+/*
* Allocate and initialise an IO completion structure.
* We need to track unwritten extent write completion here initially.
* We'll need to extend this for updating the ondisk inode size later
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 1dd528849755..221b3e66ceef 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -19,6 +19,7 @@
#define __XFS_AOPS_H__
extern struct workqueue_struct *xfsdatad_workqueue;
+extern struct workqueue_struct *xfsconvertd_workqueue;
extern mempool_t *xfs_ioend_pool;
/*
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index aa1016bb9134..e28800a9f2b5 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -51,6 +51,7 @@ static struct shrinker xfs_buf_shake = {
static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
+struct workqueue_struct *xfsconvertd_workqueue;
#ifdef XFS_BUF_TRACE
void
@@ -1775,6 +1776,7 @@ xfs_flush_buftarg(
xfs_buf_t *bp, *n;
int pincount = 0;
+ xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
xfs_buf_runall_queues(xfslogd_workqueue);
@@ -1831,9 +1833,15 @@ xfs_buf_init(void)
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
+ xfsconvertd_workqueue = create_workqueue("xfsconvertd");
+ if (!xfsconvertd_workqueue)
+ goto out_destroy_xfsdatad_workqueue;
+
register_shrinker(&xfs_buf_shake);
return 0;
+ out_destroy_xfsdatad_workqueue:
+ destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
@@ -1849,6 +1857,7 @@ void
xfs_buf_terminate(void)
{
unregister_shrinker(&xfs_buf_shake);
+ destroy_workqueue(xfsconvertd_workqueue);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 5aeb77776961..08be36d7326c 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -74,14 +74,14 @@ xfs_flush_pages(
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
xfs_iflags_clear(ip, XFS_ITRUNCATED);
- ret = filemap_fdatawrite(mapping);
- if (flags & XFS_B_ASYNC)
- return -ret;
- ret2 = filemap_fdatawait(mapping);
- if (!ret)
- ret = ret2;
+ ret = -filemap_fdatawrite(mapping);
}
- return -ret;
+ if (flags & XFS_B_ASYNC)
+ return ret;
+ ret2 = xfs_wait_on_pages(ip, first, last);
+ if (!ret)
+ ret = ret2;
+ return ret;
}
int
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 7e90daa0d1d1..9142192ccbe6 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -751,10 +751,26 @@ start:
goto relock;
}
} else {
+ int enospc = 0;
+ ssize_t ret2 = 0;
+
+write_retry:
xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
*offset, ioflags);
- ret = generic_file_buffered_write(iocb, iovp, segs,
+ ret2 = generic_file_buffered_write(iocb, iovp, segs,
pos, offset, count, ret);
+ /*
+ * if we just got an ENOSPC, flush the inode now that we
+ * aren't holding any page locks and retry *once*
+ */
+ if (ret2 == -ENOSPC && !enospc) {
+ error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE);
+ if (error)
+ goto out_unlock_internal;
+ enospc = 1;
+ goto write_retry;
+ }
+ ret = ret2;
}
current->backing_dev_info = NULL;
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index a608e72fa405..f7ba76633c29 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -62,12 +62,6 @@ xfs_sync_inodes_ag(
uint32_t first_index = 0;
int error = 0;
int last_error = 0;
- int fflag = XFS_B_ASYNC;
-
- if (flags & SYNC_DELWRI)
- fflag = XFS_B_DELWRI;
- if (flags & SYNC_WAIT)
- fflag = 0; /* synchronous overrides all */
do {
struct inode *inode;
@@ -128,11 +122,23 @@ xfs_sync_inodes_ag(
* If we have to flush data or wait for I/O completion
* we need to hold the iolock.
*/
- if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- lock_flags |= XFS_IOLOCK_SHARED;
- error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
- if (flags & SYNC_IOWAIT)
+ if (flags & SYNC_DELWRI) {
+ if (VN_DIRTY(inode)) {
+ if (flags & SYNC_TRYLOCK) {
+ if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
+ lock_flags |= XFS_IOLOCK_SHARED;
+ } else {
+ xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ lock_flags |= XFS_IOLOCK_SHARED;
+ }
+ if (lock_flags & XFS_IOLOCK_SHARED) {
+ error = xfs_flush_pages(ip, 0, -1,
+ (flags & SYNC_WAIT) ? 0
+ : XFS_B_ASYNC,
+ FI_NONE);
+ }
+ }
+ if (VN_CACHED(inode) && (flags & SYNC_IOWAIT))
xfs_ioend_wait(ip);
}
xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -398,15 +404,17 @@ STATIC void
xfs_syncd_queue_work(
struct xfs_mount *mp,
void *data,
- void (*syncer)(struct xfs_mount *, void *))
+ void (*syncer)(struct xfs_mount *, void *),
+ struct completion *completion)
{
- struct bhv_vfs_sync_work *work;
+ struct xfs_sync_work *work;
- work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
+ work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
INIT_LIST_HEAD(&work->w_list);
work->w_syncer = syncer;
work->w_data = data;
work->w_mount = mp;
+ work->w_completion = completion;
spin_lock(&mp->m_sync_lock);
list_add_tail(&work->w_list, &mp->m_sync_list);
spin_unlock(&mp->m_sync_lock);
@@ -420,49 +428,26 @@ xfs_syncd_queue_work(
* heads, looking about for more room...
*/
STATIC void
-xfs_flush_inode_work(
- struct xfs_mount *mp,
- void *arg)
-{
- struct inode *inode = arg;
- filemap_flush(inode->i_mapping);
- iput(inode);
-}
-
-void
-xfs_flush_inode(
- xfs_inode_t *ip)
-{
- struct inode *inode = VFS_I(ip);
-
- igrab(inode);
- xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
- delay(msecs_to_jiffies(500));
-}
-
-/*
- * This is the "bigger hammer" version of xfs_flush_inode_work...
- * (IOW, "If at first you don't succeed, use a Bigger Hammer").
- */
-STATIC void
-xfs_flush_device_work(
+xfs_flush_inodes_work(
struct xfs_mount *mp,
void *arg)
{
struct inode *inode = arg;
- sync_blockdev(mp->m_super->s_bdev);
+ xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK);
+ xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT);
iput(inode);
}
void
-xfs_flush_device(
+xfs_flush_inodes(
xfs_inode_t *ip)
{
struct inode *inode = VFS_I(ip);
+ DECLARE_COMPLETION_ONSTACK(completion);
igrab(inode);
- xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
- delay(msecs_to_jiffies(500));
+ xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
+ wait_for_completion(&completion);
xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
@@ -497,7 +482,7 @@ xfssyncd(
{
struct xfs_mount *mp = arg;
long timeleft;
- bhv_vfs_sync_work_t *work, *n;
+ xfs_sync_work_t *work, *n;
LIST_HEAD (tmp);
set_freezable();
@@ -532,6 +517,8 @@ xfssyncd(
list_del(&work->w_list);
if (work == &mp->m_sync_work)
continue;
+ if (work->w_completion)
+ complete(work->w_completion);
kmem_free(work);
}
}
@@ -545,6 +532,7 @@ xfs_syncd_init(
{
mp->m_sync_work.w_syncer = xfs_sync_worker;
mp->m_sync_work.w_mount = mp;
+ mp->m_sync_work.w_completion = NULL;
mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
if (IS_ERR(mp->m_sync_task))
return -PTR_ERR(mp->m_sync_task);
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 04f058c848ae..308d5bf6dfbd 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -21,18 +21,20 @@
struct xfs_mount;
struct xfs_perag;
-typedef struct bhv_vfs_sync_work {
+typedef struct xfs_sync_work {
struct list_head w_list;
struct xfs_mount *w_mount;
void *w_data; /* syncer routine argument */
void (*w_syncer)(struct xfs_mount *, void *);
-} bhv_vfs_sync_work_t;
+ struct completion *w_completion;
+} xfs_sync_work_t;
#define SYNC_ATTR 0x0001 /* sync attributes */
#define SYNC_DELWRI 0x0002 /* look at delayed writes */
#define SYNC_WAIT 0x0004 /* wait for i/o to complete */
#define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */
#define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */
+#define SYNC_TRYLOCK 0x0020 /* only try to lock inodes */
int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
@@ -43,8 +45,7 @@ int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);
-void xfs_flush_inode(struct xfs_inode *ip);
-void xfs_flush_device(struct xfs_inode *ip);
+void xfs_flush_inodes(struct xfs_inode *ip);
int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode);
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 478e587087fe..89b81eedce6a 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -69,15 +69,6 @@ xfs_inode_alloc(
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
- /*
- * initialise the VFS inode here to get failures
- * out of the way early.
- */
- if (!inode_init_always(mp->m_super, VFS_I(ip))) {
- kmem_zone_free(xfs_inode_zone, ip);
- return NULL;
- }
-
/* initialise the xfs inode */
ip->i_ino = ino;
ip->i_mount = mp;
@@ -113,6 +104,20 @@ xfs_inode_alloc(
#ifdef XFS_DIR2_TRACE
ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif
+ /*
+ * Now initialise the VFS inode. We do this after the xfs_inode
+ * initialisation as internal failures will result in ->destroy_inode
+ * being called and that will pass down through the reclaim path and
+ * free the XFS inode. This path requires the XFS inode to already be
+ * initialised. Hence if this call fails, the xfs_inode has already
+ * been freed and we should not reference it at all in the error
+ * handling.
+ */
+ if (!inode_init_always(mp->m_super, VFS_I(ip)))
+ return NULL;
+
+ /* prevent anyone from using this yet */
+ VFS_I(ip)->i_state = I_NEW|I_LOCK;
return ip;
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 08ce72316bfe..5aaa2d7ec155 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -338,38 +338,6 @@ xfs_iomap_eof_align_last_fsb(
}
STATIC int
-xfs_flush_space(
- xfs_inode_t *ip,
- int *fsynced,
- int *ioflags)
-{
- switch (*fsynced) {
- case 0:
- if (ip->i_delayed_blks) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_flush_inode(ip);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- *fsynced = 1;
- } else {
- *ioflags |= BMAPI_SYNC;
- *fsynced = 2;
- }
- return 0;
- case 1:
- *fsynced = 2;
- *ioflags |= BMAPI_SYNC;
- return 0;
- case 2:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_flush_device(ip);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- *fsynced = 3;
- return 0;
- }
- return 1;
-}
-
-STATIC int
xfs_cmn_err_fsblock_zero(
xfs_inode_t *ip,
xfs_bmbt_irec_t *imap)
@@ -538,15 +506,9 @@ error_out:
}
/*
- * If the caller is doing a write at the end of the file,
- * then extend the allocation out to the file system's write
- * iosize. We clean up any extra space left over when the
- * file is closed in xfs_inactive().
- *
- * For sync writes, we are flushing delayed allocate space to
- * try to make additional space available for allocation near
- * the filesystem full boundary - preallocation hurts in that
- * situation, of course.
+ * If the caller is doing a write at the end of the file, then extend the
+ * allocation out to the file system's write iosize. We clean up any extra
+ * space left over when the file is closed in xfs_inactive().
*/
STATIC int
xfs_iomap_eof_want_preallocate(
@@ -565,7 +527,7 @@ xfs_iomap_eof_want_preallocate(
int n, error, imaps;
*prealloc = 0;
- if ((ioflag & BMAPI_SYNC) || (offset + count) <= ip->i_size)
+ if ((offset + count) <= ip->i_size)
return 0;
/*
@@ -611,7 +573,7 @@ xfs_iomap_write_delay(
xfs_extlen_t extsz;
int nimaps;
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
- int prealloc, fsynced = 0;
+ int prealloc, flushed = 0;
int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -627,12 +589,12 @@ xfs_iomap_write_delay(
extsz = xfs_get_extsz_hint(ip);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
-retry:
error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
if (error)
return error;
+retry:
if (prealloc) {
aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
@@ -659,15 +621,22 @@ retry:
/*
* If bmapi returned us nothing, and if we didn't get back EDQUOT,
- * then we must have run out of space - flush delalloc, and retry..
+ * then we must have run out of space - flush all other inodes with
+ * delalloc blocks and retry without EOF preallocation.
*/
if (nimaps == 0) {
xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
ip, offset, count);
- if (xfs_flush_space(ip, &fsynced, &ioflag))
+ if (flushed)
return XFS_ERROR(ENOSPC);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_flush_inodes(ip);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ flushed = 1;
error = 0;
+ prealloc = 0;
goto retry;
}
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index a1cc1322fc0f..fdcf7b82747f 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -40,8 +40,7 @@ typedef enum {
BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */
BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */
BMAPI_MMAP = (1 << 6), /* allocate for mmap write */
- BMAPI_SYNC = (1 << 7), /* sync write to flush delalloc space */
- BMAPI_TRYLOCK = (1 << 8), /* non-blocking request */
+ BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */
} bmapi_flags_t;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f76c6d7cea21..3750f04ede0b 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -562,9 +562,8 @@ xfs_log_mount(
}
mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
- if (!mp->m_log) {
- cmn_err(CE_WARN, "XFS: Log allocation failed: No memory!");
- error = ENOMEM;
+ if (IS_ERR(mp->m_log)) {
+ error = -PTR_ERR(mp->m_log);
goto out;
}
@@ -1180,10 +1179,13 @@ xlog_alloc_log(xfs_mount_t *mp,
xfs_buf_t *bp;
int i;
int iclogsize;
+ int error = ENOMEM;
log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
- if (!log)
- return NULL;
+ if (!log) {
+ xlog_warn("XFS: Log allocation failed: No memory!");
+ goto out;
+ }
log->l_mp = mp;
log->l_targ = log_target;
@@ -1201,19 +1203,35 @@ xlog_alloc_log(xfs_mount_t *mp,
log->l_grant_reserve_cycle = 1;
log->l_grant_write_cycle = 1;
+ error = EFSCORRUPTED;
if (xfs_sb_version_hassector(&mp->m_sb)) {
log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
- ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
+ if (log->l_sectbb_log < 0 ||
+ log->l_sectbb_log > mp->m_sectbb_log) {
+ xlog_warn("XFS: Log sector size (0x%x) out of range.",
+ log->l_sectbb_log);
+ goto out_free_log;
+ }
+
/* for larger sector sizes, must have v2 or external log */
- ASSERT(log->l_sectbb_log == 0 ||
- log->l_logBBstart == 0 ||
- xfs_sb_version_haslogv2(&mp->m_sb));
- ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
+ if (log->l_sectbb_log != 0 &&
+ (log->l_logBBstart != 0 &&
+ !xfs_sb_version_haslogv2(&mp->m_sb))) {
+ xlog_warn("XFS: log sector size (0x%x) invalid "
+ "for configuration.", log->l_sectbb_log);
+ goto out_free_log;
+ }
+ if (mp->m_sb.sb_logsectlog < BBSHIFT) {
+ xlog_warn("XFS: Log sector log (0x%x) too small.",
+ mp->m_sb.sb_logsectlog);
+ goto out_free_log;
+ }
}
log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
xlog_get_iclog_buffer_size(mp, log);
+ error = ENOMEM;
bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
if (!bp)
goto out_free_log;
@@ -1313,7 +1331,8 @@ out_free_iclog:
xfs_buf_free(log->l_xbuf);
out_free_log:
kmem_free(log);
- return NULL;
+out:
+ return ERR_PTR(-error);
} /* xlog_alloc_log */
@@ -2541,18 +2560,19 @@ redo:
xlog_ins_ticketq(&log->l_reserve_headq, tic);
xlog_trace_loggrant(log, tic,
"xlog_grant_log_space: sleep 2");
+ spin_unlock(&log->l_grant_lock);
+ xlog_grant_push_ail(log->l_mp, need_bytes);
+ spin_lock(&log->l_grant_lock);
+
XFS_STATS_INC(xs_sleep_logspace);
sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
- if (XLOG_FORCED_SHUTDOWN(log)) {
- spin_lock(&log->l_grant_lock);
+ spin_lock(&log->l_grant_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- }
xlog_trace_loggrant(log, tic,
"xlog_grant_log_space: wake 2");
- xlog_grant_push_ail(log->l_mp, need_bytes);
- spin_lock(&log->l_grant_lock);
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_reserve_headq, tic);
@@ -2631,7 +2651,7 @@ xlog_regrant_write_log_space(xlog_t *log,
* for more free space, otherwise try to get some space for
* this transaction.
*/
-
+ need_bytes = tic->t_unit_res;
if ((ntic = log->l_write_headq)) {
free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
log->l_grant_write_bytes);
@@ -2651,26 +2671,25 @@ xlog_regrant_write_log_space(xlog_t *log,
xlog_trace_loggrant(log, tic,
"xlog_regrant_write_log_space: sleep 1");
+ spin_unlock(&log->l_grant_lock);
+ xlog_grant_push_ail(log->l_mp, need_bytes);
+ spin_lock(&log->l_grant_lock);
+
XFS_STATS_INC(xs_sleep_logspace);
sv_wait(&tic->t_wait, PINOD|PLTWAIT,
&log->l_grant_lock, s);
/* If we're shutting down, this tic is already
* off the queue */
- if (XLOG_FORCED_SHUTDOWN(log)) {
- spin_lock(&log->l_grant_lock);
+ spin_lock(&log->l_grant_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- }
xlog_trace_loggrant(log, tic,
"xlog_regrant_write_log_space: wake 1");
- xlog_grant_push_ail(log->l_mp, tic->t_unit_res);
- spin_lock(&log->l_grant_lock);
}
}
- need_bytes = tic->t_unit_res;
-
redo:
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
@@ -2680,19 +2699,20 @@ redo:
if (free_bytes < need_bytes) {
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
xlog_ins_ticketq(&log->l_write_headq, tic);
+ spin_unlock(&log->l_grant_lock);
+ xlog_grant_push_ail(log->l_mp, need_bytes);
+ spin_lock(&log->l_grant_lock);
+
XFS_STATS_INC(xs_sleep_logspace);
sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
/* If we're shutting down, this tic is already off the queue */
- if (XLOG_FORCED_SHUTDOWN(log)) {
- spin_lock(&log->l_grant_lock);
+ spin_lock(&log->l_grant_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- }
xlog_trace_loggrant(log, tic,
"xlog_regrant_write_log_space: wake 2");
- xlog_grant_push_ail(log->l_mp, need_bytes);
- spin_lock(&log->l_grant_lock);
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_write_headq, tic);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 7af44adffc8f..d6a64392f983 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -313,7 +313,7 @@ typedef struct xfs_mount {
#endif
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct task_struct *m_sync_task; /* generalised sync thread */
- bhv_vfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */
+ xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */
struct list_head m_sync_list; /* sync thread work item list */
spinlock_t m_sync_lock; /* work item list lock */
int m_sync_seq; /* sync thread generation no. */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 7394c7af5de5..19cf90a9c762 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1457,6 +1457,13 @@ xfs_create(
error = xfs_trans_reserve(tp, resblks, log_res, 0,
XFS_TRANS_PERM_LOG_RES, log_count);
if (error == ENOSPC) {
+ /* flush outstanding delalloc blocks and retry */
+ xfs_flush_inodes(dp);
+ error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
+ }
+ if (error == ENOSPC) {
+ /* No space at all so try a "no-allocation" reservation */
resblks = 0;
error = xfs_trans_reserve(tp, 0, log_res, 0,
XFS_TRANS_PERM_LOG_RES, log_count);
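
[The reservation now degrades in three steps rather than two; roughly, condensed from the hunk above:

	error = xfs_trans_reserve(tp, resblks, log_res, 0,
				  XFS_TRANS_PERM_LOG_RES, log_count);
	if (error == ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(dp);
		error = xfs_trans_reserve(tp, resblks, ...);
	}
	if (error == ENOSPC) {
		/* last resort: a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, 0, log_res, 0, ...);
	}
]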
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 35752dadd6df..c840719a8c59 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -201,7 +201,7 @@ typedef struct siginfo {
#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint/watchpoint */
-#define NSIGTRAP 2
+#define NSIGTRAP 4
/*
* SIGCHLD si_codes
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 2df74eb09563..9477af01a639 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -472,6 +472,7 @@
{0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define gamma_PCI_IDS \
@@ -533,4 +534,5 @@
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0, 0, 0}
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 096476f1fb35..493dedb7a67b 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -2,12 +2,19 @@
#define __LINUX_DEBUG_LOCKING_H
#include <linux/kernel.h>
+#include <asm/atomic.h>
struct task_struct;
extern int debug_locks;
extern int debug_locks_silent;
+
+static inline int __debug_locks_off(void)
+{
+ return xchg(&debug_locks, 0);
+}
+
/*
* Generic 'turn off all lock debugging' function:
*/
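
[xchg() atomically stores 0 and returns the previous value, so among concurrent callers exactly one sees a nonzero return; the add_taint() change further down relies on this to print its warning only once. A userspace analogue of the semantics, using C11 atomics (illustration only, not kernel code):

	#include <stdatomic.h>

	static atomic_int debug_locks = 1;

	static int debug_locks_off_once(void)
	{
		/* returns 1 for the first caller only, 0 afterwards */
		return atomic_exchange(&debug_locks, 0);
	}
]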
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f563c5013932..330c4b1bfcaa 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -173,8 +173,12 @@ struct fb_fix_screeninfo {
/* Interpretation of offset for color fields: All offsets are from the right,
* inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you
* can use the offset as right argument to <<). A pixel afterwards is a bit
- * stream and is written to video memory as that unmodified. This implies
- * big-endian byte order if bits_per_pixel is greater than 8.
+ * stream and is written to video memory as that unmodified.
+ *
+ * For pseudocolor: offset and length should be the same for all color
+ * components. Offset specifies the position of the least significant bit
+ * of the palette index in a pixel value. Length indicates the number
+ * of available palette entries (i.e. # of entries = 1 << length).
*/
struct fb_bitfield {
__u32 offset; /* beginning of bitfield */
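
[Concretely, under the new rule an 8-bit pseudocolor visual describes all three color components identically; a hypothetical driver snippet:

	/* 8bpp pseudocolor: the palette index occupies the whole pixel,
	 * so 1 << 8 = 256 palette entries are addressable */
	info->var.bits_per_pixel = 8;
	info->var.red   = (struct fb_bitfield){ .offset = 0, .length = 8 };
	info->var.green = (struct fb_bitfield){ .offset = 0, .length = 8 };
	info->var.blue  = (struct fb_bitfield){ .offset = 0, .length = 8 };
]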
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index 671decbd2aeb..934e22d65801 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -11,6 +11,8 @@
#ifndef _LINUX_FIEMAP_H
#define _LINUX_FIEMAP_H
+#include <linux/types.h>
+
struct fiemap_extent {
__u64 fe_logical; /* logical offset in bytes for the start of
* the extent from the beginning of the file */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dcfb93337e9a..d87247d2641f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,19 +15,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-#define INIT_KIOCTX(name, which_mm) \
-{ \
- .users = ATOMIC_INIT(1), \
- .dead = 0, \
- .mm = &which_mm, \
- .user_id = 0, \
- .next = NULL, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
- .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
- .reqs_active = 0U, \
- .max_reqs = ~0U, \
-}
-
#define INIT_MM(name) \
{ \
.mm_rb = RB_ROOT, \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index ee98cd570885..06ba90c211a5 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2514,6 +2514,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h
new file mode 100644
index 000000000000..12d63a30c347
--- /dev/null
+++ b/include/linux/rotary_encoder.h
@@ -0,0 +1,13 @@
+#ifndef __ROTARY_ENCODER_H__
+#define __ROTARY_ENCODER_H__
+
+struct rotary_encoder_platform_data {
+ unsigned int steps;
+ unsigned int axis;
+ unsigned int gpio_a;
+ unsigned int gpio_b;
+ unsigned int inverted_a;
+ unsigned int inverted_b;
+};
+
+#endif /* __ROTARY_ENCODER_H__ */
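
[A board file hands this struct to the driver through a platform device; a minimal sketch, with GPIO numbers made up for illustration:

	static struct rotary_encoder_platform_data my_encoder = {
		.steps		= 24,		/* pulses per revolution */
		.axis		= ABS_X,
		.gpio_a		= 19,
		.gpio_b		= 20,
		.inverted_a	= 0,
		.inverted_b	= 0,
	};

	static struct platform_device my_encoder_dev = {
		.name			= "rotary-encoder",
		.id			= 0,
		.dev.platform_data	= &my_encoder,
	};
]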
diff --git a/include/linux/sht15.h b/include/linux/sht15.h
new file mode 100644
index 000000000000..046bce05ecab
--- /dev/null
+++ b/include/linux/sht15.h
@@ -0,0 +1,24 @@
+/*
+ * sht15.h - support for the SHT15 Temperature and Humidity Sensor
+ *
+ * Copyright (c) 2009 Jonathan Cameron
+ *
+ * Copyright (c) 2007 Wouter Horre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * struct sht15_platform_data - sht15 connectivity info
+ * @gpio_data: no. of gpio to which bidirectional data line is connected
+ * @gpio_sck: no. of gpio to which the data clock is connected.
+ * @supply_mv: supply voltage in mV. Overridden by regulator if available.
+ **/
+struct sht15_platform_data {
+ int gpio_data;
+ int gpio_sck;
+ int supply_mv;
+};
+
diff --git a/include/linux/spi/ad7879.h b/include/linux/spi/ad7879.h
new file mode 100644
index 000000000000..4231104c9afa
--- /dev/null
+++ b/include/linux/spi/ad7879.h
@@ -0,0 +1,35 @@
+/* linux/spi/ad7879.h */
+
+/* Touchscreen characteristics vary between boards and models. The
+ * platform_data for the device's "struct device" holds this information.
+ *
+ * It's OK if the min/max values are zero.
+ */
+struct ad7879_platform_data {
+ u16 model; /* 7879 */
+ u16 x_plate_ohms;
+ u16 x_min, x_max;
+ u16 y_min, y_max;
+ u16 pressure_min, pressure_max;
+
+ /* [0..255] 0=OFF Starts at 1=550us and goes
+ * all the way to 9.440ms in steps of 35us.
+ */
+ u8 pen_down_acc_interval;
+ /* [0..15] Starts at 0=128us and goes all the
+ * way to 4.096ms in steps of 128us.
+ */
+ u8 first_conversion_delay;
+ /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */
+ u8 acquisition_time;
+ /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */
+ u8 averaging;
+ /* [0..3] Perform X measurements 0 = OFF,
+ * 1 = 4, 2 = 8, 3 = 16 (median > averaging)
+ */
+ u8 median;
+ /* 1 = AUX/VBAT/GPIO set to GPIO Output */
+ u8 gpio_output;
+ /* Initial GPIO pin state (valid if gpio_output = 1) */
+ u8 gpio_default;
+};
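
[The encodings in the comments can be sanity-checked: with pen_down_acc_interval in [1..255], 550us plus 35us steps gives 550 + (255 - 1) * 35 = 9440us, matching the 9.440ms upper bound. As a hypothetical helper, not part of the driver:

	/* convert a pen_down_acc_interval register value to microseconds;
	 * 0 means the feature is off */
	static unsigned int ad7879_pen_down_us(u8 val)
	{
		return val ? 550 + (val - 1) * 35 : 0;
	}
]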
diff --git a/include/linux/stringify.h b/include/linux/stringify.h
index 0b4388356c87..841cec8ed525 100644
--- a/include/linux/stringify.h
+++ b/include/linux/stringify.h
@@ -6,7 +6,7 @@
* converts to "bar".
*/
-#define __stringify_1(x) #x
-#define __stringify(x) __stringify_1(x)
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
#endif /* !__LINUX_STRINGIFY_H */
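
[With the variadic form, arguments containing commas survive stringification instead of being rejected as excess macro arguments; the TP_printk change in kernel/trace/trace_events_stage_2.h further down relies on this. A standalone example (compiles with gcc's named-variadic-macro extension):

	#include <stdio.h>

	#define __stringify_1(x...)	#x
	#define __stringify(x...)	__stringify_1(x)

	int main(void)
	{
		/* the old single-argument form would fail to compile here */
		puts(__stringify(a, b, c));	/* prints "a, b, c" */
		return 0;
	}
]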
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index b95842542590..625e9e4639c6 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -29,7 +29,7 @@
/**
* usb_serial_port: structure for the specific ports of a device.
* @serial: pointer back to the struct usb_serial owner of this port.
- * @tty: pointer to the corresponding tty for this port.
+ * @port: pointer to the corresponding tty_port for this port.
* @lock: spinlock to grab when updating portions of this structure.
* @mutex: mutex used to synchronize serial_open() and serial_close()
* access for this port.
@@ -44,19 +44,22 @@
* @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
* for this port.
* @bulk_in_buffer: pointer to the bulk in buffer for this port.
+ * @bulk_in_size: the size of the bulk_in_buffer, in bytes.
* @read_urb: pointer to the bulk in struct urb for this port.
* @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
* port.
* @bulk_out_buffer: pointer to the bulk out buffer for this port.
* @bulk_out_size: the size of the bulk_out_buffer, in bytes.
* @write_urb: pointer to the bulk out struct urb for this port.
+ * @write_urb_busy: port's writing status
* @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
* port.
* @write_wait: a wait_queue_head_t used by the port.
* @work: work queue entry for the line discipline waking up.
- * @open_count: number of times this port has been opened.
* @throttled: nonzero if the read urb is inactive to throttle the device
* @throttle_req: nonzero if the tty wants to throttle us
+ * @console: attached usb serial console
+ * @dev: pointer to the serial device
*
* This structure is used by the usb-serial core and drivers for the specific
* ports of a device.
diff --git a/include/scsi/scsi_scan.h b/include/scsi/scsi_scan.h
new file mode 100644
index 000000000000..78898889243d
--- /dev/null
+++ b/include/scsi/scsi_scan.h
@@ -0,0 +1,11 @@
+#ifndef _SCSI_SCSI_SCAN_H
+#define _SCSI_SCSI_SCAN_H
+
+#ifdef CONFIG_SCSI
+/* drivers/scsi/scsi_scan.c */
+extern int scsi_complete_async_scans(void);
+#else
+static inline int scsi_complete_async_scans(void) { return 0; }
+#endif
+
+#endif /* _SCSI_SCSI_SCAN_H */
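
[This is the usual pattern for letting callers use an optional subsystem unconditionally: declare the real function when the subsystem is built in, and provide a no-op static inline otherwise, so call sites need no #ifdefs of their own. The same shape for a hypothetical CONFIG_FOO:

	#ifdef CONFIG_FOO
	extern int foo_do_work(void);
	#else
	static inline int foo_do_work(void) { return 0; }
	#endif
]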
diff --git a/include/video/cyblafb.h b/include/video/cyblafb.h
deleted file mode 100644
index d3c1d4e2c8e3..000000000000
--- a/include/video/cyblafb.h
+++ /dev/null
@@ -1,175 +0,0 @@
-
-#ifndef CYBLAFB_DEBUG
-#define CYBLAFB_DEBUG 0
-#endif
-
-#if CYBLAFB_DEBUG
-#define debug(f,a...) printk("%s:" f, __func__ , ## a);
-#else
-#define debug(f,a...)
-#endif
-
-#define output(f, a...) printk("cyblafb: " f, ## a)
-
-#define Kb (1024)
-#define Mb (Kb*Kb)
-
-/* PCI IDS of supported cards temporarily here */
-
-#define CYBERBLADEi1 0x8500
-
-/* these defines are for 'lcd' variable */
-#define LCD_STRETCH 0
-#define LCD_CENTER 1
-#define LCD_BIOS 2
-
-/* display types */
-#define DISPLAY_CRT 0
-#define DISPLAY_FP 1
-
-#define ROP_S 0xCC
-
-#define point(x,y) ((y)<<16|(x))
-
-//
-// Attribute Regs, ARxx, 3c0/3c1
-//
-#define AR00 0x00
-#define AR01 0x01
-#define AR02 0x02
-#define AR03 0x03
-#define AR04 0x04
-#define AR05 0x05
-#define AR06 0x06
-#define AR07 0x07
-#define AR08 0x08
-#define AR09 0x09
-#define AR0A 0x0A
-#define AR0B 0x0B
-#define AR0C 0x0C
-#define AR0D 0x0D
-#define AR0E 0x0E
-#define AR0F 0x0F
-#define AR10 0x10
-#define AR12 0x12
-#define AR13 0x13
-
-//
-// Sequencer Regs, SRxx, 3c4/3c5
-//
-#define SR00 0x00
-#define SR01 0x01
-#define SR02 0x02
-#define SR03 0x03
-#define SR04 0x04
-#define SR0D 0x0D
-#define SR0E 0x0E
-#define SR11 0x11
-#define SR18 0x18
-#define SR19 0x19
-
-//
-//
-//
-#define CR00 0x00
-#define CR01 0x01
-#define CR02 0x02
-#define CR03 0x03
-#define CR04 0x04
-#define CR05 0x05
-#define CR06 0x06
-#define CR07 0x07
-#define CR08 0x08
-#define CR09 0x09
-#define CR0A 0x0A
-#define CR0B 0x0B
-#define CR0C 0x0C
-#define CR0D 0x0D
-#define CR0E 0x0E
-#define CR0F 0x0F
-#define CR10 0x10
-#define CR11 0x11
-#define CR12 0x12
-#define CR13 0x13
-#define CR14 0x14
-#define CR15 0x15
-#define CR16 0x16
-#define CR17 0x17
-#define CR18 0x18
-#define CR19 0x19
-#define CR1A 0x1A
-#define CR1B 0x1B
-#define CR1C 0x1C
-#define CR1D 0x1D
-#define CR1E 0x1E
-#define CR1F 0x1F
-#define CR20 0x20
-#define CR21 0x21
-#define CR27 0x27
-#define CR29 0x29
-#define CR2A 0x2A
-#define CR2B 0x2B
-#define CR2D 0x2D
-#define CR2F 0x2F
-#define CR36 0x36
-#define CR38 0x38
-#define CR39 0x39
-#define CR3A 0x3A
-#define CR55 0x55
-#define CR56 0x56
-#define CR57 0x57
-#define CR58 0x58
-
-//
-//
-//
-
-#define GR00 0x01
-#define GR01 0x01
-#define GR02 0x02
-#define GR03 0x03
-#define GR04 0x04
-#define GR05 0x05
-#define GR06 0x06
-#define GR07 0x07
-#define GR08 0x08
-#define GR0F 0x0F
-#define GR20 0x20
-#define GR23 0x23
-#define GR2F 0x2F
-#define GR30 0x30
-#define GR31 0x31
-#define GR33 0x33
-#define GR52 0x52
-#define GR53 0x53
-#define GR5D 0x5d
-
-
-//
-// Graphics Engine
-//
-#define GEBase 0x2100 // could be mapped elsewhere if we like it
-#define GE00 (GEBase+0x00) // source 1, p 111
-#define GE04 (GEBase+0x04) // source 2, p 111
-#define GE08 (GEBase+0x08) // destination 1, p 111
-#define GE0C (GEBase+0x0C) // destination 2, p 112
-#define GE10 (GEBase+0x10) // right view base & enable, p 112
-#define GE13 (GEBase+0x13) // left view base & enable, p 112
-#define GE18 (GEBase+0x18) // block write start address, p 112
-#define GE1C (GEBase+0x1C) // block write end address, p 112
-#define GE20 (GEBase+0x20) // engine status, p 113
-#define GE24 (GEBase+0x24) // reset all GE pointers
-#define GE44 (GEBase+0x44) // command register, p 126
-#define GE48 (GEBase+0x48) // raster operation, p 127
-#define GE60 (GEBase+0x60) // foreground color, p 128
-#define GE64 (GEBase+0x64) // background color, p 128
-#define GE6C (GEBase+0x6C) // Pattern and Style, p 129, ok
-#define GE9C (GEBase+0x9C) // pixel engine data port, p 125
-#define GEB8 (GEBase+0xB8) // Destination Stride / Buffer Base 0, p 133
-#define GEBC (GEBase+0xBC) // Destination Stride / Buffer Base 1, p 133
-#define GEC0 (GEBase+0xC0) // Destination Stride / Buffer Base 2, p 133
-#define GEC4 (GEBase+0xC4) // Destination Stride / Buffer Base 3, p 133
-#define GEC8 (GEBase+0xC8) // Source Stride / Buffer Base 0, p 133
-#define GECC (GEBase+0xCC) // Source Stride / Buffer Base 1, p 133
-#define GED0 (GEBase+0xD0) // Source Stride / Buffer Base 2, p 133
-#define GED4 (GEBase+0xD4) // Source Stride / Buffer Base 3, p 133
diff --git a/init/Kconfig b/init/Kconfig
index f2f9b5362b48..7be4d3836745 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -808,6 +808,14 @@ config KALLSYMS_EXTRA_PASS
you wait for kallsyms to be fixed.
+config STRIP_ASM_SYMS
+ bool "Strip assembler-generated symbols during link"
+ default n
+ help
+ Strip internal assembler-generated symbols during a link (symbols
+ that look like '.Lxxx') so they don't pollute the output of
+ get_wchan() and suchlike.
+
config HOTPLUG
bool "Support for hot-pluggable devices" if EMBEDDED
default y
diff --git a/init/initramfs.c b/init/initramfs.c
index 80cd713f6cc5..9ee7b7810417 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -310,7 +310,8 @@ static int __init do_name(void)
if (wfd >= 0) {
sys_fchown(wfd, uid, gid);
sys_fchmod(wfd, mode);
- sys_ftruncate(wfd, body_len);
+ if (body_len)
+ sys_ftruncate(wfd, body_len);
vcollected = kstrdup(collected, GFP_KERNEL);
state = CopyFile;
}
@@ -515,6 +516,7 @@ skip:
initrd_end = 0;
}
+#ifdef CONFIG_BLK_DEV_RAM
#define BUF_SIZE 1024
static void __init clean_rootfs(void)
{
@@ -561,6 +563,7 @@ static void __init clean_rootfs(void)
sys_close(fd);
kfree(buf);
}
+#endif
static int __init populate_rootfs(void)
{
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 89f60ec8ee54..24ae46dfe45d 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -22,6 +22,7 @@
#define MIN_MSGSIZEMAX 128 /* min value for msgsize_max */
#define MAX_MSGSIZEMAX (8192*128) /* max value for msgsize_max */
+#ifdef CONFIG_PROC_SYSCTL
static void *get_mq(ctl_table *table)
{
char *which = table->data;
@@ -30,7 +31,6 @@ static void *get_mq(ctl_table *table)
return which;
}
-#ifdef CONFIG_PROC_SYSCTL
static int proc_mq_dointvec(ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
diff --git a/kernel/panic.c b/kernel/panic.c
index 3fd8c5bf8b39..934fb377f4b3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -213,8 +213,16 @@ unsigned long get_taint(void)
void add_taint(unsigned flag)
{
- /* can't trust the integrity of the kernel anymore: */
- debug_locks = 0;
+ /*
+ * Can't trust the integrity of the kernel anymore.
+ * We don't call debug_locks_off() directly because the issue
+ * is not necessarily serious enough to set oops_in_progress to 1.
+ * Also we want to keep lockdep running for the staging (TAINT_CRAP)
+ * and post-warning (TAINT_WARN) cases.
+ */
+ if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
+ printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+
set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 5f21ab2bbcdf..0854770b63b9 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,7 @@
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
+#include <scsi/scsi_scan.h>
#include <asm/suspend.h>
#include "power.h"
@@ -645,6 +646,13 @@ static int software_resume(void)
return 0;
/*
* The resume device usually is a SCSI one, and we can't depend on
* SCSI devices being available right after their modules are loaded
* unless scsi_complete_async_scans() has been called.
+ */
+ scsi_complete_async_scans();
+
+ /*
* name_to_dev_t() below takes a sysfs buffer mutex when sysfs
* is configured into the kernel. Since the regular hibernate
* trigger path is via sysfs which takes a buffer mutex before
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6c85359364f2..ed97375daae9 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -24,6 +24,7 @@
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/smp_lock.h>
+#include <scsi/scsi_scan.h>
#include <asm/uaccess.h>
@@ -92,6 +93,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
filp->private_data = data;
memset(&data->handle, 0, sizeof(struct snapshot_handle));
if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+ /* Hibernating. The image device should be accessible. */
data->swap = swsusp_resume_device ?
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
@@ -99,6 +101,13 @@ static int snapshot_open(struct inode *inode, struct file *filp)
if (error)
pm_notifier_call_chain(PM_POST_HIBERNATION);
} else {
+ /*
+ * Resuming. We may need to wait for the image device to
+ * appear.
+ */
+ wait_for_device_probe();
+ scsi_complete_async_scans();
+
data->swap = -1;
data->mode = O_WRONLY;
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 64191fa09b7e..dfcd83ceee3b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -604,10 +604,11 @@ repeat:
ret = security_ptrace_traceme(current->parent);
/*
- * Set the ptrace bit in the process ptrace flags.
- * Then link us on our parent's ptraced list.
+ * Check PF_EXITING to ensure ->real_parent has not passed
+ * exit_ptrace(). Otherwise we do not report an error, but
+ * behave as if ->real_parent had untraced us right after return.
*/
- if (!ret) {
+ if (!ret && !(current->real_parent->flags & PF_EXITING)) {
current->ptrace |= PT_PTRACED;
__ptrace_link(current, current->real_parent);
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 51dbb55604e8..e7998cf31498 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -360,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
void __user *, arg)
{
char buffer[256];
+ int ret = 0;
/* We only trust the superuser with rebooting the system. */
if (!capable(CAP_SYS_BOOT))
@@ -397,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
kernel_halt();
unlock_kernel();
do_exit(0);
- break;
+ panic("cannot halt");
case LINUX_REBOOT_CMD_POWER_OFF:
kernel_power_off();
@@ -417,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
#ifdef CONFIG_KEXEC
case LINUX_REBOOT_CMD_KEXEC:
- {
- int ret;
- ret = kernel_kexec();
- unlock_kernel();
- return ret;
- }
+ ret = kernel_kexec();
+ break;
#endif
#ifdef CONFIG_HIBERNATION
case LINUX_REBOOT_CMD_SW_SUSPEND:
- {
- int ret = hibernate();
- unlock_kernel();
- return ret;
- }
+ ret = hibernate();
+ break;
#endif
default:
- unlock_kernel();
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
unlock_kernel();
- return 0;
+ return ret;
}
static void deferred_cad(struct work_struct *dummy)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4286b62b34a0..e3d2c7dd59b9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -902,16 +902,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "scan_unevictable_pages",
- .data = &scan_unevictable_pages,
- .maxlen = sizeof(scan_unevictable_pages),
- .mode = 0644,
- .proc_handler = &scan_unevictable_handler,
- },
-#endif
#ifdef CONFIG_SLOW_WORK
{
.ctl_name = CTL_UNNUMBERED,
@@ -1302,6 +1292,16 @@ static struct ctl_table vm_table[] = {
.extra2 = &one,
},
#endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "scan_unevictable_pages",
+ .data = &scan_unevictable_pages,
+ .maxlen = sizeof(scan_unevictable_pages),
+ .mode = 0644,
+ .proc_handler = &scan_unevictable_handler,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2246141bda4d..417d1985e299 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -312,7 +312,7 @@ config KMEMTRACE
and profile kernel code.
This requires a userspace application to use. See
- Documentation/vm/kmemtrace.txt for more information.
+ Documentation/trace/kmemtrace.txt for more information.
Saying Y will make the kernel somewhat larger and slower. However,
if you disable kmemtrace at run-time or boot-time, the performance
@@ -403,7 +403,7 @@ config MMIOTRACE
implementation and works via page faults. Tracing is disabled by
default and can be enabled at run-time.
- See Documentation/tracers/mmiotrace.txt.
+ See Documentation/trace/mmiotrace.txt.
If you are not helping to develop drivers, say N.
config MMIOTRACE_TEST
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d28476a9851..1ce5dc6372b8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3277,19 +3277,13 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
info->tr = &global_trace;
info->cpu = cpu;
- info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+ info->spare = NULL;
/* Force reading ring buffer for first read */
info->read = (unsigned int)-1;
- if (!info->spare)
- goto out;
filp->private_data = info;
- return 0;
-
- out:
- kfree(info);
- return -ENOMEM;
+ return nonseekable_open(inode, filp);
}
static ssize_t
@@ -3304,6 +3298,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (!count)
return 0;
+ if (!info->spare)
+ info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+ if (!info->spare)
+ return -ENOMEM;
+
/* Do we have previous read data to read? */
if (info->read < PAGE_SIZE)
goto read;
@@ -3342,7 +3341,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
{
struct ftrace_buffer_info *info = file->private_data;
- ring_buffer_free_read_page(info->tr->buffer, info->spare);
+ if (info->spare)
+ ring_buffer_free_read_page(info->tr->buffer, info->spare);
kfree(info);
return 0;
@@ -3428,14 +3428,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
int size, i;
size_t ret;
- /*
- * We can't seek on a buffer input
- */
- if (unlikely(*ppos))
- return -ESPIPE;
+ if (*ppos & (PAGE_SIZE - 1)) {
+ WARN_ONCE(1, "Ftrace: previous read must page-align\n");
+ return -EINVAL;
+ }
+ if (len & (PAGE_SIZE - 1)) {
+ WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
+ if (len < PAGE_SIZE)
+ return -EINVAL;
+ len &= PAGE_MASK;
+ }
- for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
+ for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) {
struct page *page;
int r;
@@ -3474,6 +3479,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
spd.partial[i].offset = 0;
spd.partial[i].private = (unsigned long)ref;
spd.nr_pages++;
+ *ppos += PAGE_SIZE;
}
spd.nr_pages = i;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 64ec4d278ffb..576f4fa2af0d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -503,6 +503,7 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
+ buf[cnt] = '\0';
pred = kzalloc(sizeof(*pred), GFP_KERNEL);
if (!pred)
@@ -520,9 +521,10 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
return cnt;
}
- if (filter_add_pred(call, pred)) {
+ err = filter_add_pred(call, pred);
+ if (err < 0) {
filter_free_pred(pred);
- return -EINVAL;
+ return err;
}
*ppos += cnt;
@@ -569,6 +571,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
+ buf[cnt] = '\0';
pred = kzalloc(sizeof(*pred), GFP_KERNEL);
if (!pred)
@@ -586,10 +589,11 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
return cnt;
}
- if (filter_add_subsystem_pred(system, pred)) {
+ err = filter_add_subsystem_pred(system, pred);
+ if (err < 0) {
filter_free_subsystem_preds(system);
filter_free_pred(pred);
- return -EINVAL;
+ return err;
}
*ppos += cnt;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 026be412f356..e03cbf1e38f3 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -215,7 +215,7 @@ static int __filter_add_pred(struct ftrace_event_call *call,
}
}
- return -ENOMEM;
+ return -ENOSPC;
}
static int is_string_field(const char *type)
@@ -319,7 +319,7 @@ int filter_add_subsystem_pred(struct event_subsystem *system,
}
if (i == MAX_FILTER_PRED)
- return -EINVAL;
+ return -ENOSPC;
events_for_each(call) {
int err;
@@ -410,16 +410,22 @@ int filter_parse(char **pbuf, struct filter_pred *pred)
}
}
+ if (!val_str) {
+ pred->field_name = NULL;
+ return -EINVAL;
+ }
+
pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
if (!pred->field_name)
return -ENOMEM;
- pred->val = simple_strtoull(val_str, &tmp, 10);
+ pred->val = simple_strtoull(val_str, &tmp, 0);
if (tmp == val_str) {
pred->str_val = kstrdup(val_str, GFP_KERNEL);
if (!pred->str_val)
return -ENOMEM;
- }
+ } else if (*tmp != '\0')
+ return -EINVAL;
return 0;
}
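
[Passing base 0 lets the usual C prefixes select the radix, and the new *tmp check rejects trailing junk. The standard-library equivalent behaves the same way (illustration, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char *end;
		/* base 0: "0x10" -> 16, "010" -> 8, "10" -> 10 */
		unsigned long long v = strtoull("0x10", &end, 0);
		printf("%llu, trailing junk: %s\n", v, *end ? "yes" : "no");
		return 0;
	}
]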
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
index 30743f7d4110..d363c6672c6c 100644
--- a/kernel/trace/trace_events_stage_2.h
+++ b/kernel/trace/trace_events_stage_2.h
@@ -105,10 +105,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
return 0;
#undef __entry
-#define __entry "REC"
+#define __entry REC
#undef TP_printk
-#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args
+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
#undef TP_fast_assign
#define TP_fast_assign(args...) args
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 0218b4693dd8..bc3b11731b9c 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ int debug_locks_silent;
*/
int debug_locks_off(void)
{
- if (xchg(&debug_locks, 0)) {
+ if (__debug_locks_off()) {
if (!debug_locks_silent) {
oops_in_progress = 1;
console_verbose();
diff --git a/mm/Kconfig b/mm/Kconfig
index b53427ad30a3..57971d2ab848 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -213,6 +213,8 @@ config UNEVICTABLE_LRU
will use one page flag and increase the code size a little,
say Y unless you know what you are doing.
+ See Documentation/vm/unevictable-lru.txt for more information.
+
config HAVE_MLOCK
bool
default y if MMU=y
diff --git a/mm/filemap.c b/mm/filemap.c
index 2e2d38ebda4b..8bd498040f32 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -567,8 +567,8 @@ EXPORT_SYMBOL(wait_on_page_bit);
/**
* add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
- * @page - Page defining the wait queue of interest
- * @waiter - Waiter to add to the queue
+ * @page: Page defining the wait queue of interest
+ * @waiter: Waiter to add to the queue
*
* Add an arbitrary @waiter to the wait queue for the nominated @page.
*/
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2fc6d6c48238..e44fb0fbb80e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -932,7 +932,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
if (unlikely(!mem))
return 0;
- VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+ VM_BUG_ON(!mem || mem_cgroup_is_obsolete(mem));
while (1) {
int ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index d94d2e9146bc..f9cb20ebb990 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
+#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -43,7 +44,6 @@ static struct vfsmount *shm_mnt;
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
-#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
@@ -65,13 +65,28 @@ static struct vfsmount *shm_mnt;
#include <asm/div64.h>
#include <asm/pgtable.h>
+/*
+ * The maximum size of a shmem/tmpfs file is limited by the maximum size of
+ * its triple-indirect swap vector - see illustration at shmem_swp_entry().
+ *
+ * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
+ * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum
+ * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
+ * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
+ *
+ * We use / and * instead of shifts in the definitions below, so that the swap
+ * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
+ */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
+#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
+#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
+#define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
+
+#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
@@ -2581,7 +2596,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
-#define SHMEM_MAX_BYTES LLONG_MAX
+#define SHMEM_MAX_BYTES MAX_LFS_FILESIZE
#endif /* CONFIG_SHMEM */
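
[To make the comment's numbers concrete (SHMEM_NR_DIRECT is small enough to ignore), with 4kB pages:

	ENTRIES_PER_PAGE   = 4096 / sizeof(unsigned long) = 1024   (32-bit)
	SHMSWP_MAX_INDEX  ~= (1024 * 1024 / 2) * (1024 + 1)
	                   = 537395200 pages
	SHMSWP_MAX_BYTES  ~= 537395200 * 4096 ~= 2.2 TB

On a 64-bit kernel sizeof(unsigned long) doubles, so ENTRIES_PER_PAGE drops to 512 and the product falls by a factor of eight, to (512 * 512 / 2) * 513 pages, roughly 275 GB.]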
diff --git a/mm/util.c b/mm/util.c
index 2599e83eea17..55bef160b9f1 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -223,6 +223,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
}
#endif
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
{
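
[A typical caller pins a batch, operates on the pages, and drops the references with put_page(); a minimal sketch under the documented contract, with error handling abbreviated:

	struct page *pages[16];
	int i, got;

	got = get_user_pages_fast(start, 16, 1 /* write */, pages);
	if (got < 0)
		return got;	/* nothing was pinned */

	for (i = 0; i < got; i++) {
		/* ... access the page contents, e.g. via kmap() ... */
		put_page(pages[i]);
	}
]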
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index 612dc13ddd85..095cfc8b9dbf 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -14,6 +14,8 @@ _dst := $(if $(dst),$(dst),$(obj))
kbuild-file := $(srctree)/$(obj)/Kbuild
include $(kbuild-file)
+_dst := $(if $(destination-y),$(destination-y),$(_dst))
+
include scripts/Kbuild.include
install := $(INSTALL_HDR_PATH)/$(_dst)
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 3eea8f15131b..76af5f9623e3 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -97,7 +97,7 @@ print_mtime() {
}
list_parse() {
- echo "$1 \\"
+ [ ! -L "$1" ] && echo "$1 \\" || :
}
# for each file print a line in following format
diff --git a/scripts/headerdep.pl b/scripts/headerdep.pl
index 97399da89ef2..b7f6c560e24d 100755
--- a/scripts/headerdep.pl
+++ b/scripts/headerdep.pl
@@ -19,7 +19,7 @@ my $opt_graph;
version => \&version,
all => \$opt_all,
- I => \@opt_include,
+ "I=s" => \@opt_include,
graph => \$opt_graph,
);
diff --git a/scripts/kconfig/kxgettext.c b/scripts/kconfig/kxgettext.c
index 6eb72a7f2562..8d9ce22b0fc5 100644
--- a/scripts/kconfig/kxgettext.c
+++ b/scripts/kconfig/kxgettext.c
@@ -43,6 +43,10 @@ static char *escape(const char* text, char *bf, int len)
++text;
goto next;
}
+ else if (*text == '\\') {
+ *bfp++ = '\\';
+ len--;
+ }
*bfp++ = *text++;
next:
--len;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 8cc70612984c..df6e6286a065 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1913,7 +1913,7 @@ static void read_dump(const char *fname, unsigned int kernel)
if (!mod) {
if (is_vmlinux(modname))
have_vmlinux = 1;
- mod = new_module(NOFAIL(strdup(modname)));
+ mod = new_module(modname);
mod->skip = 1;
}
s = sym_add_exported(symname, mod, export_no(export));
@@ -1997,7 +1997,7 @@ static void read_markers(const char *fname)
mod = find_module(modname);
if (!mod) {
- mod = new_module(NOFAIL(strdup(modname)));
+ mod = new_module(modname);
mod->skip = 1;
}
if (is_vmlinux(modname)) {
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index f1c4b35bc324..47e75b69a2e9 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -21,7 +21,7 @@ if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
# Is this git on svn?
if git config --get svn-remote.svn.url >/dev/null; then
- printf -- '-svn%s' "`git-svn find-rev $head`"
+ printf -- '-svn%s' "`git svn find-rev $head`"
fi
# Are there uncommitted changes?
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index a0affd9cfca8..d4d41b3efc7c 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
@@ -1773,7 +1773,7 @@ void tomoyo_load_policy(const char *filename)
envp[2] = NULL;
call_usermodehelper(argv[0], argv, envp, 1);
- printk(KERN_INFO "TOMOYO: 2.2.0-pre 2009/02/01\n");
+ printk(KERN_INFO "TOMOYO: 2.2.0 2009/04/01\n");
printk(KERN_INFO "Mandatory Access Control activated.\n");
tomoyo_policy_loaded = true;
{ /* Check all profiles currently assigned to domains are defined. */
@@ -1800,7 +1800,7 @@ void tomoyo_load_policy(const char *filename)
static int tomoyo_read_version(struct tomoyo_io_buffer *head)
{
if (!head->read_eof) {
- tomoyo_io_printf(head, "2.2.0-pre");
+ tomoyo_io_printf(head, "2.2.0");
head->read_eof = true;
}
return 0;
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index e77e6a6de0f2..678f4ff16aa4 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 2f2b449ffd2d..2d6748741a26 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 65f50c1c5ee9..2316da8ec5bc 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 3bbe01a7a4b5..bf8e2b451687 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
diff --git a/security/tomoyo/realpath.h b/security/tomoyo/realpath.h
index 7ec9fc9cbc07..78217a37960b 100644
--- a/security/tomoyo/realpath.h
+++ b/security/tomoyo/realpath.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 3eeeae12c4dc..5b481912752a 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
index a0c8f6e0bea4..41c6ebafb9c5 100644
--- a/security/tomoyo/tomoyo.h
+++ b/security/tomoyo/tomoyo.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
- * Version: 2.2.0-pre 2009/02/01
+ * Version: 2.2.0 2009/04/01
*
*/
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 30829ee920c3..7ba8db5d4c42 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2260,11 +2260,11 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
gcap &= ~0x01;
/* allow 64bit DMA address if supported by H/W */
- if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_64BIT_MASK))
- pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
+ if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
+ pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
else {
- pci_set_dma_mask(pci, DMA_32BIT_MASK);
- pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
+ pci_set_dma_mask(pci, DMA_BIT_MASK(32));
+ pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
}
/* read number of streams from GCAP register instead of using