Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig                  |  15
-rw-r--r--  drivers/acpi/Makefile                 |   1
-rw-r--r--  drivers/acpi/acpica/Makefile          |   2
-rw-r--r--  drivers/acpi/acpica/acconfig.h        |   1
-rw-r--r--  drivers/acpi/acpica/acevents.h        |  17
-rw-r--r--  drivers/acpi/acpica/acglobal.h        |  13
-rw-r--r--  drivers/acpi/acpica/amlcode.h         |  15
-rw-r--r--  drivers/acpi/acpica/dswload.c         |   2
-rw-r--r--  drivers/acpi/acpica/dswload2.c        |   2
-rw-r--r--  drivers/acpi/acpica/evglock.c         | 335
-rw-r--r--  drivers/acpi/acpica/evmisc.c          | 303
-rw-r--r--  drivers/acpi/acpica/evregion.c        | 121
-rw-r--r--  drivers/acpi/acpica/evrgnini.c        |   2
-rw-r--r--  drivers/acpi/acpica/evxfregn.c        |  13
-rw-r--r--  drivers/acpi/acpica/excreate.c        |   3
-rw-r--r--  drivers/acpi/acpica/nsrepair.c        |  13
-rw-r--r--  drivers/acpi/acpica/utdecode.c        |   5
-rw-r--r--  drivers/acpi/acpica/utmutex.c         |  12
-rw-r--r--  drivers/acpi/bus.c                    |   2
-rw-r--r--  drivers/acpi/custom_method.c          | 100
-rw-r--r--  drivers/acpi/debugfs.c                |  92
-rw-r--r--  drivers/acpi/ec.c                     |  19
-rw-r--r--  drivers/acpi/internal.h               |   3
-rw-r--r--  drivers/acpi/osl.c                    |  33
-rw-r--r--  drivers/acpi/processor_core.c         |  12
-rw-r--r--  drivers/acpi/processor_idle.c         |   2
-rw-r--r--  drivers/acpi/sysfs.c                  |   8
-rw-r--r--  drivers/block/floppy.c                |   1
-rw-r--r--  drivers/cpuidle/governors/menu.c      |   4
-rw-r--r--  drivers/md/dm-io.c                    |  27
-rw-r--r--  drivers/md/dm-kcopyd.c                | 168
-rw-r--r--  drivers/md/dm-log.c                   |   3
-rw-r--r--  drivers/md/dm-mpath.c                 |   2
-rw-r--r--  drivers/md/dm-raid1.c                 |  10
-rw-r--r--  drivers/md/dm-snap-persistent.c       |  13
-rw-r--r--  drivers/md/dm-snap.c                  |  10
-rw-r--r--  drivers/md/dm-table.c                 |  23
-rw-r--r--  drivers/pci/pci-acpi.c                |   2
-rw-r--r--  drivers/platform/x86/compal-laptop.c  |  14
-rw-r--r--  drivers/platform/x86/msi-laptop.c     |  12
-rw-r--r--  drivers/thermal/thermal_sys.c         |  10
41 files changed, 839 insertions, 606 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bc2218db5ba9..de0e3df76776 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -369,6 +369,21 @@ config ACPI_HED
which is used to report some hardware errors notified via
SCI, mainly the corrected errors.
+config ACPI_CUSTOM_METHOD
+ tristate "Allow ACPI methods to be inserted/replaced at run time"
+ depends on DEBUG_FS
+ default n
+ help
+ This debug facility allows ACPI AML methods to be inserted and/or
+ replaced without rebooting the system. For details refer to:
+ Documentation/acpi/method-customizing.txt.
+
+ NOTE: This option is security sensitive, because it allows arbitrary
+ kernel memory to be written to by root (uid=0) users, allowing them
+ to bypass certain security measures (e.g. if root is not allowed to
+ load additional kernel modules after boot, this feature may be used
+ to override that restriction).
+
source "drivers/acpi/apei/Kconfig"
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b66fbb2fc85f..ecb26b4f29a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
+obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1224712fd0c..301bd2d388ad 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index ab87396c2c07..bc533dde16c4 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -187,7 +187,6 @@
/* Operation regions */
-#define ACPI_NUM_PREDEFINED_REGIONS 9
#define ACPI_USER_REGION_BEGIN 0x80
/* Maximum space_ids for Operation Regions */
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41d247daf461..bea3b4899183 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -58,12 +58,6 @@ u32 acpi_ev_fixed_event_detect(void);
*/
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_init_global_lock_handler(void);
-
u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
acpi_status
@@ -71,6 +65,17 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
+ * evglock - Global Lock support
+ */
+acpi_status acpi_ev_init_global_lock_handler(void);
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout);
+
+acpi_status acpi_ev_release_global_lock(void);
+
+acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*
* evgpe - Low-level GPE support
*/
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d69750b83b36..73863d86f022 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -214,24 +214,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*
* Global lock mutex is an actual AML mutex object
- * Global lock semaphore works in conjunction with the HW global lock
+ * Global lock semaphore works in conjunction with the actual global lock
+ * Global lock spinlock is used for "pending" handshake
*/
ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
ACPI_EXTERN u8 acpi_gbl_global_lock_present;
+ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
/*
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
-ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
-#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
-#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
-#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
+ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index f4f0998d3967..1077f17859ed 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -394,21 +394,6 @@
#define AML_CLASS_METHOD_CALL 0x09
#define AML_CLASS_UNKNOWN 0x0A
-/* Predefined Operation Region space_iDs */
-
-typedef enum {
- REGION_MEMORY = 0,
- REGION_IO,
- REGION_PCI_CONFIG,
- REGION_EC,
- REGION_SMBUS,
- REGION_CMOS,
- REGION_PCI_BAR,
- REGION_IPMI,
- REGION_DATA_TABLE, /* Internal use only */
- REGION_FIXED_HW = 0x7F
-} AML_REGION_TYPES;
-
/* Comparison operation codes for match_op operator */
typedef enum {
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 23a3b1ab20c1..324acec1179a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -450,7 +450,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ex_create_region(op->named.data,
op->named.length,
- REGION_DATA_TABLE,
+ ACPI_ADR_SPACE_DATA_TABLE,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 4be4e921dfe1..976318138c56 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -562,7 +562,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
((op->common.value.arg)->common.value.
integer);
} else {
- region_space = REGION_DATA_TABLE;
+ region_space = ACPI_ADR_SPACE_DATA_TABLE;
}
/*
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
new file mode 100644
index 000000000000..56a562a1e5d7
--- /dev/null
+++ b/drivers/acpi/acpica/evglock.c
@@ -0,0 +1,335 @@
+/******************************************************************************
+ *
+ * Module Name: evglock - Global Lock support
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evglock")
+
+/* Local prototypes */
+static u32 acpi_ev_global_lock_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+ /* Attempt installation of the global lock handler */
+
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler,
+ NULL);
+
+ /*
+ * If the global lock does not exist on this platform, the attempt to
+ * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+ * Map to AE_OK, but mark global lock as not present. Any attempt to
+ * actually use the global lock will be flagged with an error.
+ */
+ acpi_gbl_global_lock_present = FALSE;
+ if (status == AE_NO_HARDWARE_RESPONSE) {
+ ACPI_ERROR((AE_INFO,
+ "No response from Global Lock hardware, disabling lock"));
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_gbl_global_lock_present = TRUE;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_global_lock_handler
+ *
+ * PARAMETERS: Context - From thread interface, not used
+ *
+ * RETURN: ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ * release interrupt occurs. If there is actually a pending
+ * request for the lock, signal the waiting thread.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ /*
+ * If a request for the global lock is not actually pending,
+ * we are done. This handles "spurious" global lock interrupts
+ * which are possible (and have been seen) with bad BIOSs.
+ */
+ if (!acpi_gbl_global_lock_pending) {
+ goto cleanup_and_exit;
+ }
+
+ /*
+ * Send a unit to the global lock semaphore. The actual acquisition
+ * of the global lock will be performed by the waiting thread.
+ */
+ status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+
+ cleanup_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+ return (ACPI_INTERRUPT_HANDLED);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+ acpi_cpu_flags flags;
+ acpi_status status;
+ u8 acquired = FALSE;
+
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+ /*
+ * Only one thread can acquire the GL at a time, the global_lock_mutex
+ * enforces this. This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.
+ os_mutex, timeout);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Update the global lock handle and check for wraparound. The handle is
+ * only used for the external global lock interfaces, but it is updated
+ * here to properly handle the case where a single thread may acquire the
+ * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+ * handle is therefore updated on the first acquire from a given thread
+ * regardless of where the acquisition request originated.
+ */
+ acpi_gbl_global_lock_handle++;
+ if (acpi_gbl_global_lock_handle == 0) {
+ acpi_gbl_global_lock_handle = 1;
+ }
+
+ /*
+ * Make sure that a global lock actually exists. If not, just
+ * treat the lock as a standard mutex.
+ */
+ if (!acpi_gbl_global_lock_present) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ do {
+
+ /* Attempt to acquire the actual hardware lock */
+
+ ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+ if (acquired) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Acquired hardware Global Lock\n"));
+ break;
+ }
+
+ /*
+ * Did not get the lock. The pending bit was set above, and
+ * we must now wait until we receive the global lock
+ * released interrupt.
+ */
+ acpi_gbl_global_lock_pending = TRUE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Waiting for hardware Global Lock\n"));
+
+ /*
+ * Wait for handshake with the global lock interrupt handler.
+ * This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_semaphore
+ (acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ } while (ACPI_SUCCESS(status));
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_release_global_lock
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+ u8 pending = FALSE;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+ /* Lock must be already acquired */
+
+ if (!acpi_gbl_global_lock_acquired) {
+ ACPI_WARNING((AE_INFO,
+ "Cannot release the ACPI Global Lock, it has not been acquired"));
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
+ }
+
+ if (acpi_gbl_global_lock_present) {
+
+ /* Allow any thread to release the lock */
+
+ ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
+
+ /*
+ * If the pending bit was set, we must write GBL_RLS to the control
+ * register
+ */
+ if (pending) {
+ status =
+ acpi_write_bit_register
+ (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+ ACPI_ENABLE_EVENT);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Released hardware Global Lock\n"));
+ }
+
+ acpi_gbl_global_lock_acquired = FALSE;
+
+ /* Release the local GL mutex */
+
+ acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+ return_ACPI_STATUS(status);
+}
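
For reference, a minimal sketch (not part of this commit) of how a driver takes the Global Lock that evglock.c arbitrates, following the acquire/release pattern drivers use around hardware shared with the firmware; the function name and the timeout value are illustrative assumptions:

#include <linux/acpi.h>
#include <linux/errno.h>

/* Illustrative only: hold the firmware Global Lock around a shared-hardware access. */
static int example_locked_access(void)
{
	acpi_status status;
	u32 glk;

	status = acpi_acquire_global_lock(1000, &glk);	/* timeout in milliseconds */
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* ... access registers shared with the firmware here ... */

	acpi_release_global_lock(glk);
	return 0;
}

The returned handle corresponds to acpi_gbl_global_lock_handle above; the actual hardware handshake (pending bit, GBL_RLS write, semaphore signal from the SCI handler) is what the new evglock.c implements underneath this call.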
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 7dc80946f7bd..d0b331844427 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -45,7 +45,6 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
@@ -53,10 +52,6 @@ ACPI_MODULE_NAME("evmisc")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
-static u32 acpi_ev_global_lock_handler(void *context);
-
-static acpi_status acpi_ev_remove_global_lock_handler(void);
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_is_notify_object
@@ -275,304 +270,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
acpi_ut_delete_generic_state(notify_info);
}
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_global_lock_handler
- *
- * PARAMETERS: Context - From thread interface, not used
- *
- * RETURN: ACPI_INTERRUPT_HANDLED
- *
- * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- * release interrupt occurs. If there's a thread waiting for
- * the global lock, signal it.
- *
- * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
- * this is not possible for some reason, a separate thread will have to be
- * scheduled to do this.
- *
- ******************************************************************************/
-static u8 acpi_ev_global_lock_pending;
-
-static u32 acpi_ev_global_lock_handler(void *context)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- if (!acpi_ev_global_lock_pending) {
- goto out;
- }
-
- /* Send a unit to the semaphore */
-
- status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
- }
-
- acpi_ev_global_lock_pending = FALSE;
-
- out:
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return (ACPI_INTERRUPT_HANDLED);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_init_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Install a handler for the global lock release event
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_init_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
-
- /* Attempt installation of the global lock handler */
-
- status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler,
- NULL);
-
- /*
- * If the global lock does not exist on this platform, the attempt to
- * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
- * Map to AE_OK, but mark global lock as not present. Any attempt to
- * actually use the global lock will be flagged with an error.
- */
- if (status == AE_NO_HARDWARE_RESPONSE) {
- ACPI_ERROR((AE_INFO,
- "No response from Global Lock hardware, disabling lock"));
-
- acpi_gbl_global_lock_present = FALSE;
- return_ACPI_STATUS(AE_OK);
- }
-
- acpi_gbl_global_lock_present = TRUE;
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_remove_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove the handler for the Global Lock
- *
- ******************************************************************************/
-
-static acpi_status acpi_ev_remove_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
-
- acpi_gbl_global_lock_present = FALSE;
- status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler);
-
- return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_ev_acquire_global_lock
- *
- * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Attempt to gain ownership of the Global Lock.
- *
- * MUTEX: Interpreter must be locked
- *
- * Note: The original implementation allowed multiple threads to "acquire" the
- * Global Lock, and the OS would hold the lock until the last thread had
- * released it. However, this could potentially starve the BIOS out of the
- * lock, especially in the case where there is a tight handshake between the
- * Embedded Controller driver and the BIOS. Therefore, this implementation
- * allows only one thread to acquire the HW Global Lock at a time, and makes
- * the global lock appear as a standard mutex on the OS side.
- *
- *****************************************************************************/
-static acpi_thread_id acpi_ev_global_lock_thread_id;
-static int acpi_ev_global_lock_acquired;
-
-acpi_status acpi_ev_acquire_global_lock(u16 timeout)
-{
- acpi_cpu_flags flags;
- acpi_status status = AE_OK;
- u8 acquired = FALSE;
-
- ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
-
- /*
- * Only one thread can acquire the GL at a time, the global_lock_mutex
- * enforces this. This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
- if (status == AE_TIME) {
- if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
- acpi_ev_global_lock_acquired++;
- return AE_OK;
- }
- }
-
- if (ACPI_FAILURE(status)) {
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex,
- timeout);
- }
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
- acpi_ev_global_lock_acquired++;
-
- /*
- * Update the global lock handle and check for wraparound. The handle is
- * only used for the external global lock interfaces, but it is updated
- * here to properly handle the case where a single thread may acquire the
- * lock via both the AML and the acpi_acquire_global_lock interfaces. The
- * handle is therefore updated on the first acquire from a given thread
- * regardless of where the acquisition request originated.
- */
- acpi_gbl_global_lock_handle++;
- if (acpi_gbl_global_lock_handle == 0) {
- acpi_gbl_global_lock_handle = 1;
- }
-
- /*
- * Make sure that a global lock actually exists. If not, just treat the
- * lock as a standard mutex.
- */
- if (!acpi_gbl_global_lock_present) {
- acpi_gbl_global_lock_acquired = TRUE;
- return_ACPI_STATUS(AE_OK);
- }
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- do {
-
- /* Attempt to acquire the actual hardware lock */
-
- ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
- if (acquired) {
- acpi_gbl_global_lock_acquired = TRUE;
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Acquired hardware Global Lock\n"));
- break;
- }
-
- acpi_ev_global_lock_pending = TRUE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- /*
- * Did not get the lock. The pending bit was set above, and we
- * must wait until we get the global lock released interrupt.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Waiting for hardware Global Lock\n"));
-
- /*
- * Wait for handshake with the global lock interrupt handler.
- * This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_semaphore(
- acpi_gbl_global_lock_semaphore,
- ACPI_WAIT_FOREVER);
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- } while (ACPI_SUCCESS(status));
-
- acpi_ev_global_lock_pending = FALSE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_release_global_lock
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Releases ownership of the Global Lock.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_release_global_lock(void)
-{
- u8 pending = FALSE;
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(ev_release_global_lock);
-
- /* Lock must be already acquired */
-
- if (!acpi_gbl_global_lock_acquired) {
- ACPI_WARNING((AE_INFO,
- "Cannot release the ACPI Global Lock, it has not been acquired"));
- return_ACPI_STATUS(AE_NOT_ACQUIRED);
- }
-
- acpi_ev_global_lock_acquired--;
- if (acpi_ev_global_lock_acquired > 0) {
- return AE_OK;
- }
-
- if (acpi_gbl_global_lock_present) {
-
- /* Allow any thread to release the lock */
-
- ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
-
- /*
- * If the pending bit was set, we must write GBL_RLS to the control
- * register
- */
- if (pending) {
- status =
- acpi_write_bit_register
- (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
- ACPI_ENABLE_EVENT);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Released hardware Global Lock\n"));
- }
-
- acpi_gbl_global_lock_acquired = FALSE;
-
- /* Release the local GL mutex */
- acpi_ev_global_lock_thread_id = 0;
- acpi_ev_global_lock_acquired = 0;
- acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
- return_ACPI_STATUS(status);
-}
-
/******************************************************************************
*
* FUNCTION: acpi_ev_terminate
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index bea7223d7a71..f0edf5c43c03 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -55,6 +55,8 @@ static u8
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
+static void acpi_ev_orphan_ec_reg_method(void);
+
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -561,7 +563,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Now stop region accesses by executing the _REG method */
- status = acpi_ev_execute_reg_method(region_obj, 0);
+ status =
+ acpi_ev_execute_reg_method(region_obj,
+ ACPI_REG_DISCONNECT);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"from region _REG, [%s]",
@@ -1062,6 +1066,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
NULL, &space_id, NULL);
+ /* Special case for EC: handle "orphan" _REG methods with no region */
+
+ if (space_id == ACPI_ADR_SPACE_EC) {
+ acpi_ev_orphan_ec_reg_method();
+ }
+
return_ACPI_STATUS(status);
}
@@ -1120,6 +1130,113 @@ acpi_ev_reg_run(acpi_handle obj_handle,
return (AE_OK);
}
- status = acpi_ev_execute_reg_method(obj_desc, 1);
+ status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
return (status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_orphan_ec_reg_method
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
+ * device. This is a _REG method that has no corresponding region
+ * within the EC device scope. The orphan _REG method appears to
+ * have been enabled by the description of the ECDT in the ACPI
+ * specification: "The availability of the region space can be
+ * detected by providing a _REG method object underneath the
+ * Embedded Controller device."
+ *
+ * To quickly access the EC device, we use the EC_ID that appears
+ * within the ECDT. Otherwise, we would need to perform a time-
+ * consuming namespace walk, executing _HID methods to find the
+ * EC device.
+ *
+ ******************************************************************************/
+
+static void acpi_ev_orphan_ec_reg_method(void)
+{
+ struct acpi_table_ecdt *table;
+ acpi_status status;
+ struct acpi_object_list args;
+ union acpi_object objects[2];
+ struct acpi_namespace_node *ec_device_node;
+ struct acpi_namespace_node *reg_method;
+ struct acpi_namespace_node *next_node;
+
+ ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
+
+ /* Get the ECDT (if present in system) */
+
+ status = acpi_get_table(ACPI_SIG_ECDT, 0,
+ ACPI_CAST_INDIRECT_PTR(struct acpi_table_header,
+ &table));
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* We need a valid EC_ID string */
+
+ if (!(*table->id)) {
+ return_VOID;
+ }
+
+ /* Namespace is currently locked, must release */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ /* Get a handle to the EC device referenced in the ECDT */
+
+ status = acpi_get_handle(NULL,
+ ACPI_CAST_PTR(char, table->id),
+ ACPI_CAST_PTR(acpi_handle, &ec_device_node));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Get a handle to a _REG method immediately under the EC device */
+
+ status = acpi_get_handle(ec_device_node,
+ METHOD_NAME__REG, ACPI_CAST_PTR(acpi_handle,
+ &reg_method));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /*
+ * Execute the _REG method only if there is no Operation Region in
+ * this scope with the Embedded Controller space ID. Otherwise, it
+ * will already have been executed. Note, this allows for Regions
+ * with other space IDs to be present; but the code below will then
+ * execute the _REG method with the EC space ID argument.
+ */
+ next_node = acpi_ns_get_next_node(ec_device_node, NULL);
+ while (next_node) {
+ if ((next_node->type == ACPI_TYPE_REGION) &&
+ (next_node->object) &&
+ (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
+ goto exit; /* Do not execute _REG */
+ }
+ next_node = acpi_ns_get_next_node(ec_device_node, next_node);
+ }
+
+ /* Evaluate the _REG(EC,Connect) method */
+
+ args.count = 2;
+ args.pointer = objects;
+ objects[0].type = ACPI_TYPE_INTEGER;
+ objects[0].integer.value = ACPI_ADR_SPACE_EC;
+ objects[1].type = ACPI_TYPE_INTEGER;
+ objects[1].integer.value = ACPI_REG_CONNECT;
+
+ status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+
+ exit:
+ /* We ignore all errors from above, don't care */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ return_VOID;
+}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 9659cee6093e..55a5d35ef34a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -637,7 +637,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
status =
acpi_ev_execute_reg_method
- (region_obj, 1);
+ (region_obj, ACPI_REG_CONNECT);
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index c85c8c45599d..00cd95692a91 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -130,20 +130,21 @@ acpi_install_address_space_handler(acpi_handle device,
case ACPI_ADR_SPACE_PCI_CONFIG:
case ACPI_ADR_SPACE_DATA_TABLE:
- if (acpi_gbl_reg_methods_executed) {
+ if (!acpi_gbl_reg_methods_executed) {
- /* Run all _REG methods for this address space */
-
- status = acpi_ev_execute_reg_methods(node, space_id);
+ /* We will defer execution of the _REG methods for this space */
+ goto unlock_and_exit;
}
break;
default:
-
- status = acpi_ev_execute_reg_methods(node, space_id);
break;
}
+ /* Run all _REG methods for this address space */
+
+ status = acpi_ev_execute_reg_methods(node, space_id);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index e7b372d17667..110711afada8 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -305,7 +305,8 @@ acpi_ex_create_region(u8 * aml_start,
* range
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (region_space < ACPI_USER_REGION_BEGIN)) {
+ (region_space < ACPI_USER_REGION_BEGIN) &&
+ (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 1d76ac85b5e7..ac7b854b0bd7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair")
*
* Additional possible repairs:
*
- * Optional/unnecessary NULL package elements removed
* Required package elements that are NULL replaced by Integer/String/Buffer
* Incorrect standalone package wrapped with required outer package
*
@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
ACPI_FUNCTION_NAME(ns_remove_null_elements);
/*
- * PTYPE1 packages contain no subpackages.
- * PTYPE2 packages contain a variable number of sub-packages. We can
- * safely remove all NULL elements from the PTYPE2 packages.
+ * We can safely remove all NULL elements from these package types:
+ * PTYPE1_VAR packages contain a variable number of simple data types.
+ * PTYPE2 packages contain a variable number of sub-packages.
*/
switch (package_type) {
- case ACPI_PTYPE1_FIXED:
case ACPI_PTYPE1_VAR:
- case ACPI_PTYPE1_OPTION:
- return;
-
case ACPI_PTYPE2:
case ACPI_PTYPE2_COUNT:
case ACPI_PTYPE2_PKG_COUNT:
@@ -642,6 +637,8 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
break;
default:
+ case ACPI_PTYPE1_FIXED:
+ case ACPI_PTYPE1_OPTION:
return;
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 136a814cec69..97cb36f85ce9 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -170,8 +170,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI",
- "DataTable"
+ "IPMI"
};
char *acpi_ut_get_region_name(u8 space_id)
@@ -179,6 +178,8 @@ char *acpi_ut_get_region_name(u8 space_id)
if (space_id >= ACPI_USER_REGION_BEGIN) {
return ("UserDefinedRegion");
+ } else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
+ return ("DataTable");
} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
return ("FunctionalFixedHW");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index a946c689f03b..7d797e2baecd 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -83,9 +83,15 @@ acpi_status acpi_ut_mutex_initialize(void)
/* Create the spinlocks for use at interrupt level */
- spin_lock_init(acpi_gbl_gpe_lock);
- spin_lock_init(acpi_gbl_hardware_lock);
- spin_lock_init(acpi_ev_global_lock_pending_lock);
+ status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
+
+ status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9749980ca6ca..d1e06c182cdb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -227,7 +227,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
acpi_status status = AE_OK;
char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
/* Make sure this is a valid target state */
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
new file mode 100644
index 000000000000..5d42c2414ae5
--- /dev/null
+++ b/drivers/acpi/custom_method.c
@@ -0,0 +1,100 @@
+/*
+ * debugfs.c - ACPI debugfs interface to userspace.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("custom_method");
+MODULE_LICENSE("GPL");
+
+static struct dentry *cm_dentry;
+
+/* /sys/kernel/debug/acpi/custom_method */
+
+static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char *buf;
+ static u32 max_size;
+ static u32 uncopied_bytes;
+
+ struct acpi_table_header table;
+ acpi_status status;
+
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+ return -EINVAL;
+ if (copy_from_user(&table, user_buf,
+ sizeof(struct acpi_table_header)))
+ return -EFAULT;
+ uncopied_bytes = max_size = table.length;
+ buf = kzalloc(max_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ if (buf == NULL)
+ return -EINVAL;
+
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+ (count > uncopied_bytes))
+ return -EINVAL;
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+ buf = NULL;
+ return -EFAULT;
+ }
+
+ uncopied_bytes -= count;
+ *ppos += count;
+
+ if (!uncopied_bytes) {
+ status = acpi_install_method(buf);
+ kfree(buf);
+ buf = NULL;
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+
+ return count;
+}
+
+static const struct file_operations cm_fops = {
+ .write = cm_write,
+ .llseek = default_llseek,
+};
+
+static int __init acpi_custom_method_init(void)
+{
+ if (acpi_debugfs_dir == NULL)
+ return -ENOENT;
+
+ cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
+ acpi_debugfs_dir, NULL, &cm_fops);
+ if (cm_dentry == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_custom_method_exit(void)
+{
+ if (cm_dentry)
+ debugfs_remove(cm_dentry);
+ }
+
+module_init(acpi_custom_method_init);
+module_exit(acpi_custom_method_exit);
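
For reference, a minimal userspace sketch (not part of this commit) of feeding a compiled AML method to the interface created above; the debugfs path comes from acpi_custom_method_init(), while the input file name and buffer size are illustrative assumptions:

#include <stdio.h>

int main(int argc, char **argv)
{
	FILE *in, *out;
	char buf[4096];
	size_t n;

	if (argc != 2) {
		fprintf(stderr, "usage: %s method.aml\n", argv[0]);
		return 1;
	}

	in = fopen(argv[1], "rb");
	out = fopen("/sys/kernel/debug/acpi/custom_method", "wb");
	if (!in || !out) {
		perror("fopen");
		return 1;
	}

	/* cm_write() installs the method once table->length bytes have arrived */
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0) {
		if (fwrite(buf, 1, n, out) != n) {
			perror("fwrite");
			return 1;
		}
	}

	fclose(out);
	fclose(in);
	return 0;
}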
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 384f7abcff77..182a9fc36355 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -3,100 +3,16 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
+struct dentry *acpi_debugfs_dir;
+EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
-/* /sys/modules/acpi/parameters/aml_debug_output */
-
-module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
- bool, 0644);
-MODULE_PARM_DESC(aml_debug_output,
- "To enable/disable the ACPI Debug Object output.");
-
-/* /sys/kernel/debug/acpi/custom_method */
-
-static ssize_t cm_write(struct file *file, const char __user * user_buf,
- size_t count, loff_t *ppos)
+void __init acpi_debugfs_init(void)
{
- static char *buf;
- static u32 max_size;
- static u32 uncopied_bytes;
-
- struct acpi_table_header table;
- acpi_status status;
-
- if (!(*ppos)) {
- /* parse the table header to get the table length */
- if (count <= sizeof(struct acpi_table_header))
- return -EINVAL;
- if (copy_from_user(&table, user_buf,
- sizeof(struct acpi_table_header)))
- return -EFAULT;
- uncopied_bytes = max_size = table.length;
- buf = kzalloc(max_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- if (buf == NULL)
- return -EINVAL;
-
- if ((*ppos > max_size) ||
- (*ppos + count > max_size) ||
- (*ppos + count < count) ||
- (count > uncopied_bytes))
- return -EINVAL;
-
- if (copy_from_user(buf + (*ppos), user_buf, count)) {
- kfree(buf);
- buf = NULL;
- return -EFAULT;
- }
-
- uncopied_bytes -= count;
- *ppos += count;
-
- if (!uncopied_bytes) {
- status = acpi_install_method(buf);
- kfree(buf);
- buf = NULL;
- if (ACPI_FAILURE(status))
- return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
-
- return count;
-}
-
-static const struct file_operations cm_fops = {
- .write = cm_write,
- .llseek = default_llseek,
-};
-
-int __init acpi_debugfs_init(void)
-{
- struct dentry *acpi_dir, *cm_dentry;
-
- acpi_dir = debugfs_create_dir("acpi", NULL);
- if (!acpi_dir)
- goto err;
-
- cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
- acpi_dir, NULL, &cm_fops);
- if (!cm_dentry)
- goto err;
-
- return 0;
-
-err:
- if (acpi_dir)
- debugfs_remove(acpi_dir);
- return -EINVAL;
+ acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fa848c4116a8..b19a18dd994f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -69,7 +69,6 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
-#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
@@ -433,8 +432,7 @@ EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len,
- int force_poll)
+ u8 * rdata, unsigned rdata_len)
{
struct transaction t = {.command = command,
.wdata = wdata, .rdata = rdata,
@@ -592,8 +590,6 @@ static void acpi_ec_gpe_query(void *ec_cxt)
mutex_unlock(&ec->lock);
}
-static void acpi_ec_gpe_query(void *ec_cxt);
-
static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
@@ -808,8 +804,6 @@ static int acpi_ec_add(struct acpi_device *device)
return -EINVAL;
}
- ec->handle = device->handle;
-
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods, NULL, ec, NULL);
@@ -938,8 +932,19 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_flag_msi, "MSI hardware", {
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
{
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
+ {
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
+ {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
{},
};
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4bfb759deb10..ca75b9ce0489 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,9 +28,10 @@ int acpi_scan_init(void);
int acpi_sysfs_init(void);
#ifdef CONFIG_DEBUG_FS
+extern struct dentry *acpi_debugfs_dir;
int acpi_debugfs_init(void);
#else
-static inline int acpi_debugfs_init(void) { return 0; }
+static inline void acpi_debugfs_init(void) { return; }
#endif
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45ad4ffef533..52ca9649d769 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -902,14 +902,6 @@ void acpi_os_wait_events_complete(void *context)
EXPORT_SYMBOL(acpi_os_wait_events_complete);
-/*
- * Deallocate the memory for a spinlock.
- */
-void acpi_os_delete_lock(acpi_spinlock handle)
-{
- return;
-}
-
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
@@ -1341,6 +1333,31 @@ int acpi_resources_are_enforced(void)
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
+ * Create and initialize a spinlock.
+ */
+acpi_status
+acpi_os_create_lock(acpi_spinlock *out_handle)
+{
+ spinlock_t *lock;
+
+ lock = ACPI_ALLOCATE(sizeof(spinlock_t));
+ if (!lock)
+ return AE_NO_MEMORY;
+ spin_lock_init(lock);
+ *out_handle = lock;
+
+ return AE_OK;
+}
+
+/*
+ * Deallocate the memory for a spinlock.
+ */
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+ ACPI_FREE(handle);
+}
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25bf17da69fd..02d2a4c9084d 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -37,7 +37,6 @@ static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
{},
};
-#ifdef CONFIG_SMP
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
@@ -165,7 +164,9 @@ exit:
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
+#ifdef CONFIG_SMP
int i;
+#endif
int apic_id = -1;
apic_id = map_mat_entry(handle, type, acpi_id);
@@ -174,14 +175,19 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
if (apic_id == -1)
return apic_id;
+#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == apic_id)
return i;
}
+#else
+ /* In UP kernel, only processor 0 is valid */
+ if (apic_id == 0)
+ return apic_id;
+#endif
return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#endif
static bool __init processor_physically_present(acpi_handle handle)
{
@@ -217,7 +223,7 @@ static bool __init processor_physically_present(acpi_handle handle)
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
- if ((cpuid == -1) && (num_possible_cpus() > 1))
+ if (cpuid == -1)
return false;
return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d615b7d69bca..431ab11c8c1b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,7 +161,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
- if (c1e_detected)
+ if (amd_e400_c1e_detected)
type = ACPI_STATE_C1;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 61891e75583d..77255f250dbb 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -220,6 +220,14 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
+
+/* /sys/modules/acpi/parameters/aml_debug_output */
+
+module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
+ bool, 0644);
+MODULE_PARM_DESC(aml_debug_output,
+ "To enable/disable the ACPI Debug Object output.");
+
/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index db8f88586c8d..98de8f418676 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1038,6 +1038,7 @@ static void floppy_disable_hlt(void)
{
unsigned long flags;
+ WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
spin_lock_irqsave(&floppy_hlt_lock, flags);
if (!hlt_disabled) {
hlt_disabled = 1;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f508690eb958..c47f3d09c1ee 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
unsigned int power_usage = -1;
int i;
int multiplier;
+ struct timespec t;
if (data->needs_update) {
menu_update(dev);
@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
return 0;
/* determine the expected residency time, round up */
+ t = ktime_to_timespec(tick_nohz_get_sleep_length());
data->expected_us =
- DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
+ t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
data->bucket = which_bucket(data->expected_us);
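
A quick illustration (not part of this commit) of why expected_us is now derived from a timespec: the old formula cast the nanosecond sleep length to u32, which wraps for sleeps longer than roughly 4.29 seconds and makes the governor see a much shorter expected residency. The constants below are a worked example, not kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t sleep_ns = 5000000000LL;			/* a 5 s tick_nohz sleep length */
	uint32_t old_us = (uint32_t)sleep_ns / 1000;		/* wraps to ~705 ms */
	uint32_t new_us = sleep_ns / 1000000000LL * 1000000 +
			  sleep_ns % 1000000000LL / 1000;	/* 5,000,000 us */

	printf("old: %u us, new: %u us\n", (unsigned)old_us, (unsigned)new_us);
	return 0;
}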
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 76a5af00a26b..2067288f61f9 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
#define DM_MSG_PREFIX "io"
#define DM_IO_MAX_REGIONS BITS_PER_LONG
+#define MIN_IOS 16
+#define MIN_BIOS 16
struct dm_io_client {
mempool_t *pool;
@@ -41,33 +43,21 @@ struct io {
static struct kmem_cache *_dm_io_cache;
/*
- * io contexts are only dynamically allocated for asynchronous
- * io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
- return 4 * pages; /* too many ? */
-}
-
-/*
* Create a client with mempool and bioset.
*/
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
{
- unsigned ios = pages_to_ios(num_pages);
struct dm_io_client *client;
client = kmalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+ client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
if (!client->pool)
goto bad;
- client->bios = bioset_create(16, 0);
+ client->bios = bioset_create(MIN_BIOS, 0);
if (!client->bios)
goto bad;
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
}
EXPORT_SYMBOL(dm_io_client_create);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
- return mempool_resize(client->pool, pages_to_ios(num_pages),
- GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
void dm_io_client_destroy(struct dm_io_client *client)
{
mempool_destroy(client->pool);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bb73a13ca40..819e37eaaeba 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,15 +27,19 @@
#include "dm.h"
+#define SUB_JOB_SIZE 128
+#define SPLIT_COUNT 8
+#define MIN_JOBS 8
+#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
/*-----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
*---------------------------------------------------------------*/
struct dm_kcopyd_client {
- spinlock_t lock;
struct page_list *pages;
- unsigned int nr_pages;
- unsigned int nr_free_pages;
+ unsigned nr_reserved_pages;
+ unsigned nr_free_pages;
struct dm_io_client *io_client;
@@ -67,15 +71,18 @@ static void wake(struct dm_kcopyd_client *kc)
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
-static struct page_list *alloc_pl(void)
+/*
+ * Obtain one page for the use of kcopyd.
+ */
+static struct page_list *alloc_pl(gfp_t gfp)
{
struct page_list *pl;
- pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+ pl = kmalloc(sizeof(*pl), gfp);
if (!pl)
return NULL;
- pl->page = alloc_page(GFP_KERNEL);
+ pl->page = alloc_page(gfp);
if (!pl->page) {
kfree(pl);
return NULL;
@@ -90,41 +97,56 @@ static void free_pl(struct page_list *pl)
kfree(pl);
}
-static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
- unsigned int nr, struct page_list **pages)
+/*
+ * Add the provided pages to a client's free page list, releasing
+ * back to the system any beyond the reserved_pages limit.
+ */
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
- struct page_list *pl;
-
- spin_lock(&kc->lock);
- if (kc->nr_free_pages < nr) {
- spin_unlock(&kc->lock);
- return -ENOMEM;
- }
-
- kc->nr_free_pages -= nr;
- for (*pages = pl = kc->pages; --nr; pl = pl->next)
- ;
+ struct page_list *next;
- kc->pages = pl->next;
- pl->next = NULL;
+ do {
+ next = pl->next;
- spin_unlock(&kc->lock);
+ if (kc->nr_free_pages >= kc->nr_reserved_pages)
+ free_pl(pl);
+ else {
+ pl->next = kc->pages;
+ kc->pages = pl;
+ kc->nr_free_pages++;
+ }
- return 0;
+ pl = next;
+ } while (pl);
}
-static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
+ unsigned int nr, struct page_list **pages)
{
- struct page_list *cursor;
+ struct page_list *pl;
+
+ *pages = NULL;
+
+ do {
+ pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+ if (unlikely(!pl)) {
+ /* Use reserved pages */
+ pl = kc->pages;
+ if (unlikely(!pl))
+ goto out_of_memory;
+ kc->pages = pl->next;
+ kc->nr_free_pages--;
+ }
+ pl->next = *pages;
+ *pages = pl;
+ } while (--nr);
- spin_lock(&kc->lock);
- for (cursor = pl; cursor->next; cursor = cursor->next)
- kc->nr_free_pages++;
+ return 0;
- kc->nr_free_pages++;
- cursor->next = kc->pages;
- kc->pages = pl;
- spin_unlock(&kc->lock);
+out_of_memory:
+ if (*pages)
+ kcopyd_put_pages(kc, *pages);
+ return -ENOMEM;
}
/*
@@ -141,13 +163,16 @@ static void drop_pages(struct page_list *pl)
}
}
-static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
+/*
+ * Allocate and reserve nr_pages for the use of a specific client.
+ */
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
- unsigned int i;
+ unsigned i;
struct page_list *pl = NULL, *next;
- for (i = 0; i < nr; i++) {
- next = alloc_pl();
+ for (i = 0; i < nr_pages; i++) {
+ next = alloc_pl(GFP_KERNEL);
if (!next) {
if (pl)
drop_pages(pl);
@@ -157,17 +182,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
pl = next;
}
+ kc->nr_reserved_pages += nr_pages;
kcopyd_put_pages(kc, pl);
- kc->nr_pages += nr;
+
return 0;
}
static void client_free_pages(struct dm_kcopyd_client *kc)
{
- BUG_ON(kc->nr_free_pages != kc->nr_pages);
+ BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
drop_pages(kc->pages);
kc->pages = NULL;
- kc->nr_free_pages = kc->nr_pages = 0;
+ kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
/*-----------------------------------------------------------------
@@ -216,16 +242,17 @@ struct kcopyd_job {
struct mutex lock;
atomic_t sub_jobs;
sector_t progress;
-};
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+ struct kcopyd_job *master_job;
+};
static struct kmem_cache *_job_cache;
int __init dm_kcopyd_init(void)
{
- _job_cache = KMEM_CACHE(kcopyd_job, 0);
+ _job_cache = kmem_cache_create("kcopyd_job",
+ sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+ __alignof__(struct kcopyd_job), 0, NULL);
if (!_job_cache)
return -ENOMEM;
@@ -299,7 +326,12 @@ static int run_complete_job(struct kcopyd_job *job)
if (job->pages)
kcopyd_put_pages(kc, job->pages);
- mempool_free(job, kc->job_pool);
+ /*
+ * If this is the master job, the sub jobs have already
+ * completed so we can free everything.
+ */
+ if (job->master_job == job)
+ mempool_free(job, kc->job_pool);
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +492,14 @@ static void dispatch_job(struct kcopyd_job *job)
wake(kc);
}
-#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
void *context)
{
/* FIXME: tidy this function */
sector_t progress = 0;
sector_t count = 0;
- struct kcopyd_job *job = (struct kcopyd_job *) context;
+ struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+ struct kcopyd_job *job = sub_job->master_job;
struct dm_kcopyd_client *kc = job->kc;
mutex_lock(&job->lock);
@@ -498,8 +530,6 @@ static void segment_complete(int read_err, unsigned long write_err,
if (count) {
int i;
- struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
- GFP_NOIO);
*sub_job = *job;
sub_job->source.sector += progress;
@@ -511,7 +541,7 @@ static void segment_complete(int read_err, unsigned long write_err,
}
sub_job->fn = segment_complete;
- sub_job->context = job;
+ sub_job->context = sub_job;
dispatch_job(sub_job);
} else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +561,19 @@ static void segment_complete(int read_err, unsigned long write_err,
}
/*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
*/
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
{
int i;
- atomic_inc(&job->kc->nr_jobs);
+ atomic_inc(&master_job->kc->nr_jobs);
- atomic_set(&job->sub_jobs, SPLIT_COUNT);
- for (i = 0; i < SPLIT_COUNT; i++)
- segment_complete(0, 0u, job);
+ atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+ for (i = 0; i < SPLIT_COUNT; i++) {
+ master_job[i + 1].master_job = master_job;
+ segment_complete(0, 0u, &master_job[i + 1]);
+ }
}
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
@@ -553,7 +583,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
struct kcopyd_job *job;
/*
- * Allocate a new job.
+ * Allocate an array of jobs consisting of one master job
+ * followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
@@ -577,10 +608,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->fn = fn;
job->context = context;
+ job->master_job = job;
- if (job->source.count < SUB_JOB_SIZE)
+ if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
-
else {
mutex_init(&job->lock);
job->progress = 0;
@@ -606,17 +637,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
/*-----------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned int nr_pages,
- struct dm_kcopyd_client **result)
+struct dm_kcopyd_client *dm_kcopyd_client_create(void)
{
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
if (!kc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- spin_lock_init(&kc->lock);
spin_lock_init(&kc->job_lock);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
goto bad_workqueue;
kc->pages = NULL;
- kc->nr_pages = kc->nr_free_pages = 0;
- r = client_alloc_pages(kc, nr_pages);
+ kc->nr_reserved_pages = kc->nr_free_pages = 0;
+ r = client_reserve_pages(kc, RESERVE_PAGES);
if (r)
goto bad_client_pages;
- kc->io_client = dm_io_client_create(nr_pages);
+ kc->io_client = dm_io_client_create();
if (IS_ERR(kc->io_client)) {
r = PTR_ERR(kc->io_client);
goto bad_io_client;
@@ -647,8 +676,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
init_waitqueue_head(&kc->destroyq);
atomic_set(&kc->nr_jobs, 0);
- *result = kc;
- return 0;
+ return kc;
bad_io_client:
client_free_pages(kc);
@@ -659,7 +687,7 @@ bad_workqueue:
bad_slab:
kfree(kc);
- return r;
+ return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
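For context on the converted callers later in this diff (dm-raid1.c and dm-snap.c): dm_kcopyd_client_create() no longer takes a page count and now returns the client, or an ERR_PTR() value, instead of filling in a result pointer, so callers move from checking an int return code to IS_ERR()/PTR_ERR(). A minimal caller sketch of the new convention, not taken verbatim from any one driver:

	struct dm_kcopyd_client *kc;

	kc = dm_kcopyd_client_create();	/* no nr_pages argument any more */
	if (IS_ERR(kc))
		return PTR_ERR(kc);	/* typically -ENOMEM */

	/* ... submit work with dm_kcopyd_copy(kc, ...) as before ... */

	dm_kcopyd_client_destroy(kc);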
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a1f321889676..948e3f4925bf 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
lc->io_req.mem.type = DM_IO_VMA;
lc->io_req.notify.fn = NULL;
- lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
- PAGE_SIZE));
+ lc->io_req.client = dm_io_client_create();
if (IS_ERR(lc->io_req.client)) {
r = PTR_ERR(lc->io_req.client);
DMWARN("couldn't allocate disk io client");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a550a057d991..aa4e570c2cb5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP || error == -EREMOTEIO)
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
return error;
if (mpio->pgpath)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 976ad4688afc..9bfd057be686 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
-#define DM_KCOPYD_PAGES 64
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -887,7 +885,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
return NULL;
}
- ms->io_client = dm_io_client_create(DM_IO_PAGES);
+ ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
mempool_destroy(ms->read_record_pool);
@@ -1117,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
- if (r)
+ ms->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(ms->kcopyd_client)) {
+ r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
+ }
wakeup_mirrord(ms);
return 0;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 95891dfcbca0..135c2f1fdbfc 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
struct workqueue_struct *metadata_wq;
};
-static unsigned sectors_to_pages(unsigned sectors)
-{
- return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
-}
-
static int alloc_area(struct pstore *ps)
{
int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
chunk_size_supplied = 0;
}
- ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
- chunk_size));
+ ps->io_client = dm_io_client_create();
if (IS_ERR(ps->io_client))
return PTR_ERR(ps->io_client);
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
return r;
}
- r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
- ps->io_client);
- if (r)
- return r;
-
r = alloc_area(ps);
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2d330942cb2..9ecff5f3023a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,11 +40,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
#define SNAPSHOT_COPY_PRIORITY 2
/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
-/*
* The size of the mempool used to track chunks in use.
*/
#define MIN_IOS 256
@@ -1116,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
- if (r) {
+ s->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(s->kcopyd_client)) {
+ r = PTR_ERR(s->kcopyd_client);
ti->error = "Could not create kcopyd client";
goto bad_kcopyd;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cb8380c9767f..451c3bb176d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
+ struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
+ /*
+ * Some devices exist without request functions,
+ * such as loop devices not yet bound to backing files.
+ * Forbid the use of such devices.
+ */
+ q = bdev_get_queue(bdev);
+ if (!q || !q->make_request_fn) {
+ DMWARN("%s: %s is not yet initialised: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
+ return 1;
+ }
+
if (!dev_size)
return 0;
@@ -1346,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
return 0;
/*
- * Ensure that at least one underlying device supports discards.
+ * Unless any target used by the table set discards_supported,
+ * require at least one underlying device to support discards.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
* supporting discard must provide.
@@ -1354,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
+ if (ti->discards_supported)
+ return 1;
+
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_discard_capable, NULL))
return 1;
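The ti->discards_supported test added above lets a target advertise discard support for the whole table even when none of its underlying devices reports it, e.g. a target that implements discards internally. A minimal sketch of a hypothetical target constructor opting in; the flag is the same struct dm_target field the hunk above checks:

	static int my_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/* ... parse arguments and set up per-target state ... */

		/*
		 * Handle discards in the target itself, so the table
		 * reports discard support regardless of what the
		 * underlying devices advertise.
		 */
		ti->discards_supported = 1;

		return 0;
	}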
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c3b18e78cee..d36f41ea8cbf 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -195,6 +195,8 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_D2;
case ACPI_STATE_D3:
return PCI_D3hot;
+ case ACPI_STATE_D3_COLD:
+ return PCI_D3cold;
}
return PCI_POWER_ERROR;
}
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 4c4a7422c5e8..3f204fde1b02 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -202,8 +202,8 @@ static bool extra_features;
* watching the output of address 0x4F (do an ec_transaction writing 0x33
* into 0x4F and read a few bytes from the output, like so:
* u8 writeData = 0x33;
- * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0);
- * That address is labelled "fan1 table information" in the service manual.
+ * ec_transaction(0x4F, &writeData, 1, buffer, 32);
+ * That address is labeled "fan1 table information" in the service manual.
* It should be clear which value in 'buffer' changes). This seems to be
* related to fan speed. It isn't a proper 'realtime' fan speed value
* though, because physically stopping or speeding up the fan doesn't
@@ -288,7 +288,7 @@ static int get_backlight_level(void)
static void set_backlight_state(bool on)
{
u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
- ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0, 0);
+ ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
}
@@ -296,24 +296,24 @@ static void set_backlight_state(bool on)
static void pwm_enable_control(void)
{
unsigned char writeData = PWM_ENABLE_DATA;
- ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
}
static void pwm_disable_control(void)
{
unsigned char writeData = PWM_DISABLE_DATA;
- ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
}
static void set_pwm(int pwm)
{
- ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0, 0);
+ ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
}
static int get_fan_rpm(void)
{
u8 value, data = FAN_DATA;
- ec_transaction(FAN_ADDRESS, &data, 1, &value, 1, 0);
+ ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
return 100 * (int)value;
}
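This driver and msi-laptop below are converted for the same interface change: ec_transaction() has lost its trailing flag argument, leaving five parameters (command byte, write buffer and length, read buffer and length). A minimal sketch of the resulting call pattern, reusing the fan-table example from the comment above rather than introducing new constants:

	u8 cmd = 0x33;		/* byte written to EC address 0x4F */
	u8 reply[32];
	int ret;

	/* write one command byte, then read up to 32 bytes of response */
	ret = ec_transaction(0x4F, &cmd, 1, reply, sizeof(reply));
	if (ret < 0)
		return ret;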
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index d7213e406867..3ff629df9f01 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -135,7 +135,7 @@ static int set_lcd_level(int level)
buf[1] = (u8) (level*31);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
- NULL, 0, 1);
+ NULL, 0);
}
static int get_lcd_level(void)
@@ -144,7 +144,7 @@ static int get_lcd_level(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -157,7 +157,7 @@ static int get_auto_brightness(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -172,7 +172,7 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -180,7 +180,7 @@ static int set_auto_brightness(int enable)
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
- NULL, 0, 1);
+ NULL, 0);
}
static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
@@ -217,7 +217,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
u8 wdata = 0, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
+ result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
return -1;
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index fc6f2a5bde01..0b1c82ad6805 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -499,7 +499,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
dev_set_drvdata(hwmon->device, hwmon);
result = device_create_file(hwmon->device, &dev_attr_name);
if (result)
- goto unregister_hwmon_device;
+ goto free_mem;
register_sys_interface:
tz->hwmon = hwmon;
@@ -513,7 +513,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
sysfs_attr_init(&tz->temp_input.attr.attr);
result = device_create_file(hwmon->device, &tz->temp_input.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_name;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
@@ -527,7 +527,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
result = device_create_file(hwmon->device,
&tz->temp_crit.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_input;
}
}
@@ -539,9 +539,9 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
- unregister_hwmon_device:
- device_remove_file(hwmon->device, &tz->temp_crit.attr);
+ unregister_input:
device_remove_file(hwmon->device, &tz->temp_input.attr);
+ unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
hwmon_device_unregister(hwmon->device);
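The relabelled gotos above turn thermal_add_hwmon_sysfs() into the usual kernel unwind ladder: each failure jumps to a label that tears down exactly what has been set up so far, in reverse order, rather than the old single label that removed attributes which might never have been created. A generic sketch of the idiom, using hypothetical create_a/create_b/create_c helpers:

	static int my_setup(struct device *dev)
	{
		int ret;

		ret = create_a(dev);
		if (ret)
			goto out;		/* nothing to undo yet */

		ret = create_b(dev);
		if (ret)
			goto undo_a;		/* only A exists here */

		ret = create_c(dev);
		if (ret)
			goto undo_b;

		return 0;

	undo_b:
		destroy_b(dev);
	undo_a:
		destroy_a(dev);
	out:
		return ret;
	}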