Diffstat (limited to 'include/linux')
196 files changed, 4546 insertions, 1356 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 2fc8e14cc24a..9aa9bcadf869 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -276,6 +276,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \ $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),) unifdef-y += kvm_para.h endif +unifdef-y += l2tp.h unifdef-y += llc.h unifdef-y += loop.h unifdef-y += lp.h diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 224a38c960d4..ccf94dc5acdf 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -253,7 +253,7 @@ int acpi_resources_are_enforced(void); #ifdef CONFIG_PM_SLEEP void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); -void __init acpi_s4_no_nvs(void); +void __init acpi_nvs_nosave(void); #endif /* CONFIG_PM_SLEEP */ struct acpi_osc_context { diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index f7dd576dd5a4..be3d9a77d6ed 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h @@ -15,11 +15,13 @@ #ifndef _AHCI_PLATFORM_H #define _AHCI_PLATFORM_H +#include <linux/compiler.h> + struct device; struct ata_port_info; struct ahci_platform_data { - int (*init)(struct device *dev); + int (*init)(struct device *dev, void __iomem *addr); void (*exit)(struct device *dev); const struct ata_port_info *ata_port_info; unsigned int force_port_map; diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 8b1038607831..b0c174012436 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -14,14 +14,19 @@ #ifndef ASMARM_AMBA_H #define ASMARM_AMBA_H +#include <linux/clk.h> #include <linux/device.h> +#include <linux/err.h> #include <linux/resource.h> #define AMBA_NR_IRQS 2 +struct clk; + struct amba_device { struct device dev; struct resource res; + struct clk *pclk; u64 dma_mask; unsigned int periphid; unsigned int irq[AMBA_NR_IRQS]; @@ -59,6 +64,12 @@ struct amba_device *amba_find_device(const char *, struct device *, unsigned int int amba_request_regions(struct amba_device *, const char *); void amba_release_regions(struct amba_device *); +#define amba_pclk_enable(d) \ + (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk)) + +#define amba_pclk_disable(d) \ + do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) + #define amba_config(d) (((d)->periphid >> 24) & 0xff) #define amba_rev(d) (((d)->periphid >> 20) & 0x0f) #define amba_manf(d) (((d)->periphid >> 12) & 0xff) diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index 7e466fe72025..ca84ce70d5d5 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -15,9 +15,10 @@ * @ocr_mask: available voltages on the 4 pins from the block, this * is ignored if a regulator is used, see the MMC_VDD_* masks in * mmc/host.h - * @translate_vdd: a callback function to translate a MMC_VDD_* - * mask into a value to be binary or:ed and written into the - * MMCIPWR register of the block + * @vdd_handler: a callback function to translate a MMC_VDD_* + * mask into a value to be binary (or set some other custom bits + * in MMCIPWR) or:ed and written into the MMCIPWR register of the + * block. May also control external power based on the power_mode. 
* @status: if no GPIO read function was given to the block in * gpio_wp (below) this function will be called to determine * whether a card is present in the MMC slot or not @@ -29,7 +30,8 @@ struct mmci_platform_data { unsigned int f_max; unsigned int ocr_mask; - u32 (*translate_vdd)(struct device *, unsigned int); + u32 (*vdd_handler)(struct device *, unsigned int vdd, + unsigned char power_mode); unsigned int (*status)(struct device *); int gpio_wp; int gpio_cd; diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 5a5a7fd62490..e1b634b635f2 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -38,10 +38,12 @@ #define UART01x_FR 0x18 /* Flag register (Read only). */ #define UART010_IIR 0x1C /* Interrupt indentification register (Read). */ #define UART010_ICR 0x1C /* Interrupt clear register (Write). */ +#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */ #define UART01x_ILPR 0x20 /* IrDA low power counter register. */ #define UART011_IBRD 0x24 /* Integer baud rate divisor register. */ #define UART011_FBRD 0x28 /* Fractional baud rate divisor register. */ #define UART011_LCRH 0x2c /* Line control register. */ +#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */ #define UART011_CR 0x30 /* Control register. */ #define UART011_IFLS 0x34 /* Interrupt fifo level select. */ #define UART011_IMSC 0x38 /* Interrupt mask. */ @@ -84,6 +86,7 @@ #define UART010_CR_TIE 0x0020 #define UART010_CR_RIE 0x0010 #define UART010_CR_MSIE 0x0008 +#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */ #define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */ #define UART01x_CR_SIREN 0x0002 /* SIR enable */ #define UART01x_CR_UARTEN 0x0001 /* UART enable */ diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 817b23705c91..f6481daf6e52 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -431,6 +431,14 @@ struct atm_dev *atm_dev_register(const char *type,const struct atmdev_ops *ops, int number,unsigned long *flags); /* number == -1: pick first available */ struct atm_dev *atm_dev_lookup(int number); void atm_dev_deregister(struct atm_dev *dev); + +/* atm_dev_signal_change + * + * Propagate lower layer signal change in atm_dev->signal to netdevice. + * The event will be sent via a notifier call chain. 
+ */ +void atm_dev_signal_change(struct atm_dev *dev, char signal); + void vcc_insert_socket(struct sock *sk); @@ -510,6 +518,15 @@ void register_atm_ioctl(struct atm_ioctl *); */ void deregister_atm_ioctl(struct atm_ioctl *); + +/* register_atmdevice_notifier - register atm_dev notify events + * + * Clients like br2684 will register notify events + * Currently we notify of signal found/lost + */ +int register_atmdevice_notifier(struct notifier_block *nb); +void unregister_atmdevice_notifier(struct notifier_block *nb); + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 9ae2889096b6..e9aec0d099df 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -82,8 +82,6 @@ struct backing_dev_info { struct bdi_writeback wb; /* default writeback info for this bdi */ spinlock_t wb_lock; /* protects update side of wb_list */ struct list_head wb_list; /* the flusher threads hanging off this bdi */ - unsigned long wb_mask; /* bitmask of registered tasks */ - unsigned int wb_cnt; /* number of registered tasks */ struct list_head work_list; diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 7f437ca1ed44..b840a4960282 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -1,6 +1,13 @@ #define PHY_ID_BCM50610 0x0143bd60 #define PHY_ID_BCM50610M 0x0143bd70 +#define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 +#define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5482 0x0143bcb0 +#define PHY_ID_BCM5411 0x00206070 +#define PHY_ID_BCM5421 0x002060e0 +#define PHY_ID_BCM5464 0x002060b0 +#define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM57780 0x03625d90 #define PHY_BCM_OUI_MASK 0xfffffc00 diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h index 2a61eb1beb85..d9cb19b7cff7 100644 --- a/include/linux/caif/caif_socket.h +++ b/include/linux/caif/caif_socket.h @@ -62,6 +62,7 @@ enum caif_channel_priority { * @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing. * @CAIFPROTO_UTIL: Utility (Psock) channel. * @CAIFPROTO_RFM: Remote File Manager + * @CAIFPROTO_DEBUG: Debug link * * This enum defines the CAIF Channel type to be used. This defines * the service to connect to on the modem. @@ -72,6 +73,7 @@ enum caif_protocol_type { CAIFPROTO_DATAGRAM_LOOP, CAIFPROTO_UTIL, CAIFPROTO_RFM, + CAIFPROTO_DEBUG, _CAIFPROTO_MAX }; #define CAIFPROTO_MAX _CAIFPROTO_MAX @@ -83,6 +85,28 @@ enum caif_protocol_type { enum caif_at_type { CAIF_ATTYPE_PLAIN = 2 }; + /** + * enum caif_debug_type - Content selection for debug connection + * @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain + * both trace and interactive debug. + * @CAIF_DEBUG_TRACE: Connection contains trace only. + * @CAIF_DEBUG_INTERACTIVE: Connection to interactive debug. + */ +enum caif_debug_type { + CAIF_DEBUG_TRACE_INTERACTIVE = 0, + CAIF_DEBUG_TRACE, + CAIF_DEBUG_INTERACTIVE, +}; + +/** + * enum caif_debug_service - Debug Service Endpoint + * @CAIF_RADIO_DEBUG_SERVICE: Debug service on the Radio sub-system + * @CAIF_APP_DEBUG_SERVICE: Debug for the applications sub-system + */ +enum caif_debug_service { + CAIF_RADIO_DEBUG_SERVICE = 1, + CAIF_APP_DEBUG_SERVICE +}; /** * struct sockaddr_caif - the sockaddr structure for CAIF sockets. @@ -109,6 +133,12 @@ enum caif_at_type { * * @u.rfm.volume: Volume to mount. * + * @u.dbg: Applies when family = CAIFPROTO_DEBUG. + * + * @u.dbg.type: Type of debug connection to set up + * (caif_debug_type). 
+ * + * @u.dbg.service: Service sub-system to connect (caif_debug_service * Description: * This structure holds the connect parameters used for setting up a * CAIF Channel. It defines the service to connect to on the modem. @@ -130,6 +160,10 @@ struct sockaddr_caif { __u32 connection_id; char volume[16]; } rfm; /* CAIFPROTO_RFM */ + struct { + __u8 type; /* type:enum caif_debug_type */ + __u8 service; /* service:caif_debug_service */ + } dbg; /* CAIFPROTO_DEBUG */ } u; }; diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h new file mode 100644 index 000000000000..72b713ab57e9 --- /dev/null +++ b/include/linux/can/platform/flexcan.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2010 Marc Kleine-Budde <kernel@pengutronix.de> + * + * This file is released under the GPLv2 + * + */ + +#ifndef __CAN_PLATFORM_FLEXCAN_H +#define __CAN_PLATFORM_FLEXCAN_H + +/** + * struct flexcan_platform_data - flex CAN controller platform data + * @transceiver_enable: - called to power on/off the transceiver + * + */ +struct flexcan_platform_data { + void (*transceiver_switch)(int enable); +}; + +#endif /* __CAN_PLATFORM_FLEXCAN_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index 39e5ff512fbe..90012b9ddbf3 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -49,9 +49,6 @@ typedef struct __user_cap_data_struct { } __user *cap_user_data_t; -#define XATTR_CAPS_SUFFIX "capability" -#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX - #define VFS_CAP_REVISION_MASK 0xFF000000 #define VFS_CAP_REVISION_SHIFT 24 #define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e3d00fdb858d..ed3e92e41c6e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -578,6 +578,7 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); int cgroup_scan_tasks(struct cgroup_scanner *scan); int cgroup_attach_task(struct cgroup *, struct task_struct *); +int cgroup_attach_task_current_cg(struct task_struct *); /* * CSS ID is ID for cgroup_subsys_state structs under subsys. 
This only works @@ -634,6 +635,12 @@ static inline int cgroupstats_build(struct cgroupstats *stats, return -EINVAL; } +/* No cgroups - nothing to do */ +static inline int cgroup_attach_task_current_cg(struct task_struct *t) +{ + return 0; +} + #endif /* !CONFIG_CGROUPS */ #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 5ea3c60c160c..c37b21ad5a3b 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -292,6 +292,8 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); */ extern int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); +extern void +__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq); static inline int clocksource_register_hz(struct clocksource *cs, u32 hz) { @@ -303,6 +305,15 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz) return __clocksource_register_scale(cs, 1000, khz); } +static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz) +{ + __clocksource_updatefreq_scale(cs, 1, hz); +} + +static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz) +{ + __clocksource_updatefreq_scale(cs, 1000, khz); +} static inline void clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec) @@ -313,11 +324,13 @@ clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec) #ifdef CONFIG_GENERIC_TIME_VSYSCALL extern void -update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult); +update_vsyscall(struct timespec *ts, struct timespec *wtm, + struct clocksource *c, u32 mult); extern void update_vsyscall_tz(void); #else static inline void -update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult) +update_vsyscall(struct timespec *ts, struct timespec *wtm, + struct clocksource *c, u32 mult) { } diff --git a/include/linux/compiler.h b/include/linux/compiler.h index a5a472b10746..c1a62c56a660 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -16,6 +16,7 @@ # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) # define __percpu __attribute__((noderef, address_space(3))) +# define __rcu extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); #else @@ -34,6 +35,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __release(x) (void)0 # define __cond_lock(x,c) (c) # define __percpu +# define __rcu #endif #ifdef __KERNEL__ diff --git a/include/linux/console.h b/include/linux/console.h index dcca5339ceb3..95cf6f08a59d 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -55,6 +55,16 @@ struct consw { void (*con_invert_region)(struct vc_data *, u16 *, int); u16 *(*con_screen_pos)(struct vc_data *, int); unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); + /* + * Prepare the console for the debugger. This includes, but is not + * limited to, unblanking the console, loading an appropriate + * palette, and allowing debugger generated output. + */ + int (*con_debug_enter)(struct vc_data *); + /* + * Restore the console to its pre-debug state as closely as possible. 
+ */ + int (*con_debug_leave)(struct vc_data *); }; extern const struct consw *conswitchp; @@ -69,6 +79,14 @@ int register_con_driver(const struct consw *csw, int first, int last); int unregister_con_driver(const struct consw *csw); int take_over_console(const struct consw *sw, int first, int last, int deflt); void give_up_console(const struct consw *sw); +#ifdef CONFIG_HW_CONSOLE +int con_debug_enter(struct vc_data *vc); +int con_debug_leave(void); +#else +#define con_debug_enter(vc) (0) +#define con_debug_leave() (0) +#endif + /* scroll */ #define SM_UP (1) #define SM_DOWN (2) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index e287863ac053..4823af64e9db 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -48,6 +48,33 @@ extern ssize_t arch_cpu_release(const char *, size_t); #endif struct notifier_block; +/* + * CPU notifier priorities. + */ +enum { + /* + * SCHED_ACTIVE marks a cpu which is coming up active during + * CPU_ONLINE and CPU_DOWN_FAILED and must be the first + * notifier. CPUSET_ACTIVE adjusts cpuset according to + * cpu_active mask right after SCHED_ACTIVE. During + * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are + * ordered in the similar way. + * + * This ordering guarantees consistent cpu_active mask and + * migration behavior to all cpu notifiers. + */ + CPU_PRI_SCHED_ACTIVE = INT_MAX, + CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1, + CPU_PRI_SCHED_INACTIVE = INT_MIN + 1, + CPU_PRI_CPUSET_INACTIVE = INT_MIN, + + /* migration should happen before other stuff but after perf */ + CPU_PRI_PERF = 20, + CPU_PRI_MIGRATION = 10, + /* prepare workqueues for other notifiers */ + CPU_PRI_WORKQUEUE = 5, +}; + #ifdef CONFIG_SMP /* Need to know about CPUs going up/down? */ #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9f15150ce8d6..c3e9de8321c6 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -196,11 +196,6 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); -int lock_policy_rwsem_read(int cpu); -int lock_policy_rwsem_write(int cpu); -void unlock_policy_rwsem_read(int cpu); -void unlock_policy_rwsem_write(int cpu); - /********************************************************************* * CPUFREQ DRIVER INTERFACE * diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 457ed765a116..f20eb8f16025 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -20,6 +20,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? 
*/ extern int cpuset_init(void); extern void cpuset_init_smp(void); +extern void cpuset_update_active_cpus(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern int cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); @@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask) static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} +static inline void cpuset_update_active_cpus(void) +{ + partition_sched_domains(1, NULL, NULL); +} + static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { diff --git a/include/linux/cred.h b/include/linux/cred.h index 75c0fa881308..4d2c39573f36 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -153,6 +153,7 @@ struct cred { extern void __put_cred(struct cred *); extern void exit_creds(struct task_struct *); extern int copy_creds(struct task_struct *, unsigned long); +extern const struct cred *get_task_cred(struct task_struct *); extern struct cred *cred_alloc_blank(void); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); @@ -273,33 +274,18 @@ static inline void put_cred(const struct cred *_cred) * @task: The task to query * * Access the objective credentials of a task. The caller must hold the RCU - * readlock. + * readlock or the task must be dead and unable to change its own credentials. * - * The caller must make sure task doesn't go away, either by holding a ref on - * task or by holding tasklist_lock to prevent it from being unlinked. + * The result of this function should not be passed directly to get_cred(); + * rather get_task_cred() should be used instead. */ -#define __task_cred(task) \ - ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held()))) - -/** - * get_task_cred - Get another task's objective credentials - * @task: The task to query - * - * Get the objective credentials of a task, pinning them so that they can't go - * away. Accessing a task's credentials directly is not permitted. - * - * The caller must make sure task doesn't go away, either by holding a ref on - * task or by holding tasklist_lock to prevent it from being unlinked. 
- */ -#define get_task_cred(task) \ -({ \ - struct cred *__cred; \ - rcu_read_lock(); \ - __cred = (struct cred *) __task_cred((task)); \ - get_cred(__cred); \ - rcu_read_unlock(); \ - __cred; \ -}) +#define __task_cred(task) \ + ({ \ + const struct task_struct *__t = (task); \ + rcu_dereference_check(__t->real_cred, \ + rcu_read_lock_held() || \ + task_is_dead(__t)); \ + }) /** * get_current_cred - Get the current task's subjective credentials diff --git a/include/linux/delay.h b/include/linux/delay.h index fd832c6d419e..a6ecb34cf547 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -45,6 +45,7 @@ extern unsigned long lpj_fine; void calibrate_delay(void); void msleep(unsigned int msecs); unsigned long msleep_interruptible(unsigned int msecs); +void usleep_range(unsigned long min, unsigned long max); static inline void ssleep(unsigned int seconds) { diff --git a/include/linux/device.h b/include/linux/device.h index 0713e10571dd..516fecacf27b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -84,9 +84,8 @@ struct device *bus_find_device_by_name(struct bus_type *bus, struct device *start, const char *name); -int __must_check bus_for_each_drv(struct bus_type *bus, - struct device_driver *start, void *data, - int (*fn)(struct device_driver *, void *)); +int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, + void *data, int (*fn)(struct device_driver *, void *)); void bus_sort_breadthfirst(struct bus_type *bus, int (*compare)(const struct device *a, @@ -110,10 +109,12 @@ extern int bus_unregister_notifier(struct bus_type *bus, */ #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ -#define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */ -#define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be +#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be + bound */ +#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */ +#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be unbound */ -#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000005 /* driver is unbound +#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound from the device */ extern struct kset *bus_get_kset(struct bus_type *bus); @@ -551,7 +552,7 @@ extern int device_for_each_child(struct device *dev, void *data, int (*fn)(struct device *dev, void *data)); extern struct device *device_find_child(struct device *dev, void *data, int (*match)(struct device *dev, void *data)); -extern int device_rename(struct device *dev, char *new_name); +extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); extern const char *device_get_devnode(struct device *dev, @@ -638,43 +639,103 @@ extern void sysdev_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(const struct device *dev); -#define dev_printk(level, dev, format, arg...) \ - printk(level "%s %s: " format , dev_driver_string(dev) , \ - dev_name(dev) , ## arg) - -#define dev_emerg(dev, format, arg...) \ - dev_printk(KERN_EMERG , dev , format , ## arg) -#define dev_alert(dev, format, arg...) \ - dev_printk(KERN_ALERT , dev , format , ## arg) -#define dev_crit(dev, format, arg...) \ - dev_printk(KERN_CRIT , dev , format , ## arg) -#define dev_err(dev, format, arg...) \ - dev_printk(KERN_ERR , dev , format , ## arg) -#define dev_warn(dev, format, arg...) 
\ - dev_printk(KERN_WARNING , dev , format , ## arg) -#define dev_notice(dev, format, arg...) \ - dev_printk(KERN_NOTICE , dev , format , ## arg) -#define dev_info(dev, format, arg...) \ - dev_printk(KERN_INFO , dev , format , ## arg) + + +#ifdef CONFIG_PRINTK + +extern int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); +extern int dev_emerg(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_alert(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_crit(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_err(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_warn(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_notice(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int _dev_info(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +#else + +static inline int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); +static inline int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + { return 0; } + +static inline int dev_emerg(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_emerg(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_crit(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_crit(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_alert(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_alert(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_err(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_err(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_warn(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_warn(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_notice(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_notice(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int _dev_info(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int _dev_info(const struct device *dev, const char *fmt, ...) + { return 0; } + +#endif + +/* + * Stupid hackaround for existing uses of non-printk uses dev_info + * + * Note that the definition of dev_info below is actually _dev_info + * and a macro is used to avoid redefining dev_info + */ + +#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg) #if defined(DEBUG) #define dev_dbg(dev, format, arg...) \ - dev_printk(KERN_DEBUG , dev , format , ## arg) + dev_printk(KERN_DEBUG, dev, format, ##arg) #elif defined(CONFIG_DYNAMIC_DEBUG) -#define dev_dbg(dev, format, ...) do { \ +#define dev_dbg(dev, format, ...) 
\ +do { \ dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ - } while (0) +} while (0) #else -#define dev_dbg(dev, format, arg...) \ - ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) +#define dev_dbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ + 0; \ +}) #endif #ifdef VERBOSE_DEBUG #define dev_vdbg dev_dbg #else - -#define dev_vdbg(dev, format, arg...) \ - ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) +#define dev_vdbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ + 0; \ +}) #endif /* diff --git a/include/linux/dmi.h b/include/linux/dmi.h index a8a3e1ac281d..90e087f8d951 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -20,6 +20,7 @@ enum dmi_device_type { DMI_DEV_TYPE_SAS, DMI_DEV_TYPE_IPMI = -1, DMI_DEV_TYPE_OEM_STRING = -2, + DMI_DEV_TYPE_DEV_ONBOARD = -3, }; struct dmi_header { @@ -37,6 +38,14 @@ struct dmi_device { #ifdef CONFIG_DMI +struct dmi_dev_onboard { + struct dmi_device dev; + int instance; + int segment; + int bus; + int devfn; +}; + extern int dmi_check_system(const struct dmi_system_id *list); const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); extern const char * dmi_get_system_info(int field); diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h new file mode 100644 index 000000000000..cc92268af89a --- /dev/null +++ b/include/linux/dns_resolver.h @@ -0,0 +1,34 @@ +/* + * DNS Resolver upcall management for CIFS DFS and AFS + * Handles host name to IP address resolution and DNS query for AFSDB RR. + * + * Copyright (c) International Business Machines Corp., 2008 + * Author(s): Steve French (sfrench@us.ibm.com) + * Wang Lei (wang840925@gmail.com) + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_DNS_RESOLVER_H +#define _LINUX_DNS_RESOLVER_H + +#ifdef __KERNEL__ + +extern int dns_query(const char *type, const char *name, size_t namelen, + const char *options, char **_result, time_t *_expiry); + +#endif /* KERNEL */ + +#endif /* _LINUX_DNS_RESOLVER_H */ diff --git a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h index 4389ae72024e..86552807aed9 100644 --- a/include/linux/dqblk_xfs.h +++ b/include/linux/dqblk_xfs.h @@ -49,7 +49,7 @@ #define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ typedef struct fs_disk_quota { __s8 d_version; /* version of this structure */ - __s8 d_flags; /* XFS_{USER,PROJ,GROUP}_QUOTA */ + __s8 d_flags; /* FS_{USER,PROJ,GROUP}_QUOTA */ __u16 d_fieldmask; /* field specifier */ __u32 d_id; /* user, project, or group ID */ __u64 d_blk_hardlimit;/* absolute limit on disk blks */ @@ -119,18 +119,18 @@ typedef struct fs_disk_quota { #define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT) /* - * Various flags related to quotactl(2). 
Only relevant to XFS filesystems. + * Various flags related to quotactl(2). */ -#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ -#define XFS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ -#define XFS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ -#define XFS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ -#define XFS_QUOTA_PDQ_ACCT (1<<4) /* project quota accounting */ -#define XFS_QUOTA_PDQ_ENFD (1<<5) /* project quota limits enforcement */ +#define FS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ +#define FS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ +#define FS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ +#define FS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ +#define FS_QUOTA_PDQ_ACCT (1<<4) /* project quota accounting */ +#define FS_QUOTA_PDQ_ENFD (1<<5) /* project quota limits enforcement */ -#define XFS_USER_QUOTA (1<<0) /* user quota type */ -#define XFS_PROJ_QUOTA (1<<1) /* project quota type */ -#define XFS_GROUP_QUOTA (1<<2) /* group quota type */ +#define FS_USER_QUOTA (1<<0) /* user quota type */ +#define FS_PROJ_QUOTA (1<<1) /* project quota type */ +#define FS_GROUP_QUOTA (1<<2) /* group quota type */ /* * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. @@ -151,7 +151,7 @@ typedef struct fs_qfilestat { typedef struct fs_quota_stat { __s8 qs_version; /* version number for future changes */ - __u16 qs_flags; /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ + __u16 qs_flags; /* FS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ __s8 qs_pad; /* unused */ fs_qfilestat_t qs_uquota; /* user quota storage information */ fs_qfilestat_t qs_gquota; /* group quota storage information */ diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h index b6cb5425cde3..493a2bf85f62 100644 --- a/include/linux/dvb/frontend.h +++ b/include/linux/dvb/frontend.h @@ -62,6 +62,7 @@ typedef enum fe_caps { FE_CAN_8VSB = 0x200000, FE_CAN_16VSB = 0x400000, FE_HAS_EXTENDED_CAPS = 0x800000, /* We need more bitspace for newer APIs, indicate this. 
*/ + FE_CAN_TURBO_FEC = 0x8000000, /* frontend supports "turbo fec modulation" */ FE_CAN_2G_MODULATION = 0x10000000, /* frontend supports "2nd generation modulation" (DVB-S2) */ FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */ FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */ diff --git a/include/linux/dvb/version.h b/include/linux/dvb/version.h index 540b0583d9fb..5a7546c12688 100644 --- a/include/linux/dvb/version.h +++ b/include/linux/dvb/version.h @@ -24,6 +24,6 @@ #define _DVBVERSION_H_ #define DVB_API_VERSION 5 -#define DVB_API_VERSION_MINOR 1 +#define DVB_API_VERSION_MINOR 2 #endif /*_DVBVERSION_H_*/ diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index a55c873e8b66..c4627cbdb8e0 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -30,6 +30,7 @@ #define PCI_EEPROM_WIDTH_93C46 6 #define PCI_EEPROM_WIDTH_93C56 8 #define PCI_EEPROM_WIDTH_93C66 8 +#define PCI_EEPROM_WIDTH_93C86 8 #define PCI_EEPROM_WIDTH_OPCODE 3 #define PCI_EEPROM_WRITE_OPCODE 0x05 #define PCI_EEPROM_READ_OPCODE 0x06 diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 3d7a6687d247..848480bc2bf9 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -127,6 +127,20 @@ static inline void random_ether_addr(u8 *addr) } /** + * dev_hw_addr_random - Create random MAC and set device flag + * @dev: pointer to net_device structure + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Generate random MAC to be used by a device and set addr_assign_type + * so the state can be read by sysfs and be used by udev. + */ +static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr) +{ + dev->addr_assign_type |= NET_ADDR_RANDOM; + random_ether_addr(hwaddr); +} + +/** * compare_ether_addr - Compare two Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 276b40a16835..991269e5b152 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -379,11 +379,22 @@ struct ethtool_rxnfc { __u32 flow_type; /* The rx flow hash value or the rule DB size */ __u64 data; + /* The following fields are not valid and must not be used for + * the ETHTOOL_{G,X}RXFH commands. */ struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0]; }; +struct ethtool_rxfh_indir { + __u32 cmd; + /* On entry, this is the array size of the user buffer. On + * return from ETHTOOL_GRXFHINDIR, this is the array size of + * the hardware indirection table. 
*/ + __u32 size; + __u32 ring_index[0]; /* ring/queue index for each hash value */ +}; + struct ethtool_rx_ntuple_flow_spec { __u32 flow_type; union { @@ -457,7 +468,7 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data); u32 ethtool_op_get_ufo(struct net_device *dev); int ethtool_op_set_ufo(struct net_device *dev, u32 data); u32 ethtool_op_get_flags(struct net_device *dev); -int ethtool_op_set_flags(struct net_device *dev, u32 data); +int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); void ethtool_ntuple_flush(struct net_device *dev); /** @@ -576,6 +587,10 @@ struct ethtool_ops { int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *); int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *); + int (*get_rxfh_indir)(struct net_device *, + struct ethtool_rxfh_indir *); + int (*set_rxfh_indir)(struct net_device *, + const struct ethtool_rxfh_indir *); }; #endif /* __KERNEL__ */ @@ -586,29 +601,29 @@ struct ethtool_ops { #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ #define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ #define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ -#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ -#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ #define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ #define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */ -#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ -#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ #define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ #define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ #define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ #define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ #define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ #define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ -#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ -#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ -#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ -#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ #define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable * (ethtool_value) */ #define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable * (ethtool_value). */ #define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. 
*/ #define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ -#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ #define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ #define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ #define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ @@ -619,8 +634,8 @@ struct ethtool_ops { #define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ #define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ #define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ -#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ -#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ +#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ +#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ #define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ #define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ @@ -637,6 +652,8 @@ struct ethtool_ops { #define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ #define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */ #define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ +#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */ +#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET @@ -645,18 +662,18 @@ struct ethtool_ops { /* Indicates what features are supported by the interface. */ #define SUPPORTED_10baseT_Half (1 << 0) #define SUPPORTED_10baseT_Full (1 << 1) -#define SUPPORTED_100baseT_Half (1 << 2) -#define SUPPORTED_100baseT_Full (1 << 3) +#define SUPPORTED_100baseT_Half (1 << 2) +#define SUPPORTED_100baseT_Full (1 << 3) #define SUPPORTED_1000baseT_Half (1 << 4) #define SUPPORTED_1000baseT_Full (1 << 5) #define SUPPORTED_Autoneg (1 << 6) #define SUPPORTED_TP (1 << 7) #define SUPPORTED_AUI (1 << 8) #define SUPPORTED_MII (1 << 9) -#define SUPPORTED_FIBRE (1 << 10) +#define SUPPORTED_FIBRE (1 << 10) #define SUPPORTED_BNC (1 << 11) #define SUPPORTED_10000baseT_Full (1 << 12) -#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Pause (1 << 13) #define SUPPORTED_Asym_Pause (1 << 14) #define SUPPORTED_2500baseX_Full (1 << 15) #define SUPPORTED_Backplane (1 << 16) @@ -666,8 +683,8 @@ struct ethtool_ops { #define SUPPORTED_10000baseR_FEC (1 << 20) /* Indicates what features are advertised by the interface. */ -#define ADVERTISED_10baseT_Half (1 << 0) -#define ADVERTISED_10baseT_Full (1 << 1) +#define ADVERTISED_10baseT_Half (1 << 0) +#define ADVERTISED_10baseT_Full (1 << 1) #define ADVERTISED_100baseT_Half (1 << 2) #define ADVERTISED_100baseT_Full (1 << 3) #define ADVERTISED_1000baseT_Half (1 << 4) @@ -706,12 +723,12 @@ struct ethtool_ops { #define DUPLEX_FULL 0x01 /* Which connector port. */ -#define PORT_TP 0x00 +#define PORT_TP 0x00 #define PORT_AUI 0x01 #define PORT_MII 0x02 #define PORT_FIBRE 0x03 #define PORT_BNC 0x04 -#define PORT_DA 0x05 +#define PORT_DA 0x05 #define PORT_NONE 0xef #define PORT_OTHER 0xff @@ -725,7 +742,7 @@ struct ethtool_ops { /* Enable or disable autonegotiation. If this is set to enable, * the forced link modes above are completely ignored. 
*/ -#define AUTONEG_DISABLE 0x00 +#define AUTONEG_DISABLE 0x00 #define AUTONEG_ENABLE 0x01 /* Mode MDI or MDI-X */ diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 7fc62d4550b2..3d3a9915dde2 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -400,7 +400,6 @@ struct ext3_inode { #define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */ #define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */ #define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */ -#define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */ #define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */ #define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ diff --git a/include/linux/fb.h b/include/linux/fb.h index 8e5a9dfb76bf..0c5659c41b01 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -3,6 +3,9 @@ #include <linux/types.h> #include <linux/i2c.h> +#ifdef __KERNEL__ +#include <linux/kgdb.h> +#endif /* __KERNEL__ */ /* Definitions of frame buffers */ @@ -607,6 +610,12 @@ struct fb_deferred_io { * LOCKING NOTE: those functions must _ALL_ be called with the console * semaphore held, this is the only suitable locking mechanism we have * in 2.6. Some may be called at interrupt time at this point though. + * + * The exception to this is the debug related hooks. Putting the fb + * into a debug state (e.g. flipping to the kernel console) and restoring + * it must be done in a lock-free manner, so low level drivers should + * keep track of the initial console (if applicable) and may need to + * perform direct, unlocked hardware writes in these hooks. */ struct fb_ops { @@ -676,6 +685,10 @@ struct fb_ops { /* teardown any resources to do with this framebuffer */ void (*fb_destroy)(struct fb_info *info); + + /* called at KDB enter and leave time to prepare the console */ + int (*fb_debug_enter)(struct fb_info *info); + int (*fb_debug_leave)(struct fb_info *info); }; #ifdef CONFIG_FB_TILEBLITTING @@ -873,6 +886,8 @@ struct fb_info { static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) + max_num * sizeof(struct aperture), GFP_KERNEL); + if (!a) + return NULL; a->count = max_num; return a; } diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 013dc529e95f..f59ed297b661 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -11,6 +11,7 @@ #include <linux/rcupdate.h> #include <linux/types.h> #include <linux/init.h> +#include <linux/fs.h> #include <asm/atomic.h> @@ -61,7 +62,8 @@ struct files_struct { (rcu_dereference_check((fdtfd), \ rcu_read_lock_held() || \ lockdep_is_held(&(files)->file_lock) || \ - atomic_read(&(files)->count) == 1)) + atomic_read(&(files)->count) == 1 || \ + rcu_my_thread_group_empty())) #define files_fdtable(files) \ (rcu_dereference_check_fdtable((files), (files)->fdt)) diff --git a/include/linux/filter.h b/include/linux/filter.h index 151f5d703b7e..69b43dbea6c6 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -91,6 +91,54 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. 
*/ #define BPF_TAX 0x00 #define BPF_TXA 0x80 +enum { + BPF_S_RET_K = 0, + BPF_S_RET_A, + BPF_S_ALU_ADD_K, + BPF_S_ALU_ADD_X, + BPF_S_ALU_SUB_K, + BPF_S_ALU_SUB_X, + BPF_S_ALU_MUL_K, + BPF_S_ALU_MUL_X, + BPF_S_ALU_DIV_X, + BPF_S_ALU_AND_K, + BPF_S_ALU_AND_X, + BPF_S_ALU_OR_K, + BPF_S_ALU_OR_X, + BPF_S_ALU_LSH_K, + BPF_S_ALU_LSH_X, + BPF_S_ALU_RSH_K, + BPF_S_ALU_RSH_X, + BPF_S_ALU_NEG, + BPF_S_LD_W_ABS, + BPF_S_LD_H_ABS, + BPF_S_LD_B_ABS, + BPF_S_LD_W_LEN, + BPF_S_LD_W_IND, + BPF_S_LD_H_IND, + BPF_S_LD_B_IND, + BPF_S_LD_IMM, + BPF_S_LDX_W_LEN, + BPF_S_LDX_B_MSH, + BPF_S_LDX_IMM, + BPF_S_MISC_TAX, + BPF_S_MISC_TXA, + BPF_S_ALU_DIV_K, + BPF_S_LD_MEM, + BPF_S_LDX_MEM, + BPF_S_ST, + BPF_S_STX, + BPF_S_JMP_JA, + BPF_S_JMP_JEQ_K, + BPF_S_JMP_JEQ_X, + BPF_S_JMP_JGE_K, + BPF_S_JMP_JGE_X, + BPF_S_JMP_JGT_K, + BPF_S_JMP_JGT_X, + BPF_S_JMP_JSET_K, + BPF_S_JMP_JSET_X, +}; + #ifndef BPF_MAXINSNS #define BPF_MAXINSNS 4096 #endif diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 68f883b30a53..68c642d8843d 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -30,12 +30,18 @@ #include <linux/types.h> #include <linux/firewire-constants.h> -#define FW_CDEV_EVENT_BUS_RESET 0x00 -#define FW_CDEV_EVENT_RESPONSE 0x01 -#define FW_CDEV_EVENT_REQUEST 0x02 -#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 -#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 -#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 +#define FW_CDEV_EVENT_BUS_RESET 0x00 +#define FW_CDEV_EVENT_RESPONSE 0x01 +#define FW_CDEV_EVENT_REQUEST 0x02 +#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 +#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 +#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 + +/* available since kernel version 2.6.36 */ +#define FW_CDEV_EVENT_REQUEST2 0x06 +#define FW_CDEV_EVENT_PHY_PACKET_SENT 0x07 +#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08 +#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09 /** * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types @@ -68,6 +74,10 @@ struct fw_cdev_event_common { * This event is sent when the bus the device belongs to goes through a bus * reset. It provides information about the new bus configuration, such as * new node ID for this device, new root ID, and others. + * + * If @bm_node_id is 0xffff right after bus reset it can be reread by an + * %FW_CDEV_IOC_GET_INFO ioctl after bus manager selection was finished. + * Kernels with ABI version < 4 do not set @bm_node_id. */ struct fw_cdev_event_bus_reset { __u64 closure; @@ -82,8 +92,9 @@ struct fw_cdev_event_bus_reset { /** * struct fw_cdev_event_response - Sent when a response packet was received - * @closure: See &fw_cdev_event_common; - * set by %FW_CDEV_IOC_SEND_REQUEST ioctl + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST + * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST + * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE * @rcode: Response code returned by the remote node * @length: Data length, i.e. the response's payload size in bytes @@ -93,6 +104,11 @@ struct fw_cdev_event_bus_reset { * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses * carrying data (read and lock responses) follows immediately and can be * accessed through the @data field. + * + * The event is also generated after conclusions of transactions that do not + * involve response packets. 
This includes unified write transactions, + * broadcast write transactions, and transmission of asynchronous stream + * packets. @rcode indicates success or failure of such transmissions. */ struct fw_cdev_event_response { __u64 closure; @@ -103,11 +119,46 @@ struct fw_cdev_event_response { }; /** - * struct fw_cdev_event_request - Sent on incoming request to an address region + * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2 * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST + * @tcode: See &fw_cdev_event_request2 + * @offset: See &fw_cdev_event_request2 + * @handle: See &fw_cdev_event_request2 + * @length: See &fw_cdev_event_request2 + * @data: See &fw_cdev_event_request2 + * + * This event is sent instead of &fw_cdev_event_request2 if the kernel or + * the client implements ABI version <= 3. + * + * Unlike &fw_cdev_event_request2, the sender identity cannot be established, + * broadcast write requests cannot be distinguished from unicast writes, and + * @tcode of lock requests is %TCODE_LOCK_REQUEST. + * + * Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as + * with &fw_cdev_event_request2, except in kernel 2.6.32 and older which send + * the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl. + */ +struct fw_cdev_event_request { + __u64 closure; + __u32 type; + __u32 tcode; + __u64 offset; + __u32 handle; + __u32 length; + __u32 data[0]; +}; + +/** + * struct fw_cdev_event_request2 - Sent on incoming request to an address region + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl + * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST2 * @tcode: Transaction code of the incoming request * @offset: The offset into the 48-bit per-node address space + * @source_node_id: Sender node ID + * @destination_node_id: Destination node ID + * @card: The index of the card from which the request came + * @generation: Bus generation in which the request is valid * @handle: Reference to the kernel-side pending request * @length: Data length, i.e. the request's payload size in bytes * @data: Incoming data, if any @@ -120,12 +171,42 @@ struct fw_cdev_event_response { * * The payload data for requests carrying data (write and lock requests) * follows immediately and can be accessed through the @data field. + * + * Unlike &fw_cdev_event_request, @tcode of lock requests is one of the + * firewire-core specific %TCODE_LOCK_MASK_SWAP...%TCODE_LOCK_VENDOR_DEPENDENT, + * i.e. encodes the extended transaction code. + * + * @card may differ from &fw_cdev_get_info.card because requests are received + * from all cards of the Linux host. @source_node_id, @destination_node_id, and + * @generation pertain to that card. Destination node ID and bus generation may + * therefore differ from the corresponding fields of the last + * &fw_cdev_event_bus_reset. + * + * @destination_node_id may also differ from the current node ID because of a + * non-local bus ID part or in case of a broadcast write request. Note, a + * client must call an %FW_CDEV_IOC_SEND_RESPONSE ioctl even in case of a + * broadcast write request; the kernel will then release the kernel-side pending + * request but will not actually send a response packet. 
+ * + * In case of a write request to FCP_REQUEST or FCP_RESPONSE, the kernel already + * sent a write response immediately after the request was received; in this + * case the client must still call an %FW_CDEV_IOC_SEND_RESPONSE ioctl to + * release the kernel-side pending request, though another response won't be + * sent. + * + * If the client subsequently needs to initiate requests to the sender node of + * an &fw_cdev_event_request2, it needs to use a device file with matching + * card index, node ID, and generation for outbound requests. */ -struct fw_cdev_event_request { +struct fw_cdev_event_request2 { __u64 closure; __u32 type; __u32 tcode; __u64 offset; + __u32 source_node_id; + __u32 destination_node_id; + __u32 card; + __u32 generation; __u32 handle; __u32 length; __u32 data[0]; @@ -141,26 +222,43 @@ struct fw_cdev_event_request { * @header: Stripped headers, if any * * This event is sent when the controller has completed an &fw_cdev_iso_packet - * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers - * stripped of all packets up until and including the interrupt packet are - * returned in the @header field. The amount of header data per packet is as - * specified at iso context creation by &fw_cdev_create_iso_context.header_size. + * with the %FW_CDEV_ISO_INTERRUPT bit set. * - * In version 1 of this ABI, header data consisted of the 1394 isochronous - * packet header, followed by quadlets from the packet payload if - * &fw_cdev_create_iso_context.header_size > 4. + * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): * - * In version 2 of this ABI, header data consist of the 1394 isochronous - * packet header, followed by a timestamp quadlet if - * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the - * packet payload if &fw_cdev_create_iso_context.header_size > 8. + * In version 3 and some implementations of version 2 of the ABI, &header_length + * is a multiple of 4 and &header contains timestamps of all packets up until + * the interrupt packet. The format of the timestamps is as described below for + * isochronous reception. In version 1 of the ABI, &header_length was 0. * - * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. + * Isochronous receive events (context type %FW_CDEV_ISO_CONTEXT_RECEIVE): + * + * The headers stripped of all packets up until and including the interrupt + * packet are returned in the @header field. The amount of header data per + * packet is as specified at iso context creation by + * &fw_cdev_create_iso_context.header_size. + * + * Hence, _interrupt.header_length / _context.header_size is the number of + * packets received in this interrupt event. The client can now iterate + * through the mmap()'ed DMA buffer according to this number of packets and + * to the buffer sizes as the client specified in &fw_cdev_queue_iso. + * + * Since version 2 of this ABI, the portion for each packet in _interrupt.header + * consists of the 1394 isochronous packet header, followed by a timestamp + * quadlet if &fw_cdev_create_iso_context.header_size > 4, followed by quadlets + * from the packet payload if &fw_cdev_create_iso_context.header_size > 8. * - * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, - * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: - * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte - * order. 
+ * Format of 1394 iso packet header: 16 bits data_length, 2 bits tag, 6 bits + * channel, 4 bits tcode, 4 bits sy, in big endian byte order. + * data_length is the actual received size of the packet without the four + * 1394 iso packet header bytes. + * + * Format of timestamp: 16 bits invalid, 3 bits cycleSeconds, 13 bits + * cycleCount, in big endian byte order. + * + * In version 1 of the ABI, no timestamp quadlet was inserted; instead, payload + * data followed directly after the 1394 is header if header_size > 4. + * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. */ struct fw_cdev_event_iso_interrupt { __u64 closure; @@ -171,6 +269,43 @@ struct fw_cdev_event_iso_interrupt { }; /** + * struct fw_cdev_event_iso_interrupt_mc - An iso buffer chunk was completed + * @closure: See &fw_cdev_event_common; + * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl + * @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL + * @completed: Offset into the receive buffer; data before this offest is valid + * + * This event is sent in multichannel contexts (context type + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer + * chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens + * when a packet is completed and/or when a buffer chunk is completed depends + * on the hardware implementation. + * + * The buffer is continuously filled with the following data, per packet: + * - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt, + * but in little endian byte order, + * - packet payload (as many bytes as specified in the data_length field of + * the 1394 iso packet header) in big endian byte order, + * - 0...3 padding bytes as needed to align the following trailer quadlet, + * - trailer quadlet, containing the reception timestamp as described at + * &fw_cdev_event_iso_interrupt, but in little endian byte order. + * + * Hence the per-packet size is data_length (rounded up to a multiple of 4) + 8. + * When processing the data, stop before a packet that would cross the + * @completed offset. + * + * A packet near the end of a buffer chunk will typically spill over into the + * next queued buffer chunk. It is the responsibility of the client to check + * for this condition, assemble a broken-up packet from its parts, and not to + * re-queue any buffer chunks in which as yet unread packet parts reside. + */ +struct fw_cdev_event_iso_interrupt_mc { + __u64 closure; + __u32 type; + __u32 completed; +}; + +/** * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed * @closure: See &fw_cdev_event_common; * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl @@ -200,15 +335,45 @@ struct fw_cdev_event_iso_resource { }; /** + * struct fw_cdev_event_phy_packet - A PHY packet was transmitted or received + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET + * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl + * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED + * @rcode: %RCODE_..., indicates success or failure of transmission + * @length: Data length in bytes + * @data: Incoming data + * + * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty, + * except in case of a ping packet: Then, @length is 4, and @data[0] is the + * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE. + * + * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data + * consists of the two PHY packet quadlets, in host byte order. 
+ */ +struct fw_cdev_event_phy_packet { + __u64 closure; + __u32 type; + __u32 rcode; + __u32 length; + __u32 data[0]; +}; + +/** * union fw_cdev_event - Convenience union of fw_cdev_event_ types - * @common: Valid for all types - * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET - * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE - * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST - * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT - * @iso_resource: Valid if @common.type == + * @common: Valid for all types + * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET + * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE + * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST + * @request2: Valid if @common.type == %FW_CDEV_EVENT_REQUEST2 + * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT + * @iso_interrupt_mc: Valid if @common.type == + * %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL + * @iso_resource: Valid if @common.type == * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED + * @phy_packet: Valid if @common.type == + * %FW_CDEV_EVENT_PHY_PACKET_SENT or + * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED * * Convenience union for userspace use. Events could be read(2) into an * appropriately aligned char buffer and then cast to this union for further @@ -223,8 +388,11 @@ union fw_cdev_event { struct fw_cdev_event_bus_reset bus_reset; struct fw_cdev_event_response response; struct fw_cdev_event_request request; + struct fw_cdev_event_request2 request2; /* added in 2.6.36 */ struct fw_cdev_event_iso_interrupt iso_interrupt; - struct fw_cdev_event_iso_resource iso_resource; + struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */ + struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */ + struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */ }; /* available since kernel version 2.6.22 */ @@ -256,23 +424,46 @@ union fw_cdev_event { /* available since kernel version 2.6.34 */ #define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2) +/* available since kernel version 2.6.36 */ +#define FW_CDEV_IOC_SEND_PHY_PACKET _IOWR('#', 0x15, struct fw_cdev_send_phy_packet) +#define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets) +#define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels) + /* - * FW_CDEV_VERSION History + * ABI version history * 1 (2.6.22) - initial version + * (2.6.24) - added %FW_CDEV_IOC_GET_CYCLE_TIMER * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if * &fw_cdev_create_iso_context.header_size is 8 or more + * - added %FW_CDEV_IOC_*_ISO_RESOURCE*, + * %FW_CDEV_IOC_GET_SPEED, %FW_CDEV_IOC_SEND_BROADCAST_REQUEST, + * %FW_CDEV_IOC_SEND_STREAM_PACKET * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt * (2.6.33) - IR has always packet-per-buffer semantics now, not one of * dual-buffer or packet-per-buffer depending on hardware + * - shared use and auto-response for FCP registers * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable + * - added %FW_CDEV_IOC_GET_CYCLE_TIMER2 + * 4 (2.6.36) - added %FW_CDEV_EVENT_REQUEST2, %FW_CDEV_EVENT_PHY_PACKET_*, + * and &fw_cdev_allocate.region_end + * - implemented &fw_cdev_event_bus_reset.bm_node_id + * - added %FW_CDEV_IOC_SEND_PHY_PACKET, _RECEIVE_PHY_PACKETS + * - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL, + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and + 
* %FW_CDEV_IOC_SET_ISO_CHANNELS */ -#define FW_CDEV_VERSION 3 +#define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */ /** * struct fw_cdev_get_info - General purpose information ioctl - * @version: The version field is just a running serial number. - * We never break backwards compatibility, but may add more - * structs and ioctls in later revisions. + * @version: The version field is just a running serial number. Both an + * input parameter (ABI version implemented by the client) and + * output parameter (ABI version implemented by the kernel). + * A client must not fill in an %FW_CDEV_VERSION defined from an + * included kernel header file but the actual version for which + * the client was implemented. This is necessary for forward + * compatibility. We never break backwards compatibility, but + * may add more structs, events, and ioctls in later revisions. * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration * ROM will be copied into that user space address. In either * case, @rom_length is updated with the actual length of the @@ -339,28 +530,48 @@ struct fw_cdev_send_response { }; /** - * struct fw_cdev_allocate - Allocate a CSR address range + * struct fw_cdev_allocate - Allocate a CSR in an address range * @offset: Start offset of the address range * @closure: To be passed back to userspace in request events - * @length: Length of the address range, in bytes + * @length: Length of the CSR, in bytes * @handle: Handle to the allocation, written by the kernel + * @region_end: First address above the address range (added in ABI v4, 2.6.36) * * Allocate an address range in the 48-bit address space on the local node * (the controller). This allows userspace to listen for requests with an - * offset within that address range. When the kernel receives a request - * within the range, an &fw_cdev_event_request event will be written back. - * The @closure field is passed back to userspace in the response event. + * offset within that address range. Every time when the kernel receives a + * request within the range, an &fw_cdev_event_request2 event will be emitted. + * (If the kernel or the client implements ABI version <= 3, an + * &fw_cdev_event_request will be generated instead.) + * + * The @closure field is passed back to userspace in these request events. * The @handle field is an out parameter, returning a handle to the allocated * range to be used for later deallocation of the range. * * The address range is allocated on all local nodes. The address allocation - * is exclusive except for the FCP command and response registers. + * is exclusive except for the FCP command and response registers. If an + * exclusive address region is already in use, the ioctl fails with errno set + * to %EBUSY. + * + * If kernel and client implement ABI version >= 4, the kernel looks up a free + * spot of size @length inside [@offset..@region_end) and, if found, writes + * the start address of the new CSR back in @offset. I.e. @offset is an + * in and out parameter. If this automatic placement of a CSR in a bigger + * address range is not desired, the client simply needs to set @region_end + * = @offset + @length. + * + * If the kernel or the client implements ABI version <= 3, @region_end is + * ignored and effectively assumed to be @offset + @length. + * + * @region_end is only present in a kernel header >= 2.6.36. If necessary, + * this can for example be tested by #ifdef FW_CDEV_EVENT_REQUEST2. 
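A minimal userspace sketch (not part of this patch) of how an ABI v4 client might use the in/out @offset together with the new @region_end placement; the address range, length, and the assumption of an already-opened local-node fd are illustrative only:

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Illustrative only: ask the kernel to place a 256-byte CSR anywhere in
 * [0xffffe0000000, 0xffffe0001000).  On an ABI v4 kernel the chosen start
 * address comes back in a.offset. */
static int allocate_csr(int fd, __u64 *offset_out, __u32 *handle_out)
{
	struct fw_cdev_allocate a = {
		.offset     = 0xffffe0000000ULL,
		.closure    = 0,
		.length     = 256,
#ifdef FW_CDEV_EVENT_REQUEST2		/* region_end exists in headers >= 2.6.36 */
		.region_end = 0xffffe0001000ULL,
#endif
	};
	int err = ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a);

	if (err < 0)
		return err;
	*offset_out = a.offset;
	*handle_out = a.handle;
	return 0;
}

On an ABI <= 3 kernel the same call ignores @region_end and allocates exactly at @offset, as described above.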
*/ struct fw_cdev_allocate { __u64 offset; __u64 closure; __u32 length; __u32 handle; + __u64 region_end; /* available since kernel version 2.6.36 */ }; /** @@ -382,9 +593,14 @@ struct fw_cdev_deallocate { * Initiate a bus reset for the bus this device is on. The bus reset can be * either the original (long) bus reset or the arbitrated (short) bus reset * introduced in 1394a-2000. + * + * The ioctl returns immediately. A subsequent &fw_cdev_event_bus_reset + * indicates when the reset actually happened. Since ABI v4, this may be + * considerably later than the ioctl because the kernel ensures a grace period + * between subsequent bus resets as per IEEE 1394 bus management specification. */ struct fw_cdev_initiate_bus_reset { - __u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */ + __u32 type; }; /** @@ -408,9 +624,10 @@ struct fw_cdev_initiate_bus_reset { * * @immediate, @key, and @data array elements are CPU-endian quadlets. * - * If successful, the kernel adds the descriptor and writes back a handle to the - * kernel-side object to be used for later removal of the descriptor block and - * immediate key. + * If successful, the kernel adds the descriptor and writes back a @handle to + * the kernel-side object to be used for later removal of the descriptor block + * and immediate key. The kernel will also generate a bus reset to signal the + * change of the configuration ROM to other nodes. * * This ioctl affects the configuration ROMs of all local nodes. * The ioctl only succeeds on device files which represent a local node. @@ -429,38 +646,50 @@ struct fw_cdev_add_descriptor { * descriptor was added * * Remove a descriptor block and accompanying immediate key from the local - * nodes' configuration ROMs. + * nodes' configuration ROMs. The kernel will also generate a bus reset to + * signal the change of the configuration ROM to other nodes. */ struct fw_cdev_remove_descriptor { __u32 handle; }; -#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 -#define FW_CDEV_ISO_CONTEXT_RECEIVE 1 +#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 +#define FW_CDEV_ISO_CONTEXT_RECEIVE 1 +#define FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 /* added in 2.6.36 */ /** - * struct fw_cdev_create_iso_context - Create a context for isochronous IO - * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE - * @header_size: Header size to strip for receive contexts - * @channel: Channel to bind to - * @speed: Speed for transmit contexts - * @closure: To be returned in &fw_cdev_event_iso_interrupt + * struct fw_cdev_create_iso_context - Create a context for isochronous I/O + * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE or + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL + * @header_size: Header size to strip in single-channel reception + * @channel: Channel to bind to in single-channel reception or transmission + * @speed: Transmission speed + * @closure: To be returned in &fw_cdev_event_iso_interrupt or + * &fw_cdev_event_iso_interrupt_multichannel * @handle: Handle to context, written back by kernel * * Prior to sending or receiving isochronous I/O, a context must be created. * The context records information about the transmit or receive configuration * and typically maps to an underlying hardware resource. A context is set up * for either sending or receiving. It is bound to a specific isochronous - * channel. + * @channel. + * + * In case of multichannel reception, @header_size and @channel are ignored + * and the channels are selected by %FW_CDEV_IOC_SET_ISO_CHANNELS. 
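A hedged sketch (not part of this patch) of pairing the multichannel context type with the %FW_CDEV_IOC_SET_ISO_CHANNELS ioctl introduced further below; the channel mask is an arbitrary example:

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Illustrative only: create a multichannel receive context and listen to
 * channels 0, 1 and 5.  header_size and channel are ignored for this
 * context type; the channels are chosen via SET_ISO_CHANNELS instead. */
static int setup_multichannel_rx(int fd, __u32 *handle_out)
{
	struct fw_cdev_create_iso_context c = {
		.type    = FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
		.closure = 0,
	};
	struct fw_cdev_set_iso_channels s;
	int err;

	err = ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &c);
	if (err < 0)
		return err;

	s.channels = (1ULL << 0) | (1ULL << 1) | (1ULL << 5);
	s.handle   = c.handle;
	err = ioctl(fd, FW_CDEV_IOC_SET_ISO_CHANNELS, &s);
	if (err < 0)
		return err;	/* on EBUSY, s.channels holds the unoccupied channels */

	*handle_out = c.handle;
	return 0;
}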
+ * + * For %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, @header_size must be at least 4 + * and must be a multiple of 4. It is ignored in other context types. + * + * @speed is ignored in receive context types. * * If a context was successfully created, the kernel writes back a handle to the * context, which must be passed in for subsequent operations on that context. * - * For receive contexts, @header_size must be at least 4 and must be a multiple - * of 4. - * - * Note that the effect of a @header_size > 4 depends on - * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. + * Limitations: + * No more than one iso context can be created per fd. + * The total number of contexts that all userspace and kernelspace drivers can + * create on a card at a time is a hardware limit, typically 4 or 8 contexts per + * direction, and of them at most one multichannel receive context. */ struct fw_cdev_create_iso_context { __u32 type; @@ -471,6 +700,22 @@ struct fw_cdev_create_iso_context { __u32 handle; }; +/** + * struct fw_cdev_set_iso_channels - Select channels in multichannel reception + * @channels: Bitmask of channels to listen to + * @handle: Handle of the multichannel receive context + * + * @channels is the bitwise or of 1ULL << n for each channel n to listen to. + * + * The ioctl fails with errno %EBUSY if there is already another receive context + * on a channel in @channels. In that case, the bitmask of all unoccupied + * channels is returned in @channels. + */ +struct fw_cdev_set_iso_channels { + __u64 channels; + __u32 handle; +}; + #define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v) #define FW_CDEV_ISO_INTERRUPT (1 << 16) #define FW_CDEV_ISO_SKIP (1 << 17) @@ -481,42 +726,72 @@ struct fw_cdev_create_iso_context { /** * struct fw_cdev_iso_packet - Isochronous packet - * @control: Contains the header length (8 uppermost bits), the sy field - * (4 bits), the tag field (2 bits), a sync flag (1 bit), - * a skip flag (1 bit), an interrupt flag (1 bit), and the + * @control: Contains the header length (8 uppermost bits), + * the sy field (4 bits), the tag field (2 bits), a sync flag + * or a skip flag (1 bit), an interrupt flag (1 bit), and the * payload length (16 lowermost bits) - * @header: Header and payload + * @header: Header and payload in case of a transmit context. * * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. - * * Use the FW_CDEV_ISO_ macros to fill in @control. + * The @header array is empty in case of receive contexts. + * + * Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT: + * + * @control.HEADER_LENGTH must be a multiple of 4. It specifies the number of + * bytes in @header that will be prepended to the packet's payload. These bytes + * are copied into the kernel and will not be accessed after the ioctl has + * returned. + * + * The @control.SY and TAG fields are copied to the iso packet header. These + * fields are specified by IEEE 1394a and IEC 61883-1. + * + * The @control.SKIP flag specifies that no packet is to be sent in a frame. + * When using this, all other fields except @control.INTERRUPT must be zero. + * + * When a packet with the @control.INTERRUPT flag set has been completed, an + * &fw_cdev_event_iso_interrupt event will be sent. + * + * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE: + * + * @control.HEADER_LENGTH must be a multiple of the context's header_size. + * If the HEADER_LENGTH is larger than the context's header_size, multiple + * packets are queued for this entry. + * + * The @control.SY and TAG fields are ignored.
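As an illustration of the transmit encoding just described, a hedged sketch (not part of this patch) of one packet entry as it would be laid out in the array passed to the queue ioctl; the FW_CDEV_ISO_HEADER_LENGTH, FW_CDEV_ISO_TAG and FW_CDEV_ISO_SY helpers are assumed from the unchanged part of this header, and the 8-byte immediate header is only an example:

#include <stddef.h>
#include <linux/firewire-cdev.h>

/* Illustrative only: one transmit packet entry, i.e. the control quadlet
 * followed by two immediate header quadlets (8 bytes, e.g. a CIP header),
 * with 488 payload bytes, tag 1, sy 0, and a completion interrupt. */
static size_t fill_it_entry(__u32 *entry, __u32 hdr0, __u32 hdr1)
{
	entry[0] = FW_CDEV_ISO_PAYLOAD_LENGTH(488) |
		   FW_CDEV_ISO_HEADER_LENGTH(8) |
		   FW_CDEV_ISO_TAG(1) |
		   FW_CDEV_ISO_SY(0) |
		   FW_CDEV_ISO_INTERRUPT;
	entry[1] = hdr0;	/* first immediate header quadlet */
	entry[2] = hdr1;	/* second immediate header quadlet */
	return 3 * sizeof(__u32);	/* size of this entry in bytes */
}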
+ * + * If the @control.SYNC flag is set, the context drops all packets until a + * packet with a sy field is received which matches &fw_cdev_start_iso.sync. + * + * @control.PAYLOAD_LENGTH defines how many payload bytes can be received for + * one packet (in addition to payload quadlets that have been defined as headers + * and are stripped and returned in the &fw_cdev_event_iso_interrupt structure). + * If more bytes are received, the additional bytes are dropped. If less bytes + * are received, the remaining bytes in this part of the payload buffer will not + * be written to, not even by the next packet. I.e., packets received in + * consecutive frames will not necessarily be consecutive in memory. If an + * entry has queued multiple packets, the PAYLOAD_LENGTH is divided equally + * among them. * - * For transmit packets, the header length must be a multiple of 4 and specifies - * the numbers of bytes in @header that will be prepended to the packet's - * payload; these bytes are copied into the kernel and will not be accessed - * after the ioctl has returned. The sy and tag fields are copied to the iso - * packet header (these fields are specified by IEEE 1394a and IEC 61883-1). - * The skip flag specifies that no packet is to be sent in a frame; when using - * this, all other fields except the interrupt flag must be zero. - * - * For receive packets, the header length must be a multiple of the context's - * header size; if the header length is larger than the context's header size, - * multiple packets are queued for this entry. The sy and tag fields are - * ignored. If the sync flag is set, the context drops all packets until - * a packet with a matching sy field is received (the sync value to wait for is - * specified in the &fw_cdev_start_iso structure). The payload length defines - * how many payload bytes can be received for one packet (in addition to payload - * quadlets that have been defined as headers and are stripped and returned in - * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the - * additional bytes are dropped. If less bytes are received, the remaining - * bytes in this part of the payload buffer will not be written to, not even by - * the next packet, i.e., packets received in consecutive frames will not - * necessarily be consecutive in memory. If an entry has queued multiple - * packets, the payload length is divided equally among them. - * - * When a packet with the interrupt flag set has been completed, the + * When a packet with the @control.INTERRUPT flag set has been completed, an * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued * multiple receive packets is completed when its last packet is completed. + * + * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + * + * Here, &fw_cdev_iso_packet would be more aptly named _iso_buffer_chunk since + * it specifies a chunk of the mmap()'ed buffer, while the number and alignment + * of packets to be placed into the buffer chunk is not known beforehand. + * + * @control.PAYLOAD_LENGTH is the size of the buffer chunk and specifies room + * for header, payload, padding, and trailer bytes of one or more packets. + * It must be a multiple of 4. + * + * @control.HEADER_LENGTH, TAG and SY are ignored. SYNC is treated as described + * for single-channel reception. + * + * When a buffer chunk with the @control.INTERRUPT flag set has been filled + * entirely, an &fw_cdev_event_iso_interrupt_mc event will be sent. 
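To make the multichannel buffer layout concrete, a hedged consumer sketch (not part of this patch); the offset bookkeeping across buffer chunks is the client's own and is only assumed here:

#include <endian.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Illustrative only: parse packets out of a multichannel receive buffer,
 * stopping before a packet that would cross the 'completed' offset from
 * fw_cdev_event_iso_interrupt_mc.  'buf' is the start of the mmap()'ed
 * DMA buffer; 'pos' and 'completed' are byte offsets into it. */
static size_t parse_mc_buffer(const uint8_t *buf, size_t pos, size_t completed)
{
	while (pos + 4 <= completed) {
		uint32_t hdr;
		size_t data_length, packet_size;

		memcpy(&hdr, buf + pos, 4);
		hdr = le32toh(hdr);		/* iso header is little endian here */
		data_length = hdr >> 16;	/* (hdr >> 8) & 0x3f is the channel */
		/* header + payload rounded up to a quadlet + trailer quadlet */
		packet_size = 4 + ((data_length + 3) & ~(size_t)3) + 4;

		if (pos + packet_size > completed)
			break;			/* packet not yet complete */

		/* payload starts at buf + pos + 4 (big endian); the trailer
		 * timestamp (little endian) is the last quadlet */
		pos += packet_size;
	}
	return pos;				/* resume here on the next event */
}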
*/ struct fw_cdev_iso_packet { __u32 control; @@ -525,9 +800,9 @@ struct fw_cdev_iso_packet { /** * struct fw_cdev_queue_iso - Queue isochronous packets for I/O - * @packets: Userspace pointer to packet data + * @packets: Userspace pointer to an array of &fw_cdev_iso_packet * @data: Pointer into mmap()'ed payload buffer - * @size: Size of packet data in bytes + * @size: Size of the @packets array, in bytes * @handle: Isochronous context handle * * Queue a number of isochronous packets for reception or transmission. @@ -540,6 +815,9 @@ struct fw_cdev_iso_packet { * The kernel may or may not queue all packets, but will write back updated * values of the @packets, @data and @size fields, so the ioctl can be * resubmitted easily. + * + * In case of a multichannel receive context, @data must be quadlet-aligned + * relative to the buffer start. */ struct fw_cdev_queue_iso { __u64 packets; @@ -698,4 +976,39 @@ struct fw_cdev_send_stream_packet { __u32 speed; }; +/** + * struct fw_cdev_send_phy_packet - send a PHY packet + * @closure: Passed back to userspace in the PHY-packet-sent event + * @data: First and second quadlet of the PHY packet + * @generation: The bus generation where packet is valid + * + * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes + * on the same card as this device. After transmission, an + * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated. + * + * The payload @data[] shall be specified in host byte order. Usually, + * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets + * are an exception to this rule. + * + * The ioctl is only permitted on device files which represent a local node. + */ +struct fw_cdev_send_phy_packet { + __u64 closure; + __u32 data[2]; + __u32 generation; +}; + +/** + * struct fw_cdev_receive_phy_packets - start reception of PHY packets + * @closure: Passed back to userspace in phy packet events + * + * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to + * incoming PHY packets from any node on the same bus as the device. + * + * The ioctl is only permitted on device files which represent a local node. + */ +struct fw_cdev_receive_phy_packets { + __u64 closure; +}; + #endif /* _LINUX_FIREWIRE_CDEV_H */ diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 72e2b8ac2a5a..1cd637ef62d2 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -32,11 +32,13 @@ #define CSR_CYCLE_TIME 0x200 #define CSR_BUS_TIME 0x204 #define CSR_BUSY_TIMEOUT 0x210 +#define CSR_PRIORITY_BUDGET 0x218 #define CSR_BUS_MANAGER_ID 0x21c #define CSR_BANDWIDTH_AVAILABLE 0x220 #define CSR_CHANNELS_AVAILABLE 0x224 #define CSR_CHANNELS_AVAILABLE_HI 0x224 #define CSR_CHANNELS_AVAILABLE_LO 0x228 +#define CSR_MAINT_UTILITY 0x230 #define CSR_BROADCAST_CHANNEL 0x234 #define CSR_CONFIG_ROM 0x400 #define CSR_CONFIG_ROM_END 0x800 @@ -89,6 +91,11 @@ struct fw_card { struct list_head transaction_list; unsigned long reset_jiffies; + u32 split_timeout_hi; + u32 split_timeout_lo; + unsigned int split_timeout_cycles; + unsigned int split_timeout_jiffies; + unsigned long long guid; unsigned max_receive; int link_speed; @@ -104,18 +111,28 @@ struct fw_card { bool beta_repeaters_present; int index; - struct list_head link; - /* Work struct for BM duties. 
*/ - struct delayed_work work; + struct list_head phy_receiver_list; + + struct delayed_work br_work; /* bus reset job */ + bool br_short; + + struct delayed_work bm_work; /* bus manager job */ int bm_retries; int bm_generation; __be32 bm_transaction_data[2]; + int bm_node_id; + bool bm_abdicate; + + bool priority_budget_implemented; /* controller feature */ + bool broadcast_channel_auto_allocated; /* controller feature */ bool broadcast_channel_allocated; u32 broadcast_channel; __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; + + __be32 maint_utility_register; }; struct fw_attribute_group { @@ -252,7 +269,7 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, typedef void (*fw_address_callback_t)(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, - int generation, int speed, + int generation, unsigned long long offset, void *data, size_t length, void *callback_data); @@ -269,10 +286,10 @@ struct fw_packet { u32 timestamp; /* - * This callback is called when the packet transmission has - * completed; for successful transmission, the status code is - * the ack received from the destination, otherwise it's a - * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. + * This callback is called when the packet transmission has completed. + * For successful transmission, the status code is the ack received + * from the destination. Otherwise it is one of the juju-specific + * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. * The callback can be called from tasklet context and thus * must never block. */ @@ -355,17 +372,19 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc); * scatter-gather streaming (e.g. assembling video frame automatically). */ struct fw_iso_packet { - u16 payload_length; /* Length of indirect payload. */ - u32 interrupt:1; /* Generate interrupt on this packet */ - u32 skip:1; /* Set to not send packet at all. */ - u32 tag:2; - u32 sy:4; - u32 header_length:8; /* Length of immediate header. */ - u32 header[0]; + u16 payload_length; /* Length of indirect payload */ + u32 interrupt:1; /* Generate interrupt on this packet */ + u32 skip:1; /* tx: Set to not send packet at all */ + /* rx: Sync bit, wait for matching sy */ + u32 tag:2; /* tx: Tag in packet header */ + u32 sy:4; /* tx: Sy in packet header */ + u32 header_length:8; /* Length of immediate header */ + u32 header[0]; /* tx: Top of 1394 isoch. 
data_block */ }; -#define FW_ISO_CONTEXT_TRANSMIT 0 -#define FW_ISO_CONTEXT_RECEIVE 1 +#define FW_ISO_CONTEXT_TRANSMIT 0 +#define FW_ISO_CONTEXT_RECEIVE 1 +#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 #define FW_ISO_CONTEXT_MATCH_TAG0 1 #define FW_ISO_CONTEXT_MATCH_TAG1 2 @@ -389,24 +408,31 @@ struct fw_iso_buffer { int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, int page_count, enum dma_data_direction direction); void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); +size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); struct fw_iso_context; typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data); +typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, + dma_addr_t completed, void *data); struct fw_iso_context { struct fw_card *card; int type; int channel; int speed; size_t header_size; - fw_iso_callback_t callback; + union { + fw_iso_callback_t sc; + fw_iso_mc_callback_t mc; + } callback; void *callback_data; }; struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type, int channel, int speed, size_t header_size, fw_iso_callback_t callback, void *callback_data); +int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); int fw_iso_context_queue(struct fw_iso_context *ctx, struct fw_iso_packet *packet, struct fw_iso_buffer *buffer, diff --git a/include/linux/fs.h b/include/linux/fs.h index 471e1ff5079a..e5106e49bd2c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -53,6 +53,7 @@ struct inodes_stat_t { #define MAY_APPEND 8 #define MAY_ACCESS 16 #define MAY_OPEN 32 +#define MAY_CHDIR 64 /* * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond @@ -415,7 +416,8 @@ struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, - ssize_t bytes, void *private); + ssize_t bytes, void *private, int ret, + bool is_async); /* * Attribute flags. These should be or-ed together to figure out what @@ -1783,6 +1785,19 @@ extern int get_sb_pseudo(struct file_system_type *, char *, struct vfsmount *mnt); extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); +static inline void sb_mark_dirty(struct super_block *sb) +{ + sb->s_dirt = 1; +} +static inline void sb_mark_clean(struct super_block *sb) +{ + sb->s_dirt = 0; +} +static inline int sb_is_dirty(struct super_block *sb) +{ + return sb->s_dirt; +} + /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ #define fops_get(fops) \ (((fops) && try_module_get((fops)->owner) ? 
(fops) : NULL)) diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index c57db27ac861..b8581c09d19f 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -20,7 +20,7 @@ #include <linux/fscache.h> #include <linux/sched.h> -#include <linux/slow-work.h> +#include <linux/workqueue.h> #define NR_MAXCACHES BITS_PER_LONG @@ -76,18 +76,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op); typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); struct fscache_operation { - union { - struct work_struct fast_work; /* record for fast ops */ - struct slow_work slow_work; /* record for (very) slow ops */ - }; + struct work_struct work; /* record for async ops */ struct list_head pend_link; /* link in object->pending_ops */ struct fscache_object *object; /* object to be operated upon */ unsigned long flags; #define FSCACHE_OP_TYPE 0x000f /* operation type */ -#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */ -#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */ -#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done be issuing thread, not pool */ +#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */ +#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ #define FSCACHE_OP_DEAD 6 /* op is now dead */ @@ -105,7 +101,8 @@ struct fscache_operation { /* operation releaser */ fscache_operation_release_t release; -#ifdef CONFIG_SLOW_WORK_DEBUG +#ifdef CONFIG_WORKQUEUE_DEBUGFS + struct work_struct put_work; /* work to delay operation put */ const char *name; /* operation name */ const char *state; /* operation state */ #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) @@ -117,7 +114,7 @@ struct fscache_operation { }; extern atomic_t fscache_op_debug_id; -extern const struct slow_work_ops fscache_op_slow_work_ops; +extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); extern void fscache_put_operation(struct fscache_operation *); @@ -128,33 +125,21 @@ extern void fscache_put_operation(struct fscache_operation *); * @release: The release function to assign * * Do basic initialisation of an operation. The caller must still set flags, - * object, either fast_work or slow_work if necessary, and processor if needed. + * object and processor if needed. */ static inline void fscache_operation_init(struct fscache_operation *op, - fscache_operation_release_t release) + fscache_operation_processor_t processor, + fscache_operation_release_t release) { + INIT_WORK(&op->work, fscache_op_work_func); atomic_set(&op->usage, 1); op->debug_id = atomic_inc_return(&fscache_op_debug_id); + op->processor = processor; op->release = release; INIT_LIST_HEAD(&op->pend_link); fscache_set_op_state(op, "Init"); } -/** - * fscache_operation_init_slow - Do additional initialisation of a slow op - * @op: The operation to initialise - * @processor: The processor function to assign - * - * Do additional initialisation of an operation as required for slow work. 
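A hedged kernel-side sketch (not from this patch) of how a cache backend sets up an operation with the new three-argument fscache_operation_init(); the callback names and the flag combination are illustrative assumptions:

#include <linux/fscache-cache.h>

/* Illustrative only: with slow-work gone, the processor routine is passed
 * to fscache_operation_init() directly and the op is marked FSCACHE_OP_ASYNC
 * instead of going through fscache_operation_init_slow(). */
static void my_op_processor(struct fscache_operation *op)
{
	/* perform the deferred work for this operation */
}

static void my_op_release(struct fscache_operation *op)
{
	/* free any backend-private state attached to the operation */
}

static void my_backend_init_op(struct fscache_operation *op)
{
	fscache_operation_init(op, my_op_processor, my_op_release);
	fscache_set_op_name(op, "MyOp");
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
}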
- */ -static inline -void fscache_operation_init_slow(struct fscache_operation *op, - fscache_operation_processor_t processor) -{ - op->processor = processor; - slow_work_init(&op->slow_work, &fscache_op_slow_work_ops); -} - /* * data read operation */ @@ -389,7 +374,7 @@ struct fscache_object { struct fscache_cache *cache; /* cache that supplied this object */ struct fscache_cookie *cookie; /* netfs's file/index object */ struct fscache_object *parent; /* parent object */ - struct slow_work work; /* attention scheduling record */ + struct work_struct work; /* attention scheduling record */ struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ @@ -411,7 +396,7 @@ extern const char *fscache_object_states[]; (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ (obj)->state >= FSCACHE_OBJECT_DYING) -extern const struct slow_work_ops fscache_object_slow_work_ops; +extern void fscache_object_work_func(struct work_struct *work); /** * fscache_object_init - Initialise a cache object description @@ -433,7 +418,7 @@ void fscache_object_init(struct fscache_object *object, spin_lock_init(&object->lock); INIT_LIST_HEAD(&object->cache_link); INIT_HLIST_NODE(&object->cookie_link); - vslow_work_init(&object->work, &fscache_object_slow_work_ops); + INIT_WORK(&object->work, fscache_object_work_func); INIT_LIST_HEAD(&object->dependents); INIT_LIST_HEAD(&object->dep_link); INIT_LIST_HEAD(&object->pending_ops); @@ -534,6 +519,8 @@ extern void fscache_io_error(struct fscache_cache *cache); extern void fscache_mark_pages_cached(struct fscache_retrieval *op, struct pagevec *pagevec); +extern bool fscache_object_sleep_till_congested(signed long *timeoutp); + extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, uint16_t datalen); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 595ce49288b7..ec0dad5ab90f 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -85,7 +85,7 @@ struct fscache_cookie_def { /* get an index key * - should store the key data in the buffer - * - should return the amount of amount stored + * - should return the amount of data stored * - not permitted to return an error * - the netfs data from the cookie being used as the source is * presented @@ -454,6 +454,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie, * @cookie: The cookie representing the cache object * @mapping: The netfs inode mapping to which the pages will be attached * @pages: A list of potential netfs pages to be filled + * @nr_pages: Number of pages to be read and/or allocated * @end_io_func: The callback to invoke when and if each page is filled * @context: An arbitrary piece of data to pass on to end_io_func() * @gfp: The conditions under which memory allocation should be made diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h new file mode 100644 index 000000000000..fc295d7ea463 --- /dev/null +++ b/include/linux/fsl-diu-fb.h @@ -0,0 +1,223 @@ +/* + * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. 
+ * + * Freescale DIU Frame Buffer device driver + * + * Authors: Hongjun Chen <hong-jun.chen@freescale.com> + * Paul Widmer <paul.widmer@freescale.com> + * Srikanth Srinivasan <srikanth.srinivasan@freescale.com> + * York Sun <yorksun@freescale.com> + * + * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __FSL_DIU_FB_H__ +#define __FSL_DIU_FB_H__ + +/* Arbitrary threshold to determine the allocation method + * See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory() + */ +#define MEM_ALLOC_THRESHOLD (1024*768*4+32) +/* Minimum value that the pixel clock can be set to in pico seconds + * This is determined by platform clock/3 where the minimum platform + * clock is 533MHz. This gives 5629 pico seconds. + */ +#define MIN_PIX_CLK 5629 +#define MAX_PIX_CLK 96096 + +#include <linux/types.h> + +struct mfb_alpha { + int enable; + int alpha; +}; + +struct mfb_chroma_key { + int enable; + __u8 red_max; + __u8 green_max; + __u8 blue_max; + __u8 red_min; + __u8 green_min; + __u8 blue_min; +}; + +struct aoi_display_offset { + int x_aoi_d; + int y_aoi_d; +}; + +#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) +#define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t) +#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) + +#define MFB_SET_ALPHA 0x80014d00 +#define MFB_GET_ALPHA 0x40014d00 +#define MFB_SET_AOID 0x80084d04 +#define MFB_GET_AOID 0x40084d04 +#define MFB_SET_PIXFMT 0x80014d08 +#define MFB_GET_PIXFMT 0x40014d08 + +#define FBIOGET_GWINFO 0x46E0 +#define FBIOPUT_GWINFO 0x46E1 + +#ifdef __KERNEL__ +#include <linux/spinlock.h> + +/* + * These are the fields of area descriptor(in DDR memory) for every plane + */ +struct diu_ad { + /* Word 0(32-bit) in DDR memory */ +/* __u16 comp; */ +/* __u16 pixel_s:2; */ +/* __u16 pallete:1; */ +/* __u16 red_c:2; */ +/* __u16 green_c:2; */ +/* __u16 blue_c:2; */ +/* __u16 alpha_c:3; */ +/* __u16 byte_f:1; */ +/* __u16 res0:3; */ + + __be32 pix_fmt; /* hard coding pixel format */ + + /* Word 1(32-bit) in DDR memory */ + __le32 addr; + + /* Word 2(32-bit) in DDR memory */ +/* __u32 delta_xs:11; */ +/* __u32 res1:1; */ +/* __u32 delta_ys:11; */ +/* __u32 res2:1; */ +/* __u32 g_alpha:8; */ + __le32 src_size_g_alpha; + + /* Word 3(32-bit) in DDR memory */ +/* __u32 delta_xi:11; */ +/* __u32 res3:5; */ +/* __u32 delta_yi:11; */ +/* __u32 res4:3; */ +/* __u32 flip:2; */ + __le32 aoi_size; + + /* Word 4(32-bit) in DDR memory */ + /*__u32 offset_xi:11; + __u32 res5:5; + __u32 offset_yi:11; + __u32 res6:5; + */ + __le32 offset_xyi; + + /* Word 5(32-bit) in DDR memory */ + /*__u32 offset_xd:11; + __u32 res7:5; + __u32 offset_yd:11; + __u32 res8:5; */ + __le32 offset_xyd; + + + /* Word 6(32-bit) in DDR memory */ + __u8 ckmax_r; + __u8 ckmax_g; + __u8 ckmax_b; + __u8 res9; + + /* Word 7(32-bit) in DDR memory */ + __u8 ckmin_r; + __u8 ckmin_g; + __u8 ckmin_b; + __u8 res10; +/* __u32 res10:8; */ + + /* Word 8(32-bit) in DDR memory */ + __le32 next_ad; + + /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */ + __u32 paddr; +} __attribute__ ((packed)); + +/* DIU register map */ +struct diu { + __be32 desc[3]; + __be32 gamma; + __be32 pallete; + __be32 cursor; + __be32 curs_pos; + __be32 diu_mode; + __be32 bgnd; + __be32 bgnd_wb; + __be32 disp_size; + __be32 wb_size; + 
__be32 wb_mem_addr; + __be32 hsyn_para; + __be32 vsyn_para; + __be32 syn_pol; + __be32 thresholds; + __be32 int_status; + __be32 int_mask; + __be32 colorbar[8]; + __be32 filling; + __be32 plut; +} __attribute__ ((packed)); + +struct diu_hw { + struct diu *diu_reg; + spinlock_t reg_lock; + + __u32 mode; /* DIU operation mode */ +}; + +struct diu_addr { + __u8 __iomem *vaddr; /* Virtual address */ + dma_addr_t paddr; /* Physical address */ + __u32 offset; +}; + +struct diu_pool { + struct diu_addr ad; + struct diu_addr gamma; + struct diu_addr pallete; + struct diu_addr cursor; +}; + +#define FSL_DIU_BASE_OFFSET 0x2C000 /* Offset of DIU */ +#define INT_LCDC 64 /* DIU interrupt number */ + +#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */ + /* 1 for plane 0, 2 for plane 1&2 each */ + +/* Minimum X and Y resolutions */ +#define MIN_XRES 64 +#define MIN_YRES 64 + +/* HW cursor parameters */ +#define MAX_CURS 32 + +/* Modes of operation of DIU */ +#define MFB_MODE0 0 /* DIU off */ +#define MFB_MODE1 1 /* All three planes output to display */ +#define MFB_MODE2 2 /* Plane 1 to display, planes 2+3 written back*/ +#define MFB_MODE3 3 /* All three planes written back to memory */ +#define MFB_MODE4 4 /* Color bar generation */ + +/* INT_STATUS/INT_MASK field descriptions */ +#define INT_VSYNC 0x01 /* Vsync interrupt */ +#define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */ +#define INT_UNDRUN 0x04 /* Under run exception interrupt */ +#define INT_PARERR 0x08 /* Display parameters error interrupt */ +#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */ + +/* Panels'operation modes */ +#define MFB_TYPE_OUTPUT 0 /* Panel output to display */ +#define MFB_TYPE_OFF 1 /* Panel off */ +#define MFB_TYPE_WB 2 /* Panel written back to memory */ +#define MFB_TYPE_TEST 3 /* Panel generate color bar */ + +#endif /* __KERNEL__ */ +#endif /* __FSL_DIU_FB_H__ */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 41e46330d9be..dcd6a7c3a435 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1,3 +1,8 @@ +/* + * Ftrace header. For implementation details beyond the random comments + * scattered below, see: Documentation/trace/ftrace-design.txt + */ + #ifndef _LINUX_FTRACE_H #define _LINUX_FTRACE_H diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 3167f2df4126..02b8b24f8f51 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -11,8 +11,6 @@ struct trace_array; struct tracer; struct dentry; -DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq); - struct trace_print_flags { unsigned long mask; const char *name; @@ -58,6 +56,9 @@ struct trace_iterator { struct ring_buffer_iter *buffer_iter[NR_CPUS]; unsigned long iter_flags; + /* trace_seq for __print_flags() and __print_symbolic() etc. 
*/ + struct trace_seq tmp_seq; + /* The below is zeroed out in pipe_read */ struct trace_seq seq; struct trace_entry *ent; @@ -146,14 +147,19 @@ struct ftrace_event_class { int (*raw_init)(struct ftrace_event_call *); }; +extern int ftrace_event_reg(struct ftrace_event_call *event, + enum trace_reg type); + enum { TRACE_EVENT_FL_ENABLED_BIT, TRACE_EVENT_FL_FILTERED_BIT, + TRACE_EVENT_FL_RECORDED_CMD_BIT, }; enum { - TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), - TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), + TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), + TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), + TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), }; struct ftrace_event_call { @@ -171,6 +177,7 @@ struct ftrace_event_call { * 32 bit flags: * bit 1: enabled * bit 2: filter_active + * bit 3: enabled cmd record * * Changes to flags must hold the event_mutex. * @@ -257,8 +264,7 @@ static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, u64 count, struct pt_regs *regs, void *head) { - perf_tp_event(addr, count, raw_data, size, regs, head); - perf_swevent_put_recursion_context(rctx); + perf_tp_event(addr, count, raw_data, size, regs, head, rctx); } #endif diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 88e0eb596919..c3c578e09833 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -37,6 +37,10 @@ * * 7.14 * - add splice support to fuse device + * + * 7.15 + * - add store notify + * - add retrieve notify */ #ifndef _LINUX_FUSE_H @@ -68,7 +72,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 14 +#define FUSE_KERNEL_MINOR_VERSION 15 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -251,6 +255,7 @@ enum fuse_opcode { FUSE_DESTROY = 38, FUSE_IOCTL = 39, FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -260,6 +265,8 @@ enum fuse_notify_code { FUSE_NOTIFY_POLL = 1, FUSE_NOTIFY_INVAL_INODE = 2, FUSE_NOTIFY_INVAL_ENTRY = 3, + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_CODE_MAX, }; @@ -568,4 +575,29 @@ struct fuse_notify_inval_entry_out { __u32 padding; }; +struct fuse_notify_store_out { + __u64 nodeid; + __u64 offset; + __u32 size; + __u32 padding; +}; + +struct fuse_notify_retrieve_out { + __u64 notify_unique; + __u64 nodeid; + __u64 offset; + __u32 size; + __u32 padding; +}; + +/* Matches the size of fuse_write_in */ +struct fuse_notify_retrieve_in { + __u64 dummy1; + __u64 offset; + __u32 size; + __u32 dummy2; + __u64 dummy3; + __u64 dummy4; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index 895001f7f4b2..42a0f1d11365 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -311,6 +311,7 @@ struct hid_item { #define HID_QUIRK_HIDDEV_FORCE 0x00000010 #define HID_QUIRK_BADPAD 0x00000020 #define HID_QUIRK_MULTI_INPUT 0x00000040 +#define HID_QUIRK_HIDINPUT_FORCE 0x00000080 #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h index 02c9af374741..269181b8f623 100644 --- a/include/linux/i2c/adp5588.h +++ b/include/linux/i2c/adp5588.h @@ -78,6 +78,40 @@ #define ADP5588_KEYMAPSIZE 80 +#define GPI_PIN_ROW0 97 +#define GPI_PIN_ROW1 98 +#define GPI_PIN_ROW2 99 +#define GPI_PIN_ROW3 100 +#define GPI_PIN_ROW4 101 +#define 
GPI_PIN_ROW5 102 +#define GPI_PIN_ROW6 103 +#define GPI_PIN_ROW7 104 +#define GPI_PIN_COL0 105 +#define GPI_PIN_COL1 106 +#define GPI_PIN_COL2 107 +#define GPI_PIN_COL3 108 +#define GPI_PIN_COL4 109 +#define GPI_PIN_COL5 110 +#define GPI_PIN_COL6 111 +#define GPI_PIN_COL7 112 +#define GPI_PIN_COL8 113 +#define GPI_PIN_COL9 114 + +#define GPI_PIN_ROW_BASE GPI_PIN_ROW0 +#define GPI_PIN_ROW_END GPI_PIN_ROW7 +#define GPI_PIN_COL_BASE GPI_PIN_COL0 +#define GPI_PIN_COL_END GPI_PIN_COL9 + +#define GPI_PIN_BASE GPI_PIN_ROW_BASE +#define GPI_PIN_END GPI_PIN_COL_END + +#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1) + +struct adp5588_gpi_map { + unsigned short pin; + unsigned short sw_evt; +}; + struct adp5588_kpad_platform_data { int rows; /* Number of rows */ int cols; /* Number of columns */ @@ -87,6 +121,9 @@ struct adp5588_kpad_platform_data { unsigned en_keylock:1; /* Enable Key Lock feature */ unsigned short unlock_key1; /* Unlock Key 1 */ unsigned short unlock_key2; /* Unlock Key 2 */ + const struct adp5588_gpi_map *gpimap; + unsigned short gpimapsize; + const struct adp5588_gpio_platform_data *gpio_data; }; struct adp5588_gpio_platform_data { diff --git a/include/linux/i2c/mcs.h b/include/linux/i2c/mcs.h new file mode 100644 index 000000000000..725ae7c313ff --- /dev/null +++ b/include/linux/i2c/mcs.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim <jy0922.shim@samsung.com> + * Author: HeungJun Kim <riverful.kim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MCS_H +#define __LINUX_MCS_H + +#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) +#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff) +#define MCS_KEY_CODE(v) ((v) & 0xffff) + +struct mcs_platform_data { + void (*cfg_pin)(void); + + /* touchscreen */ + unsigned int x_size; + unsigned int y_size; + + /* touchkey */ + const u32 *keymap; + unsigned int keymap_size; + unsigned int key_maxval; + bool no_autorepeat; +}; + +#endif /* __LINUX_MCS_H */ diff --git a/include/linux/i2c/mcs5000_ts.h b/include/linux/i2c/mcs5000_ts.h deleted file mode 100644 index 5a117b5ca15e..000000000000 --- a/include/linux/i2c/mcs5000_ts.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * mcs5000_ts.h - * - * Copyright (C) 2009 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim <jy0922.shim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * - */ - -#ifndef __LINUX_MCS5000_TS_H -#define __LINUX_MCS5000_TS_H - -/* platform data for the MELFAS MCS-5000 touchscreen driver */ -struct mcs5000_ts_platform_data { - void (*cfg_pin)(void); - int x_size; - int y_size; -}; - -#endif /* __LINUX_MCS5000_TS_H */ diff --git a/include/linux/i2c/qt602240_ts.h b/include/linux/i2c/qt602240_ts.h new file mode 100644 index 000000000000..c5033e101094 --- /dev/null +++ b/include/linux/i2c/qt602240_ts.h @@ -0,0 +1,38 @@ +/* + * AT42QT602240/ATMXT224 Touchscreen driver + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_QT602240_TS_H +#define __LINUX_QT602240_TS_H + +/* Orient */ +#define QT602240_NORMAL 0x0 +#define QT602240_DIAGONAL 0x1 +#define QT602240_HORIZONTAL_FLIP 0x2 +#define QT602240_ROTATED_90_COUNTER 0x3 +#define QT602240_VERTICAL_FLIP 0x4 +#define QT602240_ROTATED_90 0x5 +#define QT602240_ROTATED_180 0x6 +#define QT602240_DIAGONAL_COUNTER 0x7 + +/* The platform data for the AT42QT602240/ATMXT224 touchscreen driver */ +struct qt602240_platform_data { + unsigned int x_line; + unsigned int y_line; + unsigned int x_size; + unsigned int y_size; + unsigned int blen; + unsigned int threshold; + unsigned int voltage; + unsigned char orient; +}; + +#endif /* __LINUX_QT602240_TS_H */ diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 9bf6870ee5f4..a986ff588944 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -46,31 +46,31 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, #else -void i8042_lock_chip(void) +static inline void i8042_lock_chip(void) { } -void i8042_unlock_chip(void) +static inline void i8042_unlock_chip(void) { } -int i8042_command(unsigned char *param, int command) +static inline int i8042_command(unsigned char *param, int command) { return -ENODEV; } -bool i8042_check_port_owner(const struct serio *serio) +static inline bool i8042_check_port_owner(const struct serio *serio) { return false; } -int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, +static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { return -ENODEV; } -int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, +static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { return -ENODEV; diff --git a/include/linux/ide.h b/include/linux/ide.h index 7b02aa5ce9b4..072fe8c93e6f 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -458,7 +458,7 @@ enum { IDE_DFLAG_DOORLOCKING = (1 << 15), /* disallow DMA */ IDE_DFLAG_NODMA = (1 << 16), - /* powermanagment told us not to do anything, so sleep nicely */ + /* powermanagement told us not to do anything, so sleep nicely */ IDE_DFLAG_BLOCKED = (1 << 17), /* sleeping & sleep field valid */ IDE_DFLAG_SLEEPING = (1 << 18), diff --git a/include/linux/if.h b/include/linux/if.h index be350e62a905..53558ec59e1b 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -73,6 +73,8 @@ #define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */ #define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */ #define IFF_DISABLE_NETPOLL 0x2000 /* disable 
netpoll at run-time */ +#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ +#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h index cd525fae3c98..2c7994372bde 100644 --- a/include/linux/if_bonding.h +++ b/include/linux/if_bonding.h @@ -83,6 +83,7 @@ #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ +#define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ /* hashing types */ #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 938b7e81df95..0d241a5c4909 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -102,8 +102,6 @@ struct __fdb_entry { #include <linux/netdevice.h> extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); -extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, - struct sk_buff *skb); extern int (*br_should_route_hook)(struct sk_buff *skb); #endif diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index bed7a4682b90..c831467774d0 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -119,7 +119,7 @@ struct ethhdr { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ unsigned char h_source[ETH_ALEN]; /* source ether addr */ __be16 h_proto; /* packet type ID field */ -} __attribute__((packed)); +} __packed; #ifdef __KERNEL__ #include <linux/skbuff.h> diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h index 5459c5c09930..9947c39e62f6 100644 --- a/include/linux/if_fddi.h +++ b/include/linux/if_fddi.h @@ -67,7 +67,7 @@ struct fddi_8022_1_hdr { __u8 dsap; /* destination service access point */ __u8 ssap; /* source service access point */ __u8 ctrl; /* control byte #1 */ -} __attribute__ ((packed)); +} __packed; /* Define 802.2 Type 2 header */ struct fddi_8022_2_hdr { @@ -75,7 +75,7 @@ struct fddi_8022_2_hdr { __u8 ssap; /* source service access point */ __u8 ctrl_1; /* control byte #1 */ __u8 ctrl_2; /* control byte #2 */ -} __attribute__ ((packed)); +} __packed; /* Define 802.2 SNAP header */ #define FDDI_K_OUI_LEN 3 @@ -85,7 +85,7 @@ struct fddi_snap_hdr { __u8 ctrl; /* always 0x03 */ __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ __be16 ethertype; /* packet type ID field */ -} __attribute__ ((packed)); +} __packed; /* Define FDDI LLC frame header */ struct fddihdr { @@ -98,7 +98,7 @@ struct fddihdr { struct fddi_8022_2_hdr llc_8022_2; struct fddi_snap_hdr llc_snap; } hdr; -} __attribute__ ((packed)); +} __packed; #ifdef __KERNEL__ #include <linux/netdevice.h> diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 80b3a1056a5f..191ee0869bc1 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -135,7 +135,7 @@ struct frhdr __be16 PID; #define IP_NLPID pad -} __attribute__((packed)); +} __packed; /* see RFC 1490 for the definition of the following */ #define FRAD_I_UI 0x03 diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h index 8d038eb8db5c..5fe5f307c6f5 100644 --- a/include/linux/if_hippi.h +++ b/include/linux/if_hippi.h @@ -104,7 +104,7 @@ struct hippi_fp_hdr { __be32 fixed; #endif __be32 d2_size; -} __attribute__ ((packed)); +} __packed; struct hippi_le_hdr { #if defined (__BIG_ENDIAN_BITFIELD) @@ -129,7 
+129,7 @@ struct hippi_le_hdr { __u8 daddr[HIPPI_ALEN]; __u16 locally_administered; __u8 saddr[HIPPI_ALEN]; -} __attribute__ ((packed)); +} __packed; #define HIPPI_OUI_LEN 3 /* @@ -142,12 +142,12 @@ struct hippi_snap_hdr { __u8 ctrl; /* always 0x03 */ __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ __be16 ethertype; /* packet type ID field */ -} __attribute__ ((packed)); +} __packed; struct hippi_hdr { struct hippi_fp_hdr fp; struct hippi_le_hdr le; struct hippi_snap_hdr snap; -} __attribute__ ((packed)); +} __packed; #endif /* _LINUX_IF_HIPPI_H */ diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 85c812db5a3f..2fc66dd783ee 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -4,7 +4,7 @@ #include <linux/types.h> #include <linux/netlink.h> -/* The struct should be in sync with struct net_device_stats */ +/* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { __u32 rx_packets; /* total packets received */ __u32 tx_packets; /* total packets transmitted */ @@ -37,6 +37,7 @@ struct rtnl_link_stats { __u32 tx_compressed; }; +/* The main device statistics structure */ struct rtnl_link_stats64 { __u64 rx_packets; /* total packets received */ __u64 tx_packets; /* total packets transmitted */ @@ -233,7 +234,7 @@ enum macvlan_mode { MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ }; -/* SR-IOV virtual function managment section */ +/* SR-IOV virtual function management section */ enum { IFLA_VF_INFO_UNSPEC, diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 9ea047aca795..35280b302290 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -6,6 +6,7 @@ #include <linux/netdevice.h> #include <linux/netlink.h> #include <net/netlink.h> +#include <linux/u64_stats_sync.h> #if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE) struct socket *macvtap_get_socket(struct file *); @@ -27,14 +28,16 @@ struct macvtap_queue; * struct macvlan_rx_stats - MACVLAN percpu rx stats * @rx_packets: number of received packets * @rx_bytes: number of received bytes - * @multicast: number of received multicast packets + * @rx_multicast: number of received multicast packets + * @syncp: synchronization point for 64bit counters * @rx_errors: number of errors */ struct macvlan_rx_stats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long multicast; - unsigned long rx_errors; + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + struct u64_stats_sync syncp; + unsigned long rx_errors; }; struct macvlan_dev { @@ -56,17 +59,21 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, { struct macvlan_rx_stats *rx_stats; - rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id()); + rx_stats = this_cpu_ptr(vlan->rx_stats); if (likely(success)) { + u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++;; rx_stats->rx_bytes += len; if (multicast) - rx_stats->multicast++; + rx_stats->rx_multicast++; + u64_stats_update_end(&rx_stats->syncp); } else { rx_stats->rx_errors++; } } +extern void macvlan_common_setup(struct net_device *dev); + extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], int (*receive)(struct sk_buff *skb), @@ -84,8 +91,4 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops); extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev); - -extern struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *, - struct 
sk_buff *); - #endif /* _LINUX_IF_MACVLAN_H */ diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h index 6ac23ef1801a..72bfa5a034dd 100644 --- a/include/linux/if_packet.h +++ b/include/linux/if_packet.h @@ -48,6 +48,7 @@ struct sockaddr_ll { #define PACKET_LOSS 14 #define PACKET_VNET_HDR 15 #define PACKET_TX_TIMESTAMP 16 +#define PACKET_TIMESTAMP 17 struct tpacket_stats { unsigned int tp_packets; diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index a6577af0c4e6..1925e0c3f162 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -59,7 +59,7 @@ struct sockaddr_pppox { union{ struct pppoe_addr pppoe; }sa_addr; -}__attribute__ ((packed)); +} __packed; /* The use of the above union isn't viable because the size of this * struct must stay fixed over time -- applications use sizeof(struct @@ -70,7 +70,7 @@ struct sockaddr_pppol2tp { sa_family_t sa_family; /* address family, AF_PPPOX */ unsigned int sa_protocol; /* protocol identifier */ struct pppol2tp_addr pppol2tp; -}__attribute__ ((packed)); +} __packed; /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 * bits. So we need a different sockaddr structure. @@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 { sa_family_t sa_family; /* address family, AF_PPPOX */ unsigned int sa_protocol; /* protocol identifier */ struct pppol2tpv3_addr pppol2tp; -} __attribute__ ((packed)); +} __packed; /********************************************************************* * @@ -129,7 +129,7 @@ struct pppoe_hdr { __be16 sid; __be16 length; struct pppoe_tag tag[0]; -} __attribute__ ((packed)); +} __packed; /* Length of entire PPPoE + PPP header */ #define PPPOE_SES_HLEN 8 diff --git a/include/linux/in.h b/include/linux/in.h index 583c76f9c30f..41d88a4689af 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -85,6 +85,7 @@ struct in_addr { #define IP_RECVORIGDSTADDR IP_ORIGDSTADDR #define IP_MINTTL 21 +#define IP_NODEFRAG 22 /* IP_MTU_DISCOVER values */ #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ diff --git a/include/linux/input.h b/include/linux/input.h index 6fcc9101beeb..339d043ccb53 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -691,9 +691,12 @@ struct input_absinfo { #define ABS_TILT_X 0x1a #define ABS_TILT_Y 0x1b #define ABS_TOOL_WIDTH 0x1c + #define ABS_VOLUME 0x20 + #define ABS_MISC 0x28 +#define ABS_MT_SLOT 0x2f /* MT slot being modified */ #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ #define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */ @@ -706,6 +709,12 @@ struct input_absinfo { #define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */ #define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */ +#ifdef __KERNEL__ +/* Implementation details, userspace should not care about these */ +#define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR +#define ABS_MT_LAST ABS_MT_PRESSURE +#endif + #define ABS_MAX 0x3f #define ABS_CNT (ABS_MAX+1) @@ -1048,6 +1057,14 @@ struct ff_effect { #include <linux/mod_devicetable.h> /** + * struct input_mt_slot - represents the state of an input MT slot + * @abs: holds current values of ABS_MT axes for this slot + */ +struct input_mt_slot { + int abs[ABS_MT_LAST - ABS_MT_FIRST + 1]; +}; + +/** * struct input_dev - represents an input device * @name: name of the device * @phys: physical path to the device in the system hierarchy @@ -1063,6 +1080,10 @@ struct ff_effect { * @sndbit: bitmap of sound effects supported by the device * 
@ffbit: bitmap of force feedback effects supported by the device * @swbit: bitmap of switches present on the device + * @hint_events_per_packet: average number of events generated by the + * device in a packet (between EV_SYN/SYN_REPORT events). Used by + * event handlers to estimate size of the buffer needed to hold + * events. * @keycodemax: size of keycode table * @keycodesize: size of elements in keycode table * @keycode: map of scancodes to keycodes for this device @@ -1078,9 +1099,12 @@ struct ff_effect { * @repeat_key: stores key code of the last key pressed; used to implement * software autorepeat * @timer: timer for software autorepeat - * @sync: set to 1 when there were no new events since last EV_SYNC * @abs: current values for reports from absolute axes * @rep: current values for autorepeat parameters (delay, rate) + * @mt: pointer to array of struct input_mt_slot holding current values + * of tracked contacts + * @mtsize: number of MT slots the device uses + * @slot: MT slot currently being transmitted * @key: reflects current state of device's keys/buttons * @led: reflects current state of device's LEDs * @snd: reflects current state of sound effects @@ -1119,6 +1143,7 @@ struct ff_effect { * last user closes the device * @going_away: marks devices that are in a middle of unregistering and * causes input_open_device*() fail with -ENODEV. + * @sync: set to %true when there were no new events since last EV_SYN * @dev: driver model's view of this device * @h_list: list of input handles associated with the device. When * accessing the list dev->mutex must be held @@ -1140,6 +1165,8 @@ struct input_dev { unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; unsigned long swbit[BITS_TO_LONGS(SW_CNT)]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; unsigned int keycodesize; void *keycode; @@ -1153,11 +1180,13 @@ struct input_dev { unsigned int repeat_key; struct timer_list timer; - int sync; - int abs[ABS_CNT]; int rep[REP_MAX + 1]; + struct input_mt_slot *mt; + int mtsize; + int slot; + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; unsigned long led[BITS_TO_LONGS(LED_CNT)]; unsigned long snd[BITS_TO_LONGS(SND_CNT)]; @@ -1182,6 +1211,8 @@ struct input_dev { unsigned int users; bool going_away; + bool sync; + struct device dev; struct list_head h_list; @@ -1406,8 +1437,28 @@ static inline void input_mt_sync(struct input_dev *dev) input_event(dev, EV_SYN, SYN_MT_REPORT, 0); } +static inline void input_mt_slot(struct input_dev *dev, int slot) +{ + input_event(dev, EV_ABS, ABS_MT_SLOT, slot); +} + void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); +/** + * input_set_events_per_packet - tell handlers about the driver event rate + * @dev: the input device used by the driver + * @n_events: the average number of events between calls to input_sync() + * + * If the event rate sent from a device is unusually large, use this + * function to set the expected event rate. This will allow handlers + * to set up an appropriate buffer size for the event stream, in order + * to minimize information loss. 
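A hedged sketch of how a multi-touch driver might combine the slot helpers introduced here (input_mt_create_slots(), input_mt_slot(), input_set_events_per_packet()); the resolution, the ten-contact limit and all function names are illustrative, not taken from the patch:

#include <linux/input.h>

#define EXAMPLE_MAX_CONTACTS	10

/* Probe-time setup: declare the axes, then allocate the slot state. */
static struct input_dev *example_ts_setup(void)
{
	struct input_dev *dev = input_allocate_device();

	if (!dev)
		return NULL;

	__set_bit(EV_ABS, dev->evbit);
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_TRACKING_ID, 0,
			     EXAMPLE_MAX_CONTACTS - 1, 0, 0);

	/* rough estimate: about four ABS events per contact per frame */
	input_set_events_per_packet(dev, 4 * EXAMPLE_MAX_CONTACTS);

	if (input_mt_create_slots(dev, EXAMPLE_MAX_CONTACTS)) {
		input_free_device(dev);
		return NULL;
	}
	return dev;
}

/* Per-frame reporting with the slot protocol. */
static void example_ts_report(struct input_dev *dev, int slot, int id,
			      int x, int y)
{
	input_mt_slot(dev, slot);
	/* a negative id is conventionally used to release the slot */
	input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
	input_event(dev, EV_ABS, ABS_MT_POSITION_X, x);
	input_event(dev, EV_ABS, ABS_MT_POSITION_Y, y);
	input_sync(dev);
}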
+ */ +static inline void input_set_events_per_packet(struct input_dev *dev, int n_events) +{ + dev->hint_events_per_packet = n_events; +} + static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat) { dev->absmin[axis] = min; @@ -1485,5 +1536,8 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); +int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots); +void input_mt_destroy_slots(struct input_dev *dev); + #endif #endif diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h new file mode 100644 index 000000000000..df00d998a44a --- /dev/null +++ b/include/linux/input/adxl34x.h @@ -0,0 +1,349 @@ +/* + * include/linux/input/adxl34x.h + * + * Digital Accelerometer characteristics are highly application specific + * and may vary between boards and models. The platform_data for the + * device's "struct device" holds this information. + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_INPUT_ADXL34X_H__ +#define __LINUX_INPUT_ADXL34X_H__ + +struct adxl34x_platform_data { + + /* + * X,Y,Z Axis Offset: + * offer user offset adjustments in twoscompliment + * form with a scale factor of 15.6 mg/LSB (i.e. 0x7F = +2 g) + */ + + s8 x_axis_offset; + s8 y_axis_offset; + s8 z_axis_offset; + + /* + * TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X, + * Y, or Z participation in Tap detection. A '0' excludes the + * selected axis from participation in Tap detection. + * Setting the SUPPRESS bit suppresses Double Tap detection if + * acceleration greater than tap_threshold is present between + * taps. + */ + +#define ADXL_SUPPRESS (1 << 3) +#define ADXL_TAP_X_EN (1 << 2) +#define ADXL_TAP_Y_EN (1 << 1) +#define ADXL_TAP_Z_EN (1 << 0) + + u8 tap_axis_control; + + /* + * tap_threshold: + * holds the threshold value for tap detection/interrupts. + * The data format is unsigned. The scale factor is 62.5 mg/LSB + * (i.e. 0xFF = +16 g). A zero value may result in undesirable + * behavior if Tap/Double Tap is enabled. + */ + + u8 tap_threshold; + + /* + * tap_duration: + * is an unsigned time value representing the maximum + * time that an event must be above the tap_threshold threshold + * to qualify as a tap event. The scale factor is 625 us/LSB. A zero + * value will prevent Tap/Double Tap functions from working. + */ + + u8 tap_duration; + + /* + * tap_latency: + * is an unsigned time value representing the wait time + * from the detection of a tap event to the opening of the time + * window tap_window for a possible second tap event. The scale + * factor is 1.25 ms/LSB. A zero value will disable the Double Tap + * function. + */ + + u8 tap_latency; + + /* + * tap_window: + * is an unsigned time value representing the amount + * of time after the expiration of tap_latency during which a second + * tap can begin. The scale factor is 1.25 ms/LSB. A zero value will + * disable the Double Tap function. + */ + + u8 tap_window; + + /* + * act_axis_control: + * X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity + * or inactivity detection. A '0' excludes the selected axis from + * participation. If all of the axes are excluded, the function is + * disabled. + * AC/DC: A '0' = DC coupled operation and a '1' = AC coupled + * operation. 
In DC coupled operation, the current acceleration is + * compared with activity_threshold and inactivity_threshold directly + * to determine whether activity or inactivity is detected. In AC + * coupled operation for activity detection, the acceleration value + * at the start of activity detection is taken as a reference value. + * New samples of acceleration are then compared to this + * reference value and if the magnitude of the difference exceeds + * activity_threshold the device will trigger an activity interrupt. In + * AC coupled operation for inactivity detection, a reference value + * is used again for comparison and is updated whenever the + * device exceeds the inactivity threshold. Once the reference + * value is selected, the device compares the magnitude of the + * difference between the reference value and the current + * acceleration with inactivity_threshold. If the difference is below + * inactivity_threshold for a total of inactivity_time, the device is + * considered inactive and the inactivity interrupt is triggered. + */ + +#define ADXL_ACT_ACDC (1 << 7) +#define ADXL_ACT_X_EN (1 << 6) +#define ADXL_ACT_Y_EN (1 << 5) +#define ADXL_ACT_Z_EN (1 << 4) +#define ADXL_INACT_ACDC (1 << 3) +#define ADXL_INACT_X_EN (1 << 2) +#define ADXL_INACT_Y_EN (1 << 1) +#define ADXL_INACT_Z_EN (1 << 0) + + u8 act_axis_control; + + /* + * activity_threshold: + * holds the threshold value for activity detection. + * The data format is unsigned. The scale factor is + * 62.5 mg/LSB. A zero value may result in undesirable behavior if + * Activity interrupt is enabled. + */ + + u8 activity_threshold; + + /* + * inactivity_threshold: + * holds the threshold value for inactivity + * detection. The data format is unsigned. The scale + * factor is 62.5 mg/LSB. A zero value may result in undesirable + * behavior if Inactivity interrupt is enabled. + */ + + u8 inactivity_threshold; + + /* + * inactivity_time: + * is an unsigned time value representing the + * amount of time that acceleration must be below the value in + * inactivity_threshold for inactivity to be declared. The scale factor + * is 1 second/LSB. Unlike the other interrupt functions, which + * operate on unfiltered data, the inactivity function operates on the + * filtered output data. At least one output sample must be + * generated for the inactivity interrupt to be triggered. This will + * result in the function appearing un-responsive if the + * inactivity_time register is set with a value less than the time + * constant of the Output Data Rate. A zero value will result in an + * interrupt when the output data is below inactivity_threshold. + */ + + u8 inactivity_time; + + /* + * free_fall_threshold: + * holds the threshold value for Free-Fall detection. + * The data format is unsigned. The root-sum-square(RSS) value + * of all axes is calculated and compared to the value in + * free_fall_threshold to determine if a free fall event may be + * occurring. The scale factor is 62.5 mg/LSB. A zero value may + * result in undesirable behavior if Free-Fall interrupt is + * enabled. Values between 300 and 600 mg (0x05 to 0x09) are + * recommended. + */ + + u8 free_fall_threshold; + + /* + * free_fall_time: + * is an unsigned time value representing the minimum + * time that the RSS value of all axes must be less than + * free_fall_threshold to generate a Free-Fall interrupt. The + * scale factor is 5 ms/LSB. A zero value may result in + * undesirable behavior if Free-Fall interrupt is enabled. 
+ * Values between 100 to 350 ms (0x14 to 0x46) are recommended. + */ + + u8 free_fall_time; + + /* + * data_rate: + * Selects device bandwidth and output data rate. + * RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz + * Output Data Rate. An Output Data Rate should be selected that + * is appropriate for the communication protocol and frequency + * selected. Selecting too high of an Output Data Rate with a low + * communication speed will result in samples being discarded. + */ + + u8 data_rate; + + /* + * data_range: + * FULL_RES: When this bit is set with the device is + * in Full-Resolution Mode, where the output resolution increases + * with RANGE to maintain a 4 mg/LSB scale factor. When this + * bit is cleared the device is in 10-bit Mode and RANGE determine the + * maximum g-Range and scale factor. + */ + +#define ADXL_FULL_RES (1 << 3) +#define ADXL_RANGE_PM_2g 0 +#define ADXL_RANGE_PM_4g 1 +#define ADXL_RANGE_PM_8g 2 +#define ADXL_RANGE_PM_16g 3 + + u8 data_range; + + /* + * low_power_mode: + * A '0' = Normal operation and a '1' = Reduced + * power operation with somewhat higher noise. + */ + + u8 low_power_mode; + + /* + * power_mode: + * LINK: A '1' with both the activity and inactivity functions + * enabled will delay the start of the activity function until + * inactivity is detected. Once activity is detected, inactivity + * detection will begin and prevent the detection of activity. This + * bit serially links the activity and inactivity functions. When '0' + * the inactivity and activity functions are concurrent. Additional + * information can be found in the Application section under Link + * Mode. + * AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode + * when inactivity (acceleration has been below inactivity_threshold + * for at least inactivity_time) is detected and the LINK bit is set. + * A '0' disables automatic switching to Sleep Mode. See SLEEP + * for further description. + */ + +#define ADXL_LINK (1 << 5) +#define ADXL_AUTO_SLEEP (1 << 4) + + u8 power_mode; + + /* + * fifo_mode: + * BYPASS The FIFO is bypassed + * FIFO FIFO collects up to 32 values then stops collecting data + * STREAM FIFO holds the last 32 data values. Once full, the FIFO's + * oldest data is lost as it is replaced with newer data + * + * DEFAULT should be ADXL_FIFO_STREAM + */ + +#define ADXL_FIFO_BYPASS 0 +#define ADXL_FIFO_FIFO 1 +#define ADXL_FIFO_STREAM 2 + + u8 fifo_mode; + + /* + * watermark: + * The Watermark feature can be used to reduce the interrupt load + * of the system. The FIFO fills up to the value stored in watermark + * [1..32] and then generates an interrupt. + * A '0' disables the watermark feature. + */ + + u8 watermark; + + u32 ev_type; /* EV_ABS or EV_REL */ + + u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */ + + /* + * A valid BTN or KEY Code; use tap_axis_control to disable + * event reporting + */ + + u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */ + + /* + * A valid BTN or KEY Code for Free-Fall or Activity enables + * input event reporting. A '0' disables the Free-Fall or + * Activity reporting. + */ + + u32 ev_code_ff; /* EV_KEY */ + u32 ev_code_act_inactivity; /* EV_KEY */ + + /* + * Use ADXL34x INT2 instead of INT1 + */ + u8 use_int2; + + /* + * ADXL346 only ORIENTATION SENSING feature + * The orientation function of the ADXL346 reports both 2-D and + * 3-D orientation concurrently. 
+ */ + +#define ADXL_EN_ORIENTATION_2D 1 +#define ADXL_EN_ORIENTATION_3D 2 +#define ADXL_EN_ORIENTATION_2D_3D 3 + + u8 orientation_enable; + + /* + * The width of the deadzone region between two or more + * orientation positions is determined by setting the Deadzone + * value. The deadzone region size can be specified with a + * resolution of 3.6deg. The deadzone angle represents the total + * angle where the orientation is considered invalid. + */ + +#define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */ +#define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */ +#define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */ +#define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */ + + u8 deadzone_angle; + + /* + * To eliminate most human motion such as walking or shaking, + * a Divisor value should be selected to effectively limit the + * orientation bandwidth. Set the depth of the filter used to + * low-pass filter the measured acceleration for stable + * orientation sensing + */ + +#define ADXL_LP_FILTER_DIVISOR_2 0 +#define ADXL_LP_FILTER_DIVISOR_4 1 +#define ADXL_LP_FILTER_DIVISOR_8 2 +#define ADXL_LP_FILTER_DIVISOR_16 3 +#define ADXL_LP_FILTER_DIVISOR_32 4 +#define ADXL_LP_FILTER_DIVISOR_64 5 +#define ADXL_LP_FILTER_DIVISOR_128 6 +#define ADXL_LP_FILTER_DIVISOR_256 7 + + u8 divisor_length; + + u32 ev_codes_orient_2d[4]; /* EV_KEY {+X, -X, +Y, -Y} */ + u32 ev_codes_orient_3d[6]; /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */ +}; +#endif diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h new file mode 100644 index 000000000000..09522cb59910 --- /dev/null +++ b/include/linux/input/cy8ctmg110_pdata.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_CY8CTMG110_PDATA_H +#define _LINUX_CY8CTMG110_PDATA_H + +struct cy8ctmg110_pdata +{ + int reset_pin; /* Reset pin is wired to this GPIO (optional) */ + int irq_pin; /* IRQ pin is wired to this GPIO */ +}; + +#endif diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index c964cd7f436a..80352ad6581a 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -41,6 +41,9 @@ struct matrix_keymap_data { * @col_scan_delay_us: delay, measured in microseconds, that is * needed before we can keypad after activating column gpio * @debounce_ms: debounce interval in milliseconds + * @clustered_irq: may be specified if interrupts of all row/column GPIOs + * are bundled to one single irq + * @clustered_irq_flags: flags that are needed for the clustered irq * @active_low: gpio polarity * @wakeup: controls whether the device should be set up as wakeup * source @@ -63,6 +66,9 @@ struct matrix_keypad_platform_data { /* key debounce interval in milli-second */ unsigned int debounce_ms; + unsigned int clustered_irq; + unsigned int clustered_irq_flags; + bool active_low; bool wakeup; bool no_autorepeat; diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h new file mode 100644 index 000000000000..920109a29191 --- /dev/null +++ b/include/linux/intel_pmic_gpio.h @@ -0,0 +1,15 @@ +#ifndef LINUX_INTEL_PMIC_H +#define LINUX_INTEL_PMIC_H + +struct intel_pmic_gpio_platform_data { + /* the first IRQ of the chip */ + unsigned irq_base; + /* number assigned to the first GPIO */ + unsigned gpio_base; + /* sram address for gpiointr register, the langwell chip will map + * the PMIC spi 
GPIO expander's GPIOINTR register in sram. + */ + unsigned gpiointr; +}; + +#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c2331138ca1b..a0384a4d1e6f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -53,16 +53,21 @@ * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. + * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend + * */ #define IRQF_DISABLED 0x00000020 #define IRQF_SAMPLE_RANDOM 0x00000040 #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 -#define IRQF_TIMER 0x00000200 +#define __IRQF_TIMER 0x00000200 #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 #define IRQF_ONESHOT 0x00002000 +#define IRQF_NO_SUSPEND 0x00004000 + +#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) /* * Bits used by threaded handlers: diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 25085ddd955f..e0ea40f6c515 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -79,7 +79,9 @@ io_mapping_free(struct io_mapping *mapping) /* Atomic map/unmap */ static inline void * -io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_atomic_wc(struct io_mapping *mapping, + unsigned long offset, + int slot) { resource_size_t phys_addr; unsigned long pfn; @@ -87,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); - return iomap_atomic_prot_pfn(pfn, KM_USER0, mapping->prot); + return iomap_atomic_prot_pfn(pfn, slot, mapping->prot); } static inline void -io_mapping_unmap_atomic(void *vaddr) +io_mapping_unmap_atomic(void *vaddr, int slot) { - iounmap_atomic(vaddr, KM_USER0); + iounmap_atomic(vaddr, slot); } static inline void * @@ -133,13 +135,15 @@ io_mapping_free(struct io_mapping *mapping) /* Atomic map/unmap */ static inline void * -io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_atomic_wc(struct io_mapping *mapping, + unsigned long offset, + int slot) { return ((char *) mapping) + offset; } static inline void -io_mapping_unmap_atomic(void *vaddr) +io_mapping_unmap_atomic(void *vaddr, int slot) { } diff --git a/include/linux/io.h b/include/linux/io.h index 6c7f0ba0d5fa..7fd2d2138bf3 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -29,10 +29,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count); #ifdef CONFIG_MMU int ioremap_page_range(unsigned long addr, unsigned long end, - unsigned long phys_addr, pgprot_t prot); + phys_addr_t phys_addr, pgprot_t prot); #else static inline int ioremap_page_range(unsigned long addr, unsigned long end, - unsigned long phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot) { return 0; } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index be22ad83689c..0a2ba4098996 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -30,6 +30,7 @@ struct iommu_domain { }; #define IOMMU_CAP_CACHE_COHERENCY 0x1 +#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ struct iommu_ops { int (*domain_init)(struct iommu_domain *domain); diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h index dfc170362842..9708de265bb1 100644 --- a/include/linux/ip_vs.h +++ b/include/linux/ip_vs.h @@ 
-19,6 +19,7 @@ */ #define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ #define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ +#define IP_VS_SVC_F_ONEPACKET 0x0004 /* one-packet scheduling */ /* * Destination Server Flags @@ -85,6 +86,7 @@ #define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ #define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ +#define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ #define IP_VS_SCHEDNAME_MAXLEN 16 #define IP_VS_IFNAME_MAXLEN 16 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 99e1ab7e3eec..ab9e9e89e407 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -58,7 +58,7 @@ struct ipv6_opt_hdr { /* * TLV encoded option data follows. */ -} __attribute__ ((packed)); /* required for some archs */ +} __packed; /* required for some archs */ #define ipv6_destopt_hdr ipv6_opt_hdr #define ipv6_hopopt_hdr ipv6_opt_hdr @@ -99,7 +99,7 @@ struct ipv6_destopt_hao { __u8 type; __u8 length; struct in6_addr addr; -} __attribute__ ((__packed__)); +} __packed; /* * IPv6 fixed header @@ -246,7 +246,7 @@ struct inet6_skb_parm { __u16 srcrt; __u16 dst1; __u16 lastopt; - __u32 nhoff; + __u16 nhoff; __u16 flags; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) __u16 dsthao; diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h new file mode 100644 index 000000000000..f1e6c184f14f --- /dev/null +++ b/include/linux/iscsi_boot_sysfs.h @@ -0,0 +1,123 @@ +/* + * Export the iSCSI boot info to userland via sysfs. + * + * Copyright (C) 2010 Red Hat, Inc. All rights reserved. + * Copyright (C) 2010 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _ISCSI_BOOT_SYSFS_ +#define _ISCSI_BOOT_SYSFS_ + +/* + * The text attributes names for each of the kobjects. +*/ +enum iscsi_boot_eth_properties_enum { + ISCSI_BOOT_ETH_INDEX, + ISCSI_BOOT_ETH_FLAGS, + ISCSI_BOOT_ETH_IP_ADDR, + ISCSI_BOOT_ETH_SUBNET_MASK, + ISCSI_BOOT_ETH_ORIGIN, + ISCSI_BOOT_ETH_GATEWAY, + ISCSI_BOOT_ETH_PRIMARY_DNS, + ISCSI_BOOT_ETH_SECONDARY_DNS, + ISCSI_BOOT_ETH_DHCP, + ISCSI_BOOT_ETH_VLAN, + ISCSI_BOOT_ETH_MAC, + /* eth_pci_bdf - this is replaced by link to the device itself. 
*/ + ISCSI_BOOT_ETH_HOSTNAME, + ISCSI_BOOT_ETH_END_MARKER, +}; + +enum iscsi_boot_tgt_properties_enum { + ISCSI_BOOT_TGT_INDEX, + ISCSI_BOOT_TGT_FLAGS, + ISCSI_BOOT_TGT_IP_ADDR, + ISCSI_BOOT_TGT_PORT, + ISCSI_BOOT_TGT_LUN, + ISCSI_BOOT_TGT_CHAP_TYPE, + ISCSI_BOOT_TGT_NIC_ASSOC, + ISCSI_BOOT_TGT_NAME, + ISCSI_BOOT_TGT_CHAP_NAME, + ISCSI_BOOT_TGT_CHAP_SECRET, + ISCSI_BOOT_TGT_REV_CHAP_NAME, + ISCSI_BOOT_TGT_REV_CHAP_SECRET, + ISCSI_BOOT_TGT_END_MARKER, +}; + +enum iscsi_boot_initiator_properties_enum { + ISCSI_BOOT_INI_INDEX, + ISCSI_BOOT_INI_FLAGS, + ISCSI_BOOT_INI_ISNS_SERVER, + ISCSI_BOOT_INI_SLP_SERVER, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER, + ISCSI_BOOT_INI_INITIATOR_NAME, + ISCSI_BOOT_INI_END_MARKER, +}; + +struct attribute_group; + +struct iscsi_boot_kobj { + struct kobject kobj; + struct attribute_group *attr_group; + struct list_head list; + + /* + * Pointer to store driver specific info. If set this will + * be freed for the LLD when the kobj release function is called. + */ + void *data; + /* + * Driver specific show function. + * + * The enum of the type. This can be any value of the above + * properties. + */ + ssize_t (*show) (void *data, int type, char *buf); + + /* + * Drivers specific visibility function. + * The function should return if they the attr should be readable + * writable or should not be shown. + * + * The enum of the type. This can be any value of the above + * properties. + */ + mode_t (*is_visible) (void *data, int type); +}; + +struct iscsi_boot_kset { + struct list_head kobj_list; + struct kset *kset; +}; + +struct iscsi_boot_kobj * +iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)); + +struct iscsi_boot_kobj * +iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)); +struct iscsi_boot_kobj * +iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)); + +struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); +struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); +void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); + +#endif diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index d2e4042f8f5e..8ba7e5b9d62c 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h @@ -21,21 +21,13 @@ #ifndef ISCSI_IBFT_H #define ISCSI_IBFT_H -struct ibft_table_header { - char signature[4]; - u32 length; - u8 revision; - u8 checksum; - char oem_id[6]; - char oem_table_id[8]; - char reserved[24]; -} __attribute__((__packed__)); +#include <acpi/acpi.h> /* * Logical location of iSCSI Boot Format Table. * If the value is NULL there is no iBFT on the machine. */ -extern struct ibft_table_header *ibft_addr; +extern struct acpi_table_ibft *ibft_addr; /* * Routine used to find and reserve the iSCSI Boot Format Table. 
The diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h index b9b5a684ed69..b8c23f88dd54 100644 --- a/include/linux/isdnif.h +++ b/include/linux/isdnif.h @@ -317,7 +317,7 @@ typedef struct T30_s { __u8 r_scantime; __u8 r_id[FAXIDLEN]; __u8 r_code; -} __attribute__((packed)) T30_s; +} __packed T30_s; #define ISDN_TTY_FAX_CONN_IN 0 #define ISDN_TTY_FAX_CONN_OUT 1 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index a4d2e9f7088a..0b52924a0cb6 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -601,13 +601,13 @@ struct transaction_s * Number of outstanding updates running on this transaction * [t_handle_lock] */ - int t_updates; + atomic_t t_updates; /* * Number of buffers reserved for use by all handles in this transaction * handle but not yet modified. [t_handle_lock] */ - int t_outstanding_credits; + atomic_t t_outstanding_credits; /* * Forward and backward links for the circular list of all transactions @@ -629,7 +629,7 @@ struct transaction_s /* * How many handles used this transaction? [t_handle_lock] */ - int t_handle_count; + atomic_t t_handle_count; /* * This transaction is being forced and some process is @@ -764,7 +764,7 @@ struct journal_s /* * Protect the various scalars in the journal */ - spinlock_t j_state_lock; + rwlock_t j_state_lock; /* * Number of processes waiting to create a barrier lock [j_state_lock] @@ -1026,11 +1026,12 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); struct jbd2_buffer_trigger_type { /* - * Fired just before a buffer is written to the journal. - * mapped_data is a mapped buffer that is the frozen data for - * commit. + * Fired a the moment data to write to the journal are known to be + * stable - so either at the moment b_frozen_data is created or just + * before a buffer is written to the journal. mapped_data is a mapped + * buffer that is the frozen data for commit. 
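A hedged sketch of a buffer trigger using the renamed t_frozen callback, in the spirit of filesystem metadata checksumming; the on-disk layout, the checksum choice and every name are invented for illustration:

#include <linux/kernel.h>
#include <linux/jbd2.h>
#include <linux/crc32.h>

struct example_block_check {
	__le32 bc_crc32;	/* hypothetical checksum field at block start */
};

/* Runs on the frozen image just before it is written to the journal. */
static void example_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
				   struct buffer_head *bh,
				   void *mapped_data, size_t size)
{
	struct example_block_check *check = mapped_data;

	check->bc_crc32 = 0;
	check->bc_crc32 = cpu_to_le32(crc32_le(~0, mapped_data, size));
}

static void example_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
				  struct buffer_head *bh)
{
	pr_err("example: trigger aborted for block %llu\n",
	       (unsigned long long)bh->b_blocknr);
}

static struct jbd2_buffer_trigger_type example_triggers = {
	.t_frozen = example_frozen_trigger,
	.t_abort  = example_abort_trigger,
};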
*/ - void (*t_commit)(struct jbd2_buffer_trigger_type *type, + void (*t_frozen)(struct jbd2_buffer_trigger_type *type, struct buffer_head *bh, void *mapped_data, size_t size); @@ -1042,7 +1043,7 @@ struct jbd2_buffer_trigger_type { struct buffer_head *bh); }; -extern void jbd2_buffer_commit_trigger(struct journal_head *jh, +extern void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, struct jbd2_buffer_trigger_type *triggers); extern void jbd2_buffer_abort_trigger(struct journal_head *jh, @@ -1081,7 +1082,9 @@ static inline handle_t *journal_current_handle(void) */ extern handle_t *jbd2_journal_start(journal_t *, int nblocks); -extern int jbd2_journal_restart (handle_t *, int nblocks); +extern handle_t *jbd2__journal_start(journal_t *, int nblocks, int gfp_mask); +extern int jbd2_journal_restart(handle_t *, int nblocks); +extern int jbd2__journal_restart(handle_t *, int nblocks, int gfp_mask); extern int jbd2_journal_extend (handle_t *, int nblocks); extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); @@ -1256,8 +1259,8 @@ static inline int jbd_space_needed(journal_t *journal) { int nblocks = journal->j_max_transaction_buffers; if (journal->j_committing_transaction) - nblocks += journal->j_committing_transaction-> - t_outstanding_credits; + nblocks += atomic_read(&journal->j_committing_transaction-> + t_outstanding_credits); return nblocks; } diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h index 0874ab59ffef..edb9231f1898 100644 --- a/include/linux/jffs2.h +++ b/include/linux/jffs2.h @@ -185,7 +185,7 @@ struct jffs2_raw_xref jint32_t hdr_crc; jint32_t ino; /* inode number */ jint32_t xid; /* XATTR identifier number */ - jint32_t xseqno; /* xref sequencial number */ + jint32_t xseqno; /* xref sequential number */ jint32_t node_crc; } __attribute__((packed)); diff --git a/include/linux/kdb.h b/include/linux/kdb.h index ccb2b3ec0fe8..ea6e5244ed3f 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -114,4 +114,8 @@ enum { KDB_INIT_EARLY, KDB_INIT_FULL, }; + +extern int kdbgetintenv(const char *, int *); +extern int kdb_set(int, const char **); + #endif /* !_KDB_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 8317ec4b9f3b..7d5b10ff63e0 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -171,6 +171,11 @@ static inline void might_fault(void) } #endif +struct va_format { + const char *fmt; + va_list *va; +}; + extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(long time); NORET_TYPE void panic(const char * fmt, ...) @@ -247,6 +252,13 @@ extern struct pid *session_of_pgrp(struct pid *pgrp); #define FW_WARN "[Firmware Warn]: " #define FW_INFO "[Firmware Info]: " +/* + * HW_ERR + * Add this to a message for hardware errors, so that user can report + * it to hardware vendor instead of LKML or software vendor. + */ +#define HW_ERR "[Hardware Error]: " + #ifdef CONFIG_PRINTK asmlinkage int vprintk(const char *fmt, va_list args) __attribute__ ((format (printf, 1, 0))); @@ -508,9 +520,6 @@ extern void tracing_start(void); extern void tracing_stop(void); extern void ftrace_off_permanent(void); -extern void -ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); - static inline void __attribute__ ((format (printf, 1, 2))) ____trace_printk_check_format(const char *fmt, ...) 
{ @@ -586,8 +595,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); #else -static inline void -ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } static inline int trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); @@ -728,12 +735,6 @@ extern int do_sysinfo(struct sysinfo *info); #endif /* __KERNEL__ */ -#ifndef __EXPORTED_HEADERS__ -#ifndef __KERNEL__ -#warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders -#endif /* __KERNEL__ */ -#endif /* __EXPORTED_HEADERS__ */ - #define SI_LOAD_SHIFT 16 struct sysinfo { long uptime; /* Seconds since boot */ diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 9340f34d1bb5..cc96f0f23e04 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -90,6 +90,19 @@ struct kgdb_bkpt { enum kgdb_bpstate state; }; +struct dbg_reg_def_t { + char *name; + int size; + int offset; +}; + +#ifndef DBG_MAX_REG_NUM +#define DBG_MAX_REG_NUM 0 +#else +extern struct dbg_reg_def_t dbg_reg_def[]; +extern char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs); +extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs); +#endif #ifndef KGDB_MAX_BREAKPOINTS # define KGDB_MAX_BREAKPOINTS 1000 #endif @@ -281,7 +294,7 @@ extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); extern struct kgdb_io *dbg_io_ops; extern int kgdb_hex2long(char **ptr, unsigned long *long_val); -extern int kgdb_mem2hex(char *mem, char *buf, int count); +extern char *kgdb_mem2hex(char *mem, char *buf, int count); extern int kgdb_hex2mem(char *buf, char *mem, int count); extern int kgdb_isremovedbreak(unsigned long addr); diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h deleted file mode 100644 index b616d3930c3b..000000000000 --- a/include/linux/kmemtrace.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (C) 2008 Eduard - Gabriel Munteanu - * - * This file is released under GPL version 2. - */ - -#ifndef _LINUX_KMEMTRACE_H -#define _LINUX_KMEMTRACE_H - -#ifdef __KERNEL__ - -#include <trace/events/kmem.h> - -#ifdef CONFIG_KMEMTRACE -extern void kmemtrace_init(void); -#else -static inline void kmemtrace_init(void) -{ -} -#endif - -#endif /* __KERNEL__ */ - -#endif /* _LINUX_KMEMTRACE_H */ - diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h index da0341b8ca0a..14ba4452296e 100644 --- a/include/linux/ks8842.h +++ b/include/linux/ks8842.h @@ -25,10 +25,14 @@ * struct ks8842_platform_data - Platform data of the KS8842 network driver * @macaddr: The MAC address of the device, set to all 0:s to use the on in * the chip. + * @rx_dma_channel: The DMA channel to use for RX, -1 for none. + * @tx_dma_channel: The DMA channel to use for TX, -1 for none. * */ struct ks8842_platform_data { u8 macaddr[ETH_ALEN]; + int rx_dma_channel; + int tx_dma_channel; }; #endif diff --git a/include/linux/kthread.h b/include/linux/kthread.h index aabc8a13ba71..685ea65eb803 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -30,8 +30,73 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), void kthread_bind(struct task_struct *k, unsigned int cpu); int kthread_stop(struct task_struct *k); int kthread_should_stop(void); +void *kthread_data(struct task_struct *k); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; +/* + * Simple work processor based on kthread. + * + * This provides easier way to make use of kthreads. 
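A hedged sketch of the queue/flush usage that the kthread_worker comment below describes; the thread name, work function and start/stop helpers are invented:

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/err.h>

static DEFINE_KTHREAD_WORKER(example_worker);
static struct kthread_work example_work;
static struct task_struct *example_task;

static void example_work_fn(struct kthread_work *work)
{
	pr_info("example: running in worker context\n");
}

static int example_start(void)
{
	example_task = kthread_run(kthread_worker_fn, &example_worker,
				   "example_worker");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);

	init_kthread_work(&example_work, example_work_fn);
	queue_kthread_work(&example_worker, &example_work);
	return 0;
}

static void example_stop(void)
{
	flush_kthread_worker(&example_worker);	/* drain pending work */
	kthread_stop(example_task);
}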
A kthread_work + * can be queued and flushed using queue/flush_kthread_work() + * respectively. Queued kthread_works are processed by a kthread + * running kthread_worker_fn(). + * + * A kthread_work can't be freed while it is executing. + */ +struct kthread_work; +typedef void (*kthread_work_func_t)(struct kthread_work *work); + +struct kthread_worker { + spinlock_t lock; + struct list_head work_list; + struct task_struct *task; +}; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + wait_queue_head_t done; + atomic_t flushing; + int queue_seq; + int done_seq; +}; + +#define KTHREAD_WORKER_INIT(worker) { \ + .lock = SPIN_LOCK_UNLOCKED, \ + .work_list = LIST_HEAD_INIT((worker).work_list), \ + } + +#define KTHREAD_WORK_INIT(work, fn) { \ + .node = LIST_HEAD_INIT((work).node), \ + .func = (fn), \ + .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ + .flushing = ATOMIC_INIT(0), \ + } + +#define DEFINE_KTHREAD_WORKER(worker) \ + struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) + +#define DEFINE_KTHREAD_WORK(work, fn) \ + struct kthread_work work = KTHREAD_WORK_INIT(work, fn) + +static inline void init_kthread_worker(struct kthread_worker *worker) +{ + *worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker); +} + +static inline void init_kthread_work(struct kthread_work *work, + kthread_work_func_t fn) +{ + *work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn); +} + +int kthread_worker_fn(void *worker_ptr); + +bool queue_kthread_work(struct kthread_worker *worker, + struct kthread_work *work); +void flush_kthread_work(struct kthread_work *work); +void flush_kthread_worker(struct kthread_worker *worker); + #endif /* _LINUX_KTHREAD_H */ diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 23ea02253900..636fc381c897 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -524,6 +524,12 @@ struct kvm_enable_cap { #define KVM_CAP_PPC_OSI 52 #define KVM_CAP_PPC_UNSET_IRQ 53 #define KVM_CAP_ENABLE_CAP 54 +#ifdef __KVM_HAVE_XSAVE +#define KVM_CAP_XSAVE 55 +#endif +#ifdef __KVM_HAVE_XCRS +#define KVM_CAP_XCRS 56 +#endif #ifdef KVM_CAP_IRQ_ROUTING @@ -613,6 +619,7 @@ struct kvm_clock_data { */ #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) +/* KVM_SET_MEMORY_ALIAS is obsolete: */ #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) @@ -714,6 +721,12 @@ struct kvm_clock_data { #define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs) #define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs) #define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap) +/* Available with KVM_CAP_XSAVE */ +#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave) +#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave) +/* Available with KVM_CAP_XCRS */ +#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs) +#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs) #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7cb116afa1cd..c13cc48697aa 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -81,13 +81,14 @@ struct kvm_vcpu { int vcpu_id; struct mutex mutex; int cpu; + atomic_t guest_mode; struct kvm_run *run; unsigned long requests; unsigned long guest_debug; int srcu_idx; int fpu_active; - int guest_fpu_loaded; + int guest_fpu_loaded, guest_xcr0_loaded; wait_queue_head_t wq; int 
sigset_active; sigset_t sigset; @@ -123,6 +124,7 @@ struct kvm_memory_slot { } *lpage_info[KVM_NR_PAGE_SIZES - 1]; unsigned long userspace_addr; int user_alloc; + int id; }; static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) @@ -266,6 +268,8 @@ extern pfn_t bad_pfn; int is_error_page(struct page *page); int is_error_pfn(pfn_t pfn); +int is_hwpoison_pfn(pfn_t pfn); +int is_fault_pfn(pfn_t pfn); int kvm_is_error_hva(unsigned long addr); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, @@ -284,8 +288,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, int user_alloc); void kvm_disable_largepages(void); void kvm_arch_flush_shadow(struct kvm *kvm); -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); -gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn); struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); @@ -445,7 +447,8 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, struct kvm_irq_mask_notifier *kimn); void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, struct kvm_irq_mask_notifier *kimn); -void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); +void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, + bool mask); #ifdef __KVM_HAVE_IOAPIC void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic, @@ -562,10 +565,6 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se } #endif -#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION -#define unalias_gfn_instantiation unalias_gfn -#endif - #ifdef CONFIG_HAVE_KVM_IRQCHIP #define KVM_MAX_IRQ_ROUTES 1024 @@ -628,5 +627,25 @@ static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, #endif +static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +{ + set_bit(req, &vcpu->requests); +} + +static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu) +{ + return test_and_set_bit(req, &vcpu->requests); +} + +static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) +{ + if (test_bit(req, &vcpu->requests)) { + clear_bit(req, &vcpu->requests); + return true; + } else { + return false; + } +} + #endif diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fb46efbeabec..7ac0d4eee430 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -32,11 +32,11 @@ typedef unsigned long gva_t; typedef u64 gpa_t; -typedef unsigned long gfn_t; +typedef u64 gfn_t; typedef unsigned long hva_t; typedef u64 hpa_t; -typedef unsigned long hfn_t; +typedef u64 hfn_t; typedef hfn_t pfn_t; diff --git a/include/linux/libata.h b/include/linux/libata.h index b85f3ff34d7d..f010f18a0f86 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -751,6 +751,7 @@ struct ata_port { struct ata_host *host; struct device *dev; + struct mutex scsi_scan_mutex; struct delayed_work hotplug_task; struct work_struct scsi_rescan_task; diff --git a/include/linux/list.h b/include/linux/list.h index 5d57a3a1fa1b..d167b5d7c0ac 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -1,6 +1,7 @@ #ifndef _LINUX_LIST_H #define _LINUX_LIST_H +#include <linux/types.h> #include <linux/stddef.h> #include <linux/poison.h> #include <linux/prefetch.h> @@ -16,10 +17,6 @@ * using the generic single-entry routines. 
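Going back to the kvm_make_request()/kvm_check_request() helpers added to kvm_host.h above, a hedged sketch of the intended producer/consumer pairing; the handler functions are illustrative only:

#include <linux/kvm_host.h>

/* Producer: ask a vcpu to flush its TLB and wake it up. */
static void example_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);
}

/* Consumer: the arch vcpu-run loop drains requests before guest entry. */
static void example_handle_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvm_flush_remote_tlbs(vcpu->kvm);	/* illustrative handler */
}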
*/ -struct list_head { - struct list_head *next, *prev; -}; - #define LIST_HEAD_INIT(name) { &(name), &(name) } #define LIST_HEAD(name) \ @@ -566,14 +563,6 @@ static inline void list_splice_tail_init(struct list_head *list, * You lose the ability to access the tail in O(1). */ -struct hlist_head { - struct hlist_node *first; -}; - -struct hlist_node { - struct hlist_node *next, **pprev; -}; - #define HLIST_HEAD_INIT { .first = NULL } #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) diff --git a/include/linux/lmb.h b/include/linux/lmb.h deleted file mode 100644 index f3d14333ebed..000000000000 --- a/include/linux/lmb.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef _LINUX_LMB_H -#define _LINUX_LMB_H -#ifdef __KERNEL__ - -/* - * Logical memory blocks. - * - * Copyright (C) 2001 Peter Bergner, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/init.h> -#include <linux/mm.h> - -#define MAX_LMB_REGIONS 128 - -struct lmb_property { - u64 base; - u64 size; -}; - -struct lmb_region { - unsigned long cnt; - u64 size; - struct lmb_property region[MAX_LMB_REGIONS+1]; -}; - -struct lmb { - unsigned long debug; - u64 rmo_size; - struct lmb_region memory; - struct lmb_region reserved; -}; - -extern struct lmb lmb; - -extern void __init lmb_init(void); -extern void __init lmb_analyze(void); -extern long lmb_add(u64 base, u64 size); -extern long lmb_remove(u64 base, u64 size); -extern long __init lmb_free(u64 base, u64 size); -extern long __init lmb_reserve(u64 base, u64 size); -extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, - u64 (*nid_range)(u64, u64, int *)); -extern u64 __init lmb_alloc(u64 size, u64 align); -extern u64 __init lmb_alloc_base(u64 size, - u64, u64 max_addr); -extern u64 __init __lmb_alloc_base(u64 size, - u64 align, u64 max_addr); -extern u64 __init lmb_phys_mem_size(void); -extern u64 lmb_end_of_DRAM(void); -extern void __init lmb_enforce_memory_limit(u64 memory_limit); -extern int __init lmb_is_reserved(u64 addr); -extern int lmb_is_region_reserved(u64 base, u64 size); -extern int lmb_find(struct lmb_property *res); - -extern void lmb_dump_all(void); - -static inline u64 -lmb_size_bytes(struct lmb_region *type, unsigned long region_nr) -{ - return type->region[region_nr].size; -} -static inline u64 -lmb_size_pages(struct lmb_region *type, unsigned long region_nr) -{ - return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT; -} -static inline u64 -lmb_start_pfn(struct lmb_region *type, unsigned long region_nr) -{ - return type->region[region_nr].base >> PAGE_SHIFT; -} -static inline u64 -lmb_end_pfn(struct lmb_region *type, unsigned long region_nr) -{ - return lmb_start_pfn(type, region_nr) + - lmb_size_pages(type, region_nr); -} - -#include <asm/lmb.h> - -#endif /* __KERNEL__ */ - -#endif /* _LINUX_LMB_H */ diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h index de48d167568b..78fbf24f357a 100644 --- a/include/linux/lru_cache.h +++ b/include/linux/lru_cache.h @@ -262,7 +262,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char * @lc: the lru cache to operate on * * Note that the reference counts and order on the active and lru lists may - * still change. Returns true if we aquired the lock. + * still change. 
Returns true if we acquired the lock. */ static inline int lc_try_lock(struct lru_cache *lc) { diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 6907251d5200..112a55033352 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -90,10 +90,42 @@ struct common_audit_data { u32 requested; u32 audited; u32 denied; + /* + * auditdeny is a bit tricky and unintuitive. See the + * comments in avc.c for it's meaning and usage. + */ + u32 auditdeny; struct av_decision *avd; int result; } selinux_audit_data; #endif +#ifdef CONFIG_SECURITY_APPARMOR + struct { + int error; + int op; + int type; + void *profile; + const char *name; + const char *info; + union { + void *target; + struct { + long pos; + void *target; + } iface; + struct { + int rlim; + unsigned long max; + } rlim; + struct { + const char *target; + u32 request; + u32 denied; + uid_t ouid; + } fs; + }; + } apparmor_audit_data; +#endif }; /* these callback will be implemented by a specific LSM */ void (*lsm_pre_audit)(struct audit_buffer *, void *); diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 78c3bed1c3f5..b5e7f2202484 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -251,7 +251,7 @@ struct mISDNhead { unsigned int prim; unsigned int id; -} __attribute__((packed)); +} __packed; #define MISDN_HEADER_LEN sizeof(struct mISDNhead) #define MAX_DATA_SIZE 2048 diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h new file mode 100644 index 000000000000..d0f08018335d --- /dev/null +++ b/include/linux/marvell_phy.h @@ -0,0 +1,21 @@ +#ifndef _MARVELL_PHY_H +#define _MARVELL_PHY_H + +/* Mask used for ID comparisons */ +#define MARVELL_PHY_ID_MASK 0xfffffff0 + +/* Known PHY IDs */ +#define MARVELL_PHY_ID_88E1101 0x01410c60 +#define MARVELL_PHY_ID_88E1112 0x01410c90 +#define MARVELL_PHY_ID_88E1111 0x01410cc0 +#define MARVELL_PHY_ID_88E1118 0x01410e10 +#define MARVELL_PHY_ID_88E1121R 0x01410cb0 +#define MARVELL_PHY_ID_88E1145 0x01410cd0 +#define MARVELL_PHY_ID_88E1240 0x01410e30 +#define MARVELL_PHY_ID_88EC048 0x01410e90 + +/* struct phy_device dev_flags definitions */ +#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 +#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 + +#endif /* _MARVELL_PHY_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h new file mode 100644 index 000000000000..a59faf2b5edd --- /dev/null +++ b/include/linux/memblock.h @@ -0,0 +1,89 @@ +#ifndef _LINUX_MEMBLOCK_H +#define _LINUX_MEMBLOCK_H +#ifdef __KERNEL__ + +/* + * Logical memory blocks. + * + * Copyright (C) 2001 Peter Bergner, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/init.h> +#include <linux/mm.h> + +#define MAX_MEMBLOCK_REGIONS 128 + +struct memblock_property { + u64 base; + u64 size; +}; + +struct memblock_region { + unsigned long cnt; + u64 size; + struct memblock_property region[MAX_MEMBLOCK_REGIONS+1]; +}; + +struct memblock { + unsigned long debug; + u64 rmo_size; + struct memblock_region memory; + struct memblock_region reserved; +}; + +extern struct memblock memblock; + +extern void __init memblock_init(void); +extern void __init memblock_analyze(void); +extern long memblock_add(u64 base, u64 size); +extern long memblock_remove(u64 base, u64 size); +extern long __init memblock_free(u64 base, u64 size); +extern long __init memblock_reserve(u64 base, u64 size); +extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, + u64 (*nid_range)(u64, u64, int *)); +extern u64 __init memblock_alloc(u64 size, u64 align); +extern u64 __init memblock_alloc_base(u64 size, + u64, u64 max_addr); +extern u64 __init __memblock_alloc_base(u64 size, + u64 align, u64 max_addr); +extern u64 __init memblock_phys_mem_size(void); +extern u64 memblock_end_of_DRAM(void); +extern void __init memblock_enforce_memory_limit(u64 memory_limit); +extern int __init memblock_is_reserved(u64 addr); +extern int memblock_is_region_reserved(u64 base, u64 size); +extern int memblock_find(struct memblock_property *res); + +extern void memblock_dump_all(void); + +static inline u64 +memblock_size_bytes(struct memblock_region *type, unsigned long region_nr) +{ + return type->region[region_nr].size; +} +static inline u64 +memblock_size_pages(struct memblock_region *type, unsigned long region_nr) +{ + return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT; +} +static inline u64 +memblock_start_pfn(struct memblock_region *type, unsigned long region_nr) +{ + return type->region[region_nr].base >> PAGE_SHIFT; +} +static inline u64 +memblock_end_pfn(struct memblock_region *type, unsigned long region_nr) +{ + return memblock_start_pfn(type, region_nr) + + memblock_size_pages(type, region_nr); +} + +#include <asm/memblock.h> + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_MEMBLOCK_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index b969efb03787..7a9ab7db1975 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) * querying the cache size, so a fastpath for that case is appropriate. 
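The renamed memblock interface above is used the same way the old lmb one was; a hedged sketch of early boot code, with made-up RAM size, reservation and allocation values:

#include <linux/kernel.h>
#include <linux/memblock.h>

void __init example_memblock_setup(u64 initrd_start, u64 initrd_size)
{
	u64 table;

	memblock_init();
	memblock_add(0, 0x40000000ULL);			/* register 1 GiB of RAM */
	memblock_reserve(initrd_start, initrd_size);	/* keep the initrd safe */
	memblock_analyze();

	/* 64 KiB, 4 KiB aligned, must live below the 4 GiB boundary */
	table = __memblock_alloc_base(0x10000, 0x1000, 0x100000000ULL);
	if (!table)
		panic("example: cannot allocate firmware table");

	memblock_dump_all();
}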
*/ struct shrinker { - int (*shrink)(int nr_to_scan, gfp_t gfp_mask); + int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask); int seeks; /* seeks to recreate an obj */ /* These are for internal use */ @@ -1465,6 +1465,14 @@ extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); extern atomic_long_t mce_bad_pages; extern int soft_offline_page(struct page *page, int flags); +#ifdef CONFIG_MEMORY_FAILURE +int is_hwpoison_address(unsigned long addr); +#else +static inline int is_hwpoison_address(unsigned long addr) +{ + return 0; +} +#endif extern void dump_page(struct page *page); diff --git a/include/linux/msi.h b/include/linux/msi.h index 6991ab5b24d1..91b05c171854 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -14,8 +14,10 @@ struct irq_desc; extern void mask_msi_irq(unsigned int irq); extern void unmask_msi_irq(unsigned int irq); extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); +extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); +extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); struct msi_desc { diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index cbbbe9bfecad..30b0c4e78f91 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -19,6 +19,11 @@ struct mv643xx_eth_shared_platform_data { struct mbus_dram_target_info *dram; struct platform_device *shared_smi; unsigned int t_clk; + /* + * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default + * limit of 9KiB will be used. 
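Referring back to the struct shrinker change at the top of this hunk: the callback now receives a pointer to the shrinker itself, so per-instance state can be recovered with container_of(). A hedged sketch, with a hypothetical cache structure and eviction helper:

#include <linux/mm.h>

struct example_cache {
	struct shrinker shrinker;
	atomic_t nr_objects;
};

static void example_cache_evict(struct example_cache *cache, int nr)
{
	/* drop up to nr cached objects ... */
	atomic_sub(nr, &cache->nr_objects);
}

static int example_cache_shrink(struct shrinker *shrink, int nr_to_scan,
				gfp_t gfp_mask)
{
	struct example_cache *cache =
		container_of(shrink, struct example_cache, shrinker);

	if (nr_to_scan)
		example_cache_evict(cache, nr_to_scan);

	return atomic_read(&cache->nr_objects);
}

static void example_cache_register(struct example_cache *cache)
{
	cache->shrinker.shrink = example_cache_shrink;
	cache->shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&cache->shrinker);
}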
+ */ + int tx_csum_limit; }; #define MV643XX_ETH_PHY_ADDR_DEFAULT 0 diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 155719dab813..bb58854a8061 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h @@ -88,7 +88,7 @@ struct nbd_request { char handle[8]; __be64 from; __be32 len; -} __attribute__ ((packed)); +} __packed; /* * This is the reply packet that nbd-server sends back to the client after diff --git a/include/linux/ncp.h b/include/linux/ncp.h index 99f0adeeb3f3..3ace8370e61e 100644 --- a/include/linux/ncp.h +++ b/include/linux/ncp.h @@ -27,7 +27,7 @@ struct ncp_request_header { __u8 conn_high; __u8 function; __u8 data[0]; -} __attribute__((packed)); +} __packed; #define NCP_REPLY (0x3333) #define NCP_WATCHDOG (0x3E3E) @@ -42,7 +42,7 @@ struct ncp_reply_header { __u8 completion_code; __u8 connection_state; __u8 data[0]; -} __attribute__((packed)); +} __packed; #define NCP_VOLNAME_LEN (16) #define NCP_NUMBER_OF_VOLUMES (256) @@ -158,7 +158,7 @@ struct nw_info_struct { #ifdef __KERNEL__ struct nw_nfs_info nfs; #endif -} __attribute__((packed)); +} __packed; /* modify mask - use with MODIFY_DOS_INFO structure */ #define DM_ATTRIBUTES (cpu_to_le32(0x02)) @@ -190,12 +190,12 @@ struct nw_modify_dos_info { __u16 inheritanceGrantMask; __u16 inheritanceRevokeMask; __u32 maximumSpace; -} __attribute__((packed)); +} __packed; struct nw_search_sequence { __u8 volNumber; __u32 dirBase; __u32 sequence; -} __attribute__((packed)); +} __packed; #endif /* _LINUX_NCP_H */ diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index 5ec9ca671687..8da05bc098ca 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h @@ -104,13 +104,13 @@ struct ncp_server { unsigned int state; /* STREAM only: receiver state */ struct { - __u32 magic __attribute__((packed)); - __u32 len __attribute__((packed)); - __u16 type __attribute__((packed)); - __u16 p1 __attribute__((packed)); - __u16 p2 __attribute__((packed)); - __u16 p3 __attribute__((packed)); - __u16 type2 __attribute__((packed)); + __u32 magic __packed; + __u32 len __packed; + __u16 type __packed; + __u16 p1 __packed; + __u16 p2 __packed; + __u16 p3 __packed; + __u16 type2 __packed; } buf; /* STREAM only: temporary buffer */ unsigned char* ptr; /* STREAM only: pointer to data */ size_t len; /* STREAM only: length of data to receive */ diff --git a/include/linux/net.h b/include/linux/net.h index 2b4deeeb8646..dee0b11a8759 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -129,10 +129,9 @@ struct socket_wq { * @type: socket type (%SOCK_STREAM, etc) * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) * @ops: protocol specific socket operations - * @fasync_list: Asynchronous wake up list * @file: File back pointer for gc * @sk: internal networking protocol agnostic socket representation - * @wait: wait queue for several uses + * @wq: wait queue for several uses */ struct socket { socket_state state; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 40291f375024..46c36ffe20ee 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -54,6 +54,7 @@ struct vlan_group; struct netpoll_info; +struct phy_device; /* 802.11 specific */ struct wireless_dev; /* source back-compat hooks */ @@ -65,6 +66,11 @@ struct wireless_dev; #define HAVE_FREE_NETDEV /* free_netdev() */ #define HAVE_NETDEV_PRIV /* netdev_priv() */ +/* hardware address assignment types */ +#define NET_ADDR_PERM 0 /* address is permanent (default) */ +#define NET_ADDR_RANDOM 1 /* address is generated randomly */ 
+#define NET_ADDR_STOLEN 2 /* address is stolen from other device */ + /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ #define NET_RX_DROP 1 /* packet dropped */ @@ -159,45 +165,39 @@ static inline bool dev_xmit_complete(int rc) #define MAX_HEADER (LL_MAX_HEADER + 48) #endif -#endif /* __KERNEL__ */ - /* - * Network device statistics. Akin to the 2.0 ether stats but - * with byte counters. + * Old network device statistics. Fields are native words + * (unsigned long) so they can be read and written atomically. */ struct net_device_stats { - unsigned long rx_packets; /* total packets received */ - unsigned long tx_packets; /* total packets transmitted */ - unsigned long rx_bytes; /* total bytes received */ - unsigned long tx_bytes; /* total bytes transmitted */ - unsigned long rx_errors; /* bad packets received */ - unsigned long tx_errors; /* packet transmit problems */ - unsigned long rx_dropped; /* no space in linux buffers */ - unsigned long tx_dropped; /* no space available in linux */ - unsigned long multicast; /* multicast packets received */ + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_errors; + unsigned long tx_errors; + unsigned long rx_dropped; + unsigned long tx_dropped; + unsigned long multicast; unsigned long collisions; - - /* detailed rx_errors: */ unsigned long rx_length_errors; - unsigned long rx_over_errors; /* receiver ring buff overflow */ - unsigned long rx_crc_errors; /* recved pkt with crc error */ - unsigned long rx_frame_errors; /* recv'd frame alignment error */ - unsigned long rx_fifo_errors; /* recv'r fifo overrun */ - unsigned long rx_missed_errors; /* receiver missed packet */ - - /* detailed tx_errors */ + unsigned long rx_over_errors; + unsigned long rx_crc_errors; + unsigned long rx_frame_errors; + unsigned long rx_fifo_errors; + unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; - - /* for cslip etc */ unsigned long rx_compressed; unsigned long tx_compressed; }; +#endif /* __KERNEL__ */ + /* Media selection options. */ enum { @@ -381,6 +381,8 @@ enum gro_result { }; typedef enum gro_result gro_result_t; +typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb); + extern void __napi_schedule(struct napi_struct *n); static inline int napi_disable_pending(struct napi_struct *n) @@ -504,9 +506,9 @@ struct netdev_queue { * please use this field instead of dev->trans_start */ unsigned long trans_start; - unsigned long tx_bytes; - unsigned long tx_packets; - unsigned long tx_dropped; + u64 tx_bytes; + u64 tx_packets; + u64 tx_dropped; } ____cacheline_aligned_in_smp; #ifdef CONFIG_RPS @@ -660,10 +662,19 @@ struct netdev_rx_queue { * Callback uses when the transmitter has not made any progress * for dev->watchdog ticks. * + * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + * struct rtnl_link_stats64 *storage); * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); * Called when a user wants to get the network device usage - * statistics. If not defined, the counters in dev->stats will - * be used. + * statistics. Drivers must do one of the following: + * 1. Define @ndo_get_stats64 to fill in a zero-initialised + * rtnl_link_stats64 structure passed by the caller. + * 2. 
Define @ndo_get_stats to update a net_device_stats structure + * (which should normally be dev->stats) and return a pointer to + * it. The structure may be changed asynchronously only if each + * field is written atomically. + * 3. Update dev->stats asynchronously and atomically, and define + * neither operation. * * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp); * If device support VLAN receive accleration @@ -718,6 +729,8 @@ struct net_device_ops { struct neigh_parms *); void (*ndo_tx_timeout) (struct net_device *dev); + struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); void (*ndo_vlan_rx_register)(struct net_device *dev, @@ -728,6 +741,8 @@ struct net_device_ops { unsigned short vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); + int (*ndo_netpoll_setup)(struct net_device *dev, + struct netpoll_info *info); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, @@ -775,11 +790,11 @@ struct net_device { /* * This is the first field of the "visible" part of this structure * (i.e. as seen by users in the "Space.c" file). It is the name - * the interface. + * of the interface. */ char name[IFNAMSIZ]; - struct pm_qos_request_list *pm_qos_req; + struct pm_qos_request_list pm_qos_req; /* device name hash chain */ struct hlist_node name_hlist; @@ -847,7 +862,8 @@ struct net_device { #define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) /* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) +#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ + NETIF_F_TSO6 | NETIF_F_UFO) #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) @@ -908,6 +924,7 @@ struct net_device { /* Interface address info. 
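A minimal sketch of the first option above, for a hypothetical driver whose private data already keeps 64-bit counters (struct example_priv, the function names and the ops table are illustrative, not part of this patch):

#include <linux/netdevice.h>

/* Hypothetical per-device counters; a real driver would typically
 * aggregate per-ring or per-cpu counters here. */
struct example_priv {
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
};

/* Fill in the zero-initialised structure supplied by the caller and
 * return it (option 1 in the list above). */
static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
        struct example_priv *priv = netdev_priv(dev);

        storage->rx_packets = priv->rx_packets;
        storage->rx_bytes   = priv->rx_bytes;
        storage->tx_packets = priv->tx_packets;
        storage->tx_bytes   = priv->tx_bytes;
        return storage;
}

static const struct net_device_ops example_netdev_ops = {
        /* other operations omitted */
        .ndo_get_stats64        = example_get_stats64,
};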
*/ unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ + unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ @@ -957,6 +974,8 @@ struct net_device { #endif struct netdev_queue rx_queue; + rx_handler_func_t *rx_handler; + void *rx_handler_data; struct netdev_queue *_tx ____cacheline_aligned_in_smp; @@ -1024,10 +1043,6 @@ struct net_device { /* mid-layer private */ void *ml_priv; - /* bridge stuff */ - struct net_bridge_port *br_port; - /* macvlan */ - struct macvlan_port *macvlan_port; /* GARP */ struct garp_port *garp_port; @@ -1057,6 +1072,9 @@ struct net_device { #endif /* n-tuple filter list attached to this device */ struct ethtool_rx_ntuple_list ethtool_ntuple_list; + + /* phy device may attach itself for hardware timestamping */ + struct phy_device *phydev; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -1087,11 +1105,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, static inline struct net *dev_net(const struct net_device *dev) { -#ifdef CONFIG_NET_NS - return dev->nd_net; -#else - return &init_net; -#endif + return read_pnet(&dev->nd_net); } static inline @@ -1272,8 +1286,8 @@ extern void dev_add_pack(struct packet_type *pt); extern void dev_remove_pack(struct packet_type *pt); extern void __dev_remove_pack(struct packet_type *pt); -extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags, - unsigned short mask); +extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, + unsigned short mask); extern struct net_device *dev_get_by_name(struct net *net, const char *name); extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); extern struct net_device *__dev_get_by_name(struct net *net, const char *name); @@ -1656,6 +1670,9 @@ static inline int netif_is_multiqueue(const struct net_device *dev) return (dev->num_tx_queues > 1); } +extern void netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq); + /* Use this variant when it is known for sure that it * is executing from hardware interrupt context or with hardware interrupts * disabled. @@ -1693,6 +1710,11 @@ static inline void napi_free_frags(struct napi_struct *napi) napi->skb = NULL; } +extern int netdev_rx_handler_register(struct net_device *dev, + rx_handler_func_t *rx_handler, + void *rx_handler_data); +extern void netdev_rx_handler_unregister(struct net_device *dev); + extern void netif_nit_deliver(struct sk_buff *skb); extern int dev_valid_name(const char *name); extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); @@ -1772,6 +1794,8 @@ extern void netif_carrier_on(struct net_device *dev); extern void netif_carrier_off(struct net_device *dev); +extern void netif_notify_peers(struct net_device *dev); + /** * netif_dormant_on - mark device as dormant. 
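The rx_handler hook added above takes over the role of the dedicated bridge and macvlan pointers dropped from struct net_device in this file. A sketch of how a layered device might use it; the return convention noted in the comment is an assumption about the core receive path, not something this header states:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_handler(struct sk_buff *skb)
{
        /* Inspect or steal the packet here.  Assumed convention: return
         * NULL once the skb has been consumed, or return the skb to let
         * normal receive processing continue. */
        return skb;
}

static int example_attach(struct net_device *dev, void *port_priv)
{
        /* port_priv is handed back through dev->rx_handler_data */
        return netdev_rx_handler_register(dev, example_rx_handler, port_priv);
}

static void example_detach(struct net_device *dev)
{
        netdev_rx_handler_unregister(dev);
}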
* @dev: network device @@ -2116,8 +2140,10 @@ extern void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ extern void dev_load(struct net *net, const char *name); extern void dev_mcast_init(void); -extern const struct net_device_stats *dev_get_stats(struct net_device *dev); -extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats); +extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *storage); +extern void dev_txq_stats_fold(const struct net_device *dev, + struct rtnl_link_stats64 *stats); extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; @@ -2227,25 +2253,23 @@ static inline const char *netdev_name(const struct net_device *dev) return dev->name; } -#define netdev_printk(level, netdev, format, args...) \ - dev_printk(level, (netdev)->dev.parent, \ - "%s: " format, \ - netdev_name(netdev), ##args) - -#define netdev_emerg(dev, format, args...) \ - netdev_printk(KERN_EMERG, dev, format, ##args) -#define netdev_alert(dev, format, args...) \ - netdev_printk(KERN_ALERT, dev, format, ##args) -#define netdev_crit(dev, format, args...) \ - netdev_printk(KERN_CRIT, dev, format, ##args) -#define netdev_err(dev, format, args...) \ - netdev_printk(KERN_ERR, dev, format, ##args) -#define netdev_warn(dev, format, args...) \ - netdev_printk(KERN_WARNING, dev, format, ##args) -#define netdev_notice(dev, format, args...) \ - netdev_printk(KERN_NOTICE, dev, format, ##args) -#define netdev_info(dev, format, args...) \ - netdev_printk(KERN_INFO, dev, format, ##args) +extern int netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...) + __attribute__ ((format (printf, 3, 4))); +extern int netdev_emerg(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_alert(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_crit(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_err(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_warn(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_notice(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_info(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); #if defined(DEBUG) #define netdev_dbg(__dev, format, args...) \ @@ -2293,20 +2317,26 @@ do { \ netdev_printk(level, (dev), fmt, ##args); \ } while (0) +#define netif_level(level, priv, type, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + #define netif_emerg(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args) + netif_level(emerg, priv, type, dev, fmt, ##args) #define netif_alert(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args) + netif_level(alert, priv, type, dev, fmt, ##args) #define netif_crit(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args) + netif_level(crit, priv, type, dev, fmt, ##args) #define netif_err(priv, type, dev, fmt, args...) 
\ - netif_printk(priv, type, KERN_ERR, dev, fmt, ##args) + netif_level(err, priv, type, dev, fmt, ##args) #define netif_warn(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args) + netif_level(warn, priv, type, dev, fmt, ##args) #define netif_notice(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args) + netif_level(notice, priv, type, dev, fmt, ##args) #define netif_info(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args) + netif_level(info, priv, type, dev, fmt, ##args) #if defined(DEBUG) #define netif_dbg(priv, type, dev, format, args...) \ @@ -2329,7 +2359,7 @@ do { \ #endif #if defined(VERBOSE_DEBUG) -#define netif_vdbg netdev_dbg +#define netif_vdbg netif_dbg #else #define netif_vdbg(priv, type, dev, format, args...) \ ({ \ diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index 48767cd16453..edeeabdc1500 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -3,11 +3,13 @@ header-y += nf_conntrack_tuple_common.h header-y += nfnetlink_conntrack.h header-y += nfnetlink_log.h header-y += nfnetlink_queue.h +header-y += xt_CHECKSUM.h header-y += xt_CLASSIFY.h header-y += xt_CONNMARK.h header-y += xt_CONNSECMARK.h header-y += xt_CT.h header-y += xt_DSCP.h +header-y += xt_IDLETIMER.h header-y += xt_LED.h header-y += xt_MARK.h header-y += xt_NFLOG.h @@ -18,17 +20,19 @@ header-y += xt_TCPMSS.h header-y += xt_TCPOPTSTRIP.h header-y += xt_TEE.h header-y += xt_TPROXY.h +header-y += xt_cluster.h header-y += xt_comment.h header-y += xt_connbytes.h header-y += xt_connlimit.h header-y += xt_connmark.h header-y += xt_conntrack.h -header-y += xt_cluster.h +header-y += xt_cpu.h header-y += xt_dccp.h header-y += xt_dscp.h header-y += xt_esp.h header-y += xt_hashlimit.h header-y += xt_iprange.h +header-y += xt_ipvs.h header-y += xt_helper.h header-y += xt_length.h header-y += xt_limit.h diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 14e6d32002c4..1afd18c855ec 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -76,6 +76,10 @@ enum ip_conntrack_status { /* Conntrack is a template */ IPS_TEMPLATE_BIT = 11, IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT), + + /* Conntrack is a fake untracked entry */ + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), }; /* Connection tracking event types */ diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h index d3bab7a2c9b7..ea9b8d380527 100644 --- a/include/linux/netfilter/nfnetlink_log.h +++ b/include/linux/netfilter/nfnetlink_log.h @@ -89,6 +89,7 @@ enum nfulnl_attr_config { #define NFULNL_COPY_NONE 0x00 #define NFULNL_COPY_META 0x01 #define NFULNL_COPY_PACKET 0x02 +/* 0xff is reserved, don't use it for new copy modes. */ #define NFULNL_CFG_F_SEQ 0x0001 #define NFULNL_CFG_F_SEQ_GLOBAL 0x0002 diff --git a/include/linux/netfilter/xt_CHECKSUM.h b/include/linux/netfilter/xt_CHECKSUM.h new file mode 100644 index 000000000000..9a2e4661654e --- /dev/null +++ b/include/linux/netfilter/xt_CHECKSUM.h @@ -0,0 +1,20 @@ +/* Header file for iptables ipt_CHECKSUM target + * + * (C) 2002 by Harald Welte <laforge@gnumonks.org> + * (C) 2010 Red Hat Inc + * Author: Michael S. 
Tsirkin <mst@redhat.com> + * + * This software is distributed under GNU GPL v2, 1991 +*/ +#ifndef _XT_CHECKSUM_TARGET_H +#define _XT_CHECKSUM_TARGET_H + +#include <linux/types.h> + +#define XT_CHECKSUM_OP_FILL 0x01 /* fill in checksum in IP header */ + +struct xt_CHECKSUM_info { + __u8 operation; /* bitset of operations */ +}; + +#endif /* _XT_CHECKSUM_TARGET_H */ diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h new file mode 100644 index 000000000000..3e1aa1be942e --- /dev/null +++ b/include/linux/netfilter/xt_IDLETIMER.h @@ -0,0 +1,45 @@ +/* + * linux/include/linux/netfilter/xt_IDLETIMER.h + * + * Header file for Xtables timer target module. + * + * Copyright (C) 2004, 2010 Nokia Corporation + * Written by Timo Teras <ext-timo.teras@nokia.com> + * + * Converted to x_tables and forward-ported to 2.6.34 + * by Luciano Coelho <luciano.coelho@nokia.com> + * + * Contact: Luciano Coelho <luciano.coelho@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef _XT_IDLETIMER_H +#define _XT_IDLETIMER_H + +#include <linux/types.h> + +#define MAX_IDLETIMER_LABEL_SIZE 28 + +struct idletimer_tg_info { + __u32 timeout; + + char label[MAX_IDLETIMER_LABEL_SIZE]; + + /* for kernel module internal use only */ + struct idletimer_tg *timer __attribute((aligned(8))); +}; + +#endif diff --git a/include/linux/netfilter/xt_cpu.h b/include/linux/netfilter/xt_cpu.h new file mode 100644 index 000000000000..93c7f11d8f42 --- /dev/null +++ b/include/linux/netfilter/xt_cpu.h @@ -0,0 +1,11 @@ +#ifndef _XT_CPU_H +#define _XT_CPU_H + +#include <linux/types.h> + +struct xt_cpu_info { + __u32 cpu; + __u32 invert; +}; + +#endif /*_XT_CPU_H*/ diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h new file mode 100644 index 000000000000..1167aeb7a347 --- /dev/null +++ b/include/linux/netfilter/xt_ipvs.h @@ -0,0 +1,27 @@ +#ifndef _XT_IPVS_H +#define _XT_IPVS_H + +enum { + XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ + XT_IPVS_PROTO = 1 << 1, + XT_IPVS_VADDR = 1 << 2, + XT_IPVS_VPORT = 1 << 3, + XT_IPVS_DIR = 1 << 4, + XT_IPVS_METHOD = 1 << 5, + XT_IPVS_VPORTCTL = 1 << 6, + XT_IPVS_MASK = (1 << 7) - 1, + XT_IPVS_ONCE_MASK = XT_IPVS_MASK & ~XT_IPVS_IPVS_PROPERTY +}; + +struct xt_ipvs_mtinfo { + union nf_inet_addr vaddr, vmask; + __be16 vport; + __u8 l4proto; + __u8 fwd_method; + __be16 vportctl; + + __u8 invert; + __u8 bitmask; +}; + +#endif /* _XT_IPVS_H */ diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h index 8dc89dfc1361..b0d28c659ab7 100644 --- a/include/linux/netfilter/xt_quota.h +++ b/include/linux/netfilter/xt_quota.h @@ -11,9 +11,9 @@ struct xt_quota_priv; struct xt_quota_info { u_int32_t flags; u_int32_t pad; + aligned_u64 quota; /* Used internally by the kernel */ - aligned_u64 quota; struct xt_quota_priv *master; }; diff --git 
a/include/linux/netfilter_ipv4/ipt_LOG.h b/include/linux/netfilter_ipv4/ipt_LOG.h index 90fa6525ef9c..dcdbadf9fd4a 100644 --- a/include/linux/netfilter_ipv4/ipt_LOG.h +++ b/include/linux/netfilter_ipv4/ipt_LOG.h @@ -7,7 +7,8 @@ #define IPT_LOG_IPOPT 0x04 /* Log IP options */ #define IPT_LOG_UID 0x08 /* Log UID owning local socket */ #define IPT_LOG_NFLOG 0x10 /* Unsupported, don't reuse */ -#define IPT_LOG_MASK 0x1f +#define IPT_LOG_MACDECODE 0x20 /* Decode MAC header */ +#define IPT_LOG_MASK 0x2f struct ipt_log_info { unsigned char level; diff --git a/include/linux/netfilter_ipv6/ip6t_LOG.h b/include/linux/netfilter_ipv6/ip6t_LOG.h index 0d0119b0458c..9dd5579e02ec 100644 --- a/include/linux/netfilter_ipv6/ip6t_LOG.h +++ b/include/linux/netfilter_ipv6/ip6t_LOG.h @@ -7,7 +7,8 @@ #define IP6T_LOG_IPOPT 0x04 /* Log IP options */ #define IP6T_LOG_UID 0x08 /* Log UID owning local socket */ #define IP6T_LOG_NFLOG 0x10 /* Unsupported, don't use */ -#define IP6T_LOG_MASK 0x1f +#define IP6T_LOG_MACDECODE 0x20 /* Decode MAC header */ +#define IP6T_LOG_MASK 0x2f struct ip6t_log_info { unsigned char level; diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index e9e231215865..413742c92d14 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np); void netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); +int __netpoll_setup(struct netpoll *np); int netpoll_setup(struct netpoll *np); int netpoll_trap(void); void netpoll_set_trap(int trap); +void __netpoll_cleanup(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); int __netpoll_rx(struct sk_buff *skb); void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); @@ -57,12 +59,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); #ifdef CONFIG_NETPOLL static inline bool netpoll_rx(struct sk_buff *skb) { - struct netpoll_info *npinfo = skb->dev->npinfo; + struct netpoll_info *npinfo; unsigned long flags; bool ret = false; + rcu_read_lock_bh(); + npinfo = rcu_dereference_bh(skb->dev->npinfo); + if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) - return false; + goto out; spin_lock_irqsave(&npinfo->rx_lock, flags); /* check rx_flags again with the lock held */ @@ -70,12 +75,14 @@ static inline bool netpoll_rx(struct sk_buff *skb) ret = true; spin_unlock_irqrestore(&npinfo->rx_lock, flags); +out: + rcu_read_unlock_bh(); return ret; } static inline int netpoll_rx_on(struct sk_buff *skb) { - struct netpoll_info *npinfo = skb->dev->npinfo; + struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); } @@ -91,7 +98,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) { struct net_device *dev = napi->dev; - rcu_read_lock(); /* deal with race on ->npinfo */ if (dev && dev->npinfo) { spin_lock(&napi->poll_lock); napi->poll_owner = smp_processor_id(); @@ -108,7 +114,11 @@ static inline void netpoll_poll_unlock(void *have) napi->poll_owner = -1; spin_unlock(&napi->poll_lock); } - rcu_read_unlock(); +} + +static inline int netpoll_tx_running(struct net_device *dev) +{ + return irqs_disabled(); } #else @@ -134,6 +144,10 @@ static inline void netpoll_poll_unlock(void *have) static inline void netpoll_netdev_init(struct net_device *dev) { } +static inline int netpoll_tx_running(struct net_device *dev) +{ + return 0; +} #endif #endif diff 
--git a/include/linux/nfs4.h b/include/linux/nfs4.h index 9b8299af3741..07e40c625972 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -523,6 +523,7 @@ enum { NFSPROC4_CLNT_GETACL, NFSPROC4_CLNT_SETACL, NFSPROC4_CLNT_FS_LOCATIONS, + NFSPROC4_CLNT_RELEASE_LOCKOWNER, /* nfs41 */ NFSPROC4_CLNT_EXCHANGE_ID, diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 77c2ae53431c..508f8cf6da37 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -72,13 +72,20 @@ struct nfs_access_entry { int mask; }; +struct nfs_lock_context { + atomic_t count; + struct list_head list; + struct nfs_open_context *open_context; + fl_owner_t lockowner; + pid_t pid; +}; + struct nfs4_state; struct nfs_open_context { - atomic_t count; + struct nfs_lock_context lock_context; struct path path; struct rpc_cred *cred; struct nfs4_state *state; - fl_owner_t lockowner; fmode_t mode; unsigned long flags; @@ -353,6 +360,8 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); +extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); +extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); @@ -493,8 +502,15 @@ extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page* page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) +extern int nfs_commit_inode(struct inode *, int); extern struct nfs_write_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_write_data *wdata); +#else +static inline int +nfs_commit_inode(struct inode *inode, int how) +{ + return 0; +} #endif static inline int diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index d6e10a4c06e5..c82ee7cd6288 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -15,6 +15,7 @@ struct nlm_host; struct nfs4_sequence_args; struct nfs4_sequence_res; struct nfs_server; +struct nfs4_minor_version_ops; /* * The nfs_client identifies our client state to the server. 
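A sketch of how a read or write path might use the nfs_lock_context interfaces declared in nfs_fs.h above; the function name and the failure handling are illustrative:

#include <linux/nfs_fs.h>

static int example_nfs_io(struct nfs_open_context *open_ctx)
{
        struct nfs_lock_context *l_ctx;

        l_ctx = nfs_get_lock_context(open_ctx);   /* takes a reference */
        if (l_ctx == NULL)
                return -ENOMEM;                   /* assumed failure mode */

        /* ... attach l_ctx to the outgoing request (e.g. the lock_context
         * fields added to the read/write args below) and issue the RPC ... */

        nfs_put_lock_context(l_ctx);              /* drop it when the I/O is done */
        return 0;
}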
@@ -70,11 +71,7 @@ struct nfs_client { */ char cl_ipaddr[48]; unsigned char cl_id_uniquifier; - int (* cl_call_sync)(struct nfs_server *server, - struct rpc_message *msg, - struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - int cache_reply); + const struct nfs4_minor_version_ops *cl_mvops; #endif /* CONFIG_NFS_V4 */ #ifdef CONFIG_NFS_V4_1 diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 4499016e6d0d..5d59ae861aa6 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h @@ -69,5 +69,6 @@ struct nfs_mount_data { #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 #define NFS_MOUNT_NORESVPORT 0x40000 +#define NFS_MOUNT_LEGACY_INTERFACE 0x80000 #endif diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 3c60685d972b..f8b60e7f4c44 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -39,6 +39,7 @@ struct nfs_page { struct list_head wb_list; /* Defines state of page: */ struct page *wb_page; /* page to read in/write out */ struct nfs_open_context *wb_context; /* File state context info */ + struct nfs_lock_context *wb_lock_context; /* lock context info */ atomic_t wb_complete; /* i/os we're waiting for */ pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 51914d7d6cc4..fc461926c412 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -196,8 +196,10 @@ struct nfs_openargs { __u64 clientid; __u64 id; union { - struct iattr * attrs; /* UNCHECKED, GUARDED */ - nfs4_verifier verifier; /* EXCLUSIVE */ + struct { + struct iattr * attrs; /* UNCHECKED, GUARDED */ + nfs4_verifier verifier; /* EXCLUSIVE */ + }; nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ fmode_t delegation_type; /* CLAIM_PREVIOUS */ } u; @@ -313,6 +315,10 @@ struct nfs_lockt_res { struct nfs4_sequence_res seq_res; }; +struct nfs_release_lockowner_args { + struct nfs_lowner lock_owner; +}; + struct nfs4_delegreturnargs { const struct nfs_fh *fhandle; const nfs4_stateid *stateid; @@ -332,6 +338,7 @@ struct nfs4_delegreturnres { struct nfs_readargs { struct nfs_fh * fh; struct nfs_open_context *context; + struct nfs_lock_context *lock_context; __u64 offset; __u32 count; unsigned int pgbase; @@ -352,6 +359,7 @@ struct nfs_readres { struct nfs_writeargs { struct nfs_fh * fh; struct nfs_open_context *context; + struct nfs_lock_context *lock_context; __u64 offset; __u32 count; enum nfs3_stable_how stable; diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 8c2c6116e788..f5487b6f91ed 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -160,7 +160,7 @@ struct nilfs_super_root { * struct nilfs_super_block - structure of super block on disk */ struct nilfs_super_block { - __le32 s_rev_level; /* Revision level */ +/*00*/ __le32 s_rev_level; /* Revision level */ __le16 s_minor_rev_level; /* minor revision level */ __le16 s_magic; /* Magic signature */ @@ -169,50 +169,53 @@ struct nilfs_super_block { is excluded. 
*/ __le16 s_flags; /* flags */ __le32 s_crc_seed; /* Seed value of CRC calculation */ - __le32 s_sum; /* Check sum of super block */ +/*10*/ __le32 s_sum; /* Check sum of super block */ __le32 s_log_block_size; /* Block size represented as follows blocksize = 1 << (s_log_block_size + 10) */ __le64 s_nsegments; /* Number of segments in filesystem */ - __le64 s_dev_size; /* block device size in bytes */ +/*20*/ __le64 s_dev_size; /* block device size in bytes */ __le64 s_first_data_block; /* 1st seg disk block number */ - __le32 s_blocks_per_segment; /* number of blocks per full segment */ +/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */ __le32 s_r_segments_percentage; /* Reserved segments percentage */ __le64 s_last_cno; /* Last checkpoint number */ - __le64 s_last_pseg; /* disk block addr pseg written last */ +/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */ __le64 s_last_seq; /* seq. number of seg written last */ - __le64 s_free_blocks_count; /* Free blocks count */ +/*50*/ __le64 s_free_blocks_count; /* Free blocks count */ __le64 s_ctime; /* Creation time (execution time of newfs) */ - __le64 s_mtime; /* Mount time */ +/*60*/ __le64 s_mtime; /* Mount time */ __le64 s_wtime; /* Write time */ - __le16 s_mnt_count; /* Mount count */ +/*70*/ __le16 s_mnt_count; /* Mount count */ __le16 s_max_mnt_count; /* Maximal mount count */ __le16 s_state; /* File system state */ __le16 s_errors; /* Behaviour when detecting errors */ __le64 s_lastcheck; /* time of last check */ - __le32 s_checkinterval; /* max. time between checks */ +/*80*/ __le32 s_checkinterval; /* max. time between checks */ __le32 s_creator_os; /* OS */ __le16 s_def_resuid; /* Default uid for reserved blocks */ __le16 s_def_resgid; /* Default gid for reserved blocks */ __le32 s_first_ino; /* First non-reserved inode */ - __le16 s_inode_size; /* Size of an inode */ +/*90*/ __le16 s_inode_size; /* Size of an inode */ __le16 s_dat_entry_size; /* Size of a dat entry */ __le16 s_checkpoint_size; /* Size of a checkpoint */ __le16 s_segment_usage_size; /* Size of a segment usage */ - __u8 s_uuid[16]; /* 128-bit uuid for volume */ - char s_volume_name[80]; /* volume name */ +/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ +/*A8*/ char s_volume_name[80]; /* volume name */ - __le32 s_c_interval; /* Commit interval of segment */ +/*F8*/ __le32 s_c_interval; /* Commit interval of segment */ __le32 s_c_block_max; /* Threshold of data amount for the segment construction */ - __u32 s_reserved[192]; /* padding to the end of the block */ +/*100*/ __le64 s_feature_compat; /* Compatible feature set */ + __le64 s_feature_compat_ro; /* Read-only compatible feature set */ + __le64 s_feature_incompat; /* Incompatible feature set */ + __u32 s_reserved[186]; /* padding to the end of the block */ }; /* @@ -228,6 +231,16 @@ struct nilfs_super_block { #define NILFS_MINOR_REV 0 /* minor revision */ /* + * Feature set definitions + * + * If there is a bit set in the incompatible feature set that the kernel + * doesn't know about, it should refuse to mount the filesystem. + */ +#define NILFS_FEATURE_COMPAT_SUPP 0ULL +#define NILFS_FEATURE_COMPAT_RO_SUPP 0ULL +#define NILFS_FEATURE_INCOMPAT_SUPP 0ULL + +/* * Bytes count of super_block for CRC-calculation */ #define NILFS_SB_BYTES \ @@ -274,6 +287,12 @@ struct nilfs_super_block { #define NILFS_NAME_LEN 255 /* + * Block size limitations + */ +#define NILFS_MIN_BLOCK_SIZE 1024 +#define NILFS_MAX_BLOCK_SIZE 65536 + +/* * The new version of the directory entry. 
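A sketch of the mount-time policy the comment above calls for with the new feature-set words. Refusing the mount on unknown incompatible bits follows directly from that comment; treating unknown read-only-compatible bits as merely forcing a read-only mount is an extra assumption borrowed from the usual ext2-style convention:

#include <linux/nilfs2_fs.h>

static int example_check_features(struct nilfs_super_block *sbp, int readonly)
{
        __u64 incompat = le64_to_cpu(sbp->s_feature_incompat) &
                         ~NILFS_FEATURE_INCOMPAT_SUPP;
        __u64 compat_ro = le64_to_cpu(sbp->s_feature_compat_ro) &
                          ~NILFS_FEATURE_COMPAT_RO_SUPP;

        if (incompat)
                return -EINVAL;         /* unknown incompatible feature: refuse */
        if (compat_ro && !readonly)
                return -EROFS;          /* assumed: fall back to read-only */
        return 0;
}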
Since V0 structures are * stored in intel byte order, and the name_len field could never be * bigger than 255 chars, it's safe to reclaim the extra byte for the @@ -313,7 +332,25 @@ enum { #define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) #define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ ~NILFS_DIR_ROUND) +#define NILFS_MAX_REC_LEN ((1<<16)-1) +static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) +{ + unsigned len = le16_to_cpu(dlen); + + if (len == NILFS_MAX_REC_LEN) + return 1 << 16; + return len; +} + +static inline __le16 nilfs_rec_len_to_disk(unsigned len) +{ + if (len == (1 << 16)) + return cpu_to_le16(NILFS_MAX_REC_LEN); + else if (len > (1 << 16)) + BUG(); + return cpu_to_le16(len); +} /** * struct nilfs_finfo - file information diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index b7c77f9712f4..2c8701687336 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -132,7 +132,7 @@ * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain - * to the the specified ISO/IEC 3166-1 alpha2 country code. The core will + * to the specified ISO/IEC 3166-1 alpha2 country code. The core will * store this as a valid request and then query userspace for it. * * @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the @@ -725,6 +725,12 @@ enum nl80211_commands { * @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations * connected to this BSS. * + * @NL80211_ATTR_WIPHY_TX_POWER_SETTING: Transmit power setting type. See + * &enum nl80211_tx_power_setting for possible values. + * @NL80211_ATTR_WIPHY_TX_POWER_LEVEL: Transmit power level in signed mBm units. + * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING + * for non-automatic settings. 
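For illustration, a userspace fragment (libnl assumed) pairing the two new attributes to request a fixed transmit power of 20 dBm on an already-built NL80211_CMD_SET_WIPHY message; socket and message construction are omitted:

#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

static int example_set_fixed_txpower(struct nl_msg *msg)
{
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_SETTING,
                        NL80211_TX_POWER_FIXED))
                return -1;
        /* level is in mBm, i.e. 100 * dBm, and only meaningful for the
         * LIMITED and FIXED settings */
        return nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, 2000);
}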
+ * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -882,6 +888,9 @@ enum nl80211_attrs { NL80211_ATTR_AP_ISOLATE, + NL80211_ATTR_WIPHY_TX_POWER_SETTING, + NL80211_ATTR_WIPHY_TX_POWER_LEVEL, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -1659,4 +1668,17 @@ enum nl80211_cqm_rssi_threshold_event { NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, }; + +/** + * enum nl80211_tx_power_setting - TX power adjustment + * @NL80211_TX_POWER_AUTOMATIC: automatically determine transmit power + * @NL80211_TX_POWER_LIMITED: limit TX power by the mBm parameter + * @NL80211_TX_POWER_FIXED: fix TX power to the mBm parameter + */ +enum nl80211_tx_power_setting { + NL80211_TX_POWER_AUTOMATIC, + NL80211_TX_POWER_LIMITED, + NL80211_TX_POWER_FIXED, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/nmi.h b/include/linux/nmi.h index b752e807adde..06aab5eee134 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -20,10 +20,14 @@ extern void touch_nmi_watchdog(void); extern void acpi_nmi_disable(void); extern void acpi_nmi_enable(void); #else +#ifndef CONFIG_HARDLOCKUP_DETECTOR static inline void touch_nmi_watchdog(void) { touch_softlockup_watchdog(); } +#else +extern void touch_nmi_watchdog(void); +#endif static inline void acpi_nmi_disable(void) { } static inline void acpi_nmi_enable(void) { } #endif @@ -47,4 +51,13 @@ static inline bool trigger_all_cpu_backtrace(void) } #endif +#ifdef CONFIG_LOCKUP_DETECTOR +int hw_nmi_is_cpu_stuck(struct pt_regs *); +u64 hw_nmi_get_sample_period(void); +extern int watchdog_enabled; +struct ctl_table; +extern int proc_dowatchdog_enabled(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +#endif + #endif diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 540703b555cb..b2f1a4d83550 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -210,6 +210,7 @@ static inline int notifier_to_errno(int ret) #define NETDEV_POST_INIT 0x0010 #define NETDEV_UNREGISTER_BATCH 0x0011 #define NETDEV_BONDING_DESLAVE 0x0012 +#define NETDEV_NOTIFY_PEERS 0x0013 #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN diff --git a/include/linux/of.h b/include/linux/of.h index a367e19bb3af..cad7cf0ab278 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -70,6 +70,11 @@ extern struct device_node *allnodes; extern struct device_node *of_chosen; extern rwlock_t devtree_lock; +static inline bool of_node_is_root(const struct device_node *node) +{ + return node && (node->parent == NULL); +} + static inline int of_node_check_flag(struct device_node *n, unsigned long flag) { return test_bit(flag, &n->_flags); @@ -141,6 +146,11 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) #define OF_BAD_ADDR ((u64)-1) +#ifndef of_node_to_nid +static inline int of_node_to_nid(struct device_node *np) { return -1; } +#define of_node_to_nid of_node_to_nid +#endif + extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); #define for_each_node_by_name(dn, name) \ diff --git a/include/linux/of_address.h b/include/linux/of_address.h new file mode 100644 index 000000000000..8aea06f0564c --- /dev/null +++ b/include/linux/of_address.h @@ -0,0 +1,44 @@ +#ifndef __OF_ADDRESS_H +#define __OF_ADDRESS_H +#include <linux/ioport.h> +#include <linux/of.h> + +extern u64 of_translate_address(struct device_node *np, const u32 *addr); +extern int of_address_to_resource(struct device_node 
*dev, int index, + struct resource *r); +extern void __iomem *of_iomap(struct device_node *device, int index); + +/* Extract an address from a device, returns the region size and + * the address space flags too. The PCI version uses a BAR number + * instead of an absolute index + */ +extern const u32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags); + +#ifndef pci_address_to_pio +static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } +#define pci_address_to_pio pci_address_to_pio +#endif + +#ifdef CONFIG_PCI +extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no, + u64 *size, unsigned int *flags); +extern int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r); +#else /* CONFIG_PCI */ +static inline int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + return -ENOSYS; +} + +static inline const u32 *of_get_pci_address(struct device_node *dev, + int bar_no, u64 *size, unsigned int *flags) +{ + return NULL; +} +#endif /* CONFIG_PCI */ + + +#endif /* __OF_ADDRESS_H */ + diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 11651facc5f1..35aa44ad9f2c 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -1,32 +1,77 @@ #ifndef _LINUX_OF_DEVICE_H #define _LINUX_OF_DEVICE_H +/* + * The of_device *was* a kind of "base class" that was a superset of + * struct device for use by devices attached to an OF node and probed + * using OF properties. However, the important bit of OF-style + * probing, namely the device node pointer, has been moved into the + * common struct device when CONFIG_OF is set to make OF-style probing + * available to all bus types. So now, just make of_device and + * platform_device equivalent so that current of_platform bus users + * can be transparently migrated over to using the platform bus. + * + * This line will go away once all references to of_device are removed + * from the kernel. + */ +#define of_device platform_device +#include <linux/platform_device.h> +#include <linux/of_platform.h> /* temporary until merge */ + #ifdef CONFIG_OF_DEVICE #include <linux/device.h> #include <linux/of.h> #include <linux/mod_devicetable.h> -#include <asm/of_device.h> - #define to_of_device(d) container_of(d, struct of_device, dev) extern const struct of_device_id *of_match_device( const struct of_device_id *matches, const struct device *dev); +extern void of_device_make_bus_id(struct device *dev); + +/** + * of_driver_match_device - Tell if a driver's of_match_table matches a device. 
+ * @drv: the device_driver structure to test + * @dev: the device structure to match against + */ +static inline int of_driver_match_device(const struct device *dev, + const struct device_driver *drv) +{ + return of_match_device(drv->of_match_table, dev) != NULL; +} -extern struct of_device *of_dev_get(struct of_device *dev); -extern void of_dev_put(struct of_device *dev); +extern struct platform_device *of_dev_get(struct platform_device *dev); +extern void of_dev_put(struct platform_device *dev); -extern int of_device_register(struct of_device *ofdev); -extern void of_device_unregister(struct of_device *ofdev); +extern int of_device_register(struct platform_device *ofdev); +extern void of_device_unregister(struct platform_device *ofdev); extern void of_release_dev(struct device *dev); -static inline void of_device_free(struct of_device *dev) +static inline void of_device_free(struct platform_device *dev) { of_release_dev(&dev->dev); } -extern ssize_t of_device_get_modalias(struct of_device *ofdev, +extern ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len); + +extern int of_device_uevent(struct device *dev, struct kobj_uevent_env *env); + + +#else /* CONFIG_OF_DEVICE */ + +static inline int of_driver_match_device(struct device *dev, + struct device_driver *drv) +{ + return 0; +} + +static inline int of_device_uevent(struct device *dev, + struct kobj_uevent_env *env) +{ + return -ENODEV; +} + #endif /* CONFIG_OF_DEVICE */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index fc2472c3c254..6598c04dab01 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -33,34 +33,17 @@ enum of_gpio_flags { #ifdef CONFIG_OF_GPIO /* - * Generic OF GPIO chip - */ -struct of_gpio_chip { - struct gpio_chip gc; - int gpio_cells; - int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np, - const void *gpio_spec, enum of_gpio_flags *flags); -}; - -static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc) -{ - return container_of(gc, struct of_gpio_chip, gc); -} - -/* * OF GPIO chip for memory mapped banks */ struct of_mm_gpio_chip { - struct of_gpio_chip of_gc; + struct gpio_chip gc; void (*save_regs)(struct of_mm_gpio_chip *mm_gc); void __iomem *regs; }; static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) { - struct of_gpio_chip *of_gc = to_of_gpio_chip(gc); - - return container_of(of_gc, struct of_mm_gpio_chip, of_gc); + return container_of(gc, struct of_mm_gpio_chip, gc); } extern int of_get_gpio_flags(struct device_node *np, int index, @@ -69,11 +52,12 @@ extern unsigned int of_gpio_count(struct device_node *np); extern int of_mm_gpiochip_add(struct device_node *np, struct of_mm_gpio_chip *mm_gc); -extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, - struct device_node *np, - const void *gpio_spec, - enum of_gpio_flags *flags); -#else + +extern void of_gpiochip_add(struct gpio_chip *gc); +extern void of_gpiochip_remove(struct gpio_chip *gc); +extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np); + +#else /* CONFIG_OF_GPIO */ /* Drivers may not strictly depend on the GPIO support, so let them link. 
*/ static inline int of_get_gpio_flags(struct device_node *np, int index, @@ -87,6 +71,9 @@ static inline unsigned int of_gpio_count(struct device_node *np) return 0; } +static inline void of_gpiochip_add(struct gpio_chip *gc) { } +static inline void of_gpiochip_remove(struct gpio_chip *gc) { } + #endif /* CONFIG_OF_GPIO */ /** diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h index 34974b5a76f7..0efe8d465f55 100644 --- a/include/linux/of_i2c.h +++ b/include/linux/of_i2c.h @@ -12,12 +12,19 @@ #ifndef __LINUX_OF_I2C_H #define __LINUX_OF_I2C_H +#if defined(CONFIG_OF_I2C) || defined(CONFIG_OF_I2C_MODULE) #include <linux/i2c.h> -void of_register_i2c_devices(struct i2c_adapter *adap, - struct device_node *adap_node); +extern void of_i2c_register_devices(struct i2c_adapter *adap); /* must call put_device() when done with returned i2c_client device */ -struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); +extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); + +#else +static inline void of_i2c_register_devices(struct i2c_adapter *adap) +{ + return; +} +#endif /* CONFIG_OF_I2C */ #endif /* __LINUX_OF_I2C_H */ diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h new file mode 100644 index 000000000000..5929781c104d --- /dev/null +++ b/include/linux/of_irq.h @@ -0,0 +1,70 @@ +#ifndef __OF_IRQ_H +#define __OF_IRQ_H + +#if defined(CONFIG_OF) +struct of_irq; +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/of.h> + +/* + * irq_of_parse_and_map() is used ba all OF enabled platforms; but SPARC + * implements it differently. However, the prototype is the same for all, + * so declare it here regardless of the CONFIG_OF_IRQ setting. + */ +extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); + +#if defined(CONFIG_OF_IRQ) +/** + * of_irq - container for device_node/irq_specifier pair for an irq controller + * @controller: pointer to interrupt controller device tree node + * @size: size of interrupt specifier + * @specifier: array of cells @size long specifing the specific interrupt + * + * This structure is returned when an interrupt is mapped. 
The controller + * field needs to be put() after use + */ +#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */ +struct of_irq { + struct device_node *controller; /* Interrupt controller node */ + u32 size; /* Specifier size */ + u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */ +}; + +/* + * Workarounds only applied to 32bit powermac machines + */ +#define OF_IMAP_OLDWORLD_MAC 0x00000001 +#define OF_IMAP_NO_PHANDLE 0x00000002 + +#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) +extern unsigned int of_irq_workarounds; +extern struct device_node *of_irq_dflt_pic; +extern int of_irq_map_oldworld(struct device_node *device, int index, + struct of_irq *out_irq); +#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ +#define of_irq_workarounds (0) +#define of_irq_dflt_pic (NULL) +static inline int of_irq_map_oldworld(struct device_node *device, int index, + struct of_irq *out_irq) +{ + return -EINVAL; +} +#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ + + +extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec, + u32 ointsize, const u32 *addr, + struct of_irq *out_irq); +extern int of_irq_map_one(struct device_node *device, int index, + struct of_irq *out_irq); +extern unsigned int irq_create_of_mapping(struct device_node *controller, + const u32 *intspec, + unsigned int intsize); +extern int of_irq_to_resource(struct device_node *dev, int index, + struct resource *r); + +#endif /* CONFIG_OF_IRQ */ +#endif /* CONFIG_OF */ +#endif /* __OF_IRQ_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 1643d3761eb4..4e6d989c06df 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -17,29 +17,24 @@ #include <linux/mod_devicetable.h> #include <linux/pm.h> #include <linux/of_device.h> - -/* - * The of_platform_bus_type is a bus type used by drivers that do not - * attach to a macio or similar bus but still use OF probing - * mechanism - */ -extern struct bus_type of_platform_bus_type; +#include <linux/platform_device.h> /* * An of_platform_driver driver is attached to a basic of_device on - * the "platform bus" (of_platform_bus_type). + * the "platform bus" (platform_bus_type). 
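A sketch of a typical consumer of the of_address.h and of_irq.h interfaces above: map the probed node's registers and resolve its first interrupt. The surrounding driver structure and error codes are illustrative:

#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>

static int example_of_probe(struct platform_device *ofdev)
{
        struct device_node *np = ofdev->dev.of_node;
        void __iomem *regs;
        unsigned int irq;

        regs = of_iomap(np, 0);                 /* map "reg" entry 0 */
        if (!regs)
                return -ENOMEM;

        irq = irq_of_parse_and_map(np, 0);      /* resolve "interrupts" entry 0 */
        if (!irq) {
                iounmap(regs);
                return -EINVAL;
        }

        /* ... request_irq(), register the device, etc. ... */
        return 0;
}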
*/ struct of_platform_driver { - int (*probe)(struct of_device* dev, + int (*probe)(struct platform_device* dev, const struct of_device_id *match); - int (*remove)(struct of_device* dev); + int (*remove)(struct platform_device* dev); - int (*suspend)(struct of_device* dev, pm_message_t state); - int (*resume)(struct of_device* dev); - int (*shutdown)(struct of_device* dev); + int (*suspend)(struct platform_device* dev, pm_message_t state); + int (*resume)(struct platform_device* dev); + int (*shutdown)(struct platform_device* dev); struct device_driver driver; + struct platform_driver platform_driver; }; #define to_of_platform_driver(drv) \ container_of(drv,struct of_platform_driver, driver) @@ -49,20 +44,30 @@ extern int of_register_driver(struct of_platform_driver *drv, extern void of_unregister_driver(struct of_platform_driver *drv); /* Platform drivers register/unregister */ -static inline int of_register_platform_driver(struct of_platform_driver *drv) -{ - return of_register_driver(drv, &of_platform_bus_type); -} -static inline void of_unregister_platform_driver(struct of_platform_driver *drv) -{ - of_unregister_driver(drv); -} +extern int of_register_platform_driver(struct of_platform_driver *drv); +extern void of_unregister_platform_driver(struct of_platform_driver *drv); -#include <asm/of_platform.h> - -extern struct of_device *of_find_device_by_node(struct device_node *np); +extern struct platform_device *of_device_alloc(struct device_node *np, + const char *bus_id, + struct device *parent); +extern struct platform_device *of_find_device_by_node(struct device_node *np); extern int of_bus_type_init(struct bus_type *bus, const char *name); + +#if !defined(CONFIG_SPARC) /* SPARC has its own device registration method */ +/* Platform devices and busses creation */ +extern struct platform_device *of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent); + +/* pseudo "matches" value to not do deep probe */ +#define OF_NO_DEEP_PROBE ((struct of_device_id *)-1) + +extern int of_platform_bus_probe(struct device_node *root, + const struct of_device_id *matches, + struct device *parent); +#endif /* !CONFIG_SPARC */ + #endif /* CONFIG_OF_DEVICE */ #endif /* _LINUX_OF_PLATFORM_H */ diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h index 5f71ee8c0868..9e3e70f78ae6 100644 --- a/include/linux/of_spi.h +++ b/include/linux/of_spi.h @@ -9,10 +9,15 @@ #ifndef __LINUX_OF_SPI_H #define __LINUX_OF_SPI_H -#include <linux/of.h> #include <linux/spi/spi.h> -extern void of_register_spi_devices(struct spi_master *master, - struct device_node *np); +#if defined(CONFIG_OF_SPI) || defined(CONFIG_OF_SPI_MODULE) +extern void of_register_spi_devices(struct spi_master *master); +#else +static inline void of_register_spi_devices(struct spi_master *master) +{ + return; +} +#endif /* CONFIG_OF_SPI */ #endif /* __LINUX_OF_SPI */ diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h index 9bdd91486b49..c0b018790f07 100644 --- a/include/linux/omapfb.h +++ b/include/linux/omapfb.h @@ -85,6 +85,9 @@ #define OMAPFB_MEMTYPE_SRAM 1 #define OMAPFB_MEMTYPE_MAX 1 +#define OMAPFB_MEM_IDX_ENABLED 0x80 +#define OMAPFB_MEM_IDX_MASK 0x7f + enum omapfb_color_format { OMAPFB_COLOR_RGB565 = 0, OMAPFB_COLOR_YUV422, @@ -136,7 +139,7 @@ struct omapfb_plane_info { __u8 enabled; __u8 channel_out; __u8 mirror; - __u8 reserved1; + __u8 mem_idx; __u32 out_width; __u32 out_height; __u32 reserved2[12]; @@ -253,7 +256,7 @@ struct omapfb_platform_data { /* in arch/arm/plat-omap/fb.c */ extern 
void omapfb_set_platform_data(struct omapfb_platform_data *data); extern void omapfb_set_ctrl_platform_data(void *pdata); -extern void omapfb_reserve_sdram(void); +extern void omapfb_reserve_sdram_memblock(void); #endif diff --git a/include/linux/padata.h b/include/linux/padata.h index 8d8406246eef..bdcd1e9eacea 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -25,6 +25,11 @@ #include <linux/spinlock.h> #include <linux/list.h> #include <linux/timer.h> +#include <linux/notifier.h> +#include <linux/kobject.h> + +#define PADATA_CPU_SERIAL 0x01 +#define PADATA_CPU_PARALLEL 0x02 /** * struct padata_priv - Embedded to the users data structure. @@ -59,7 +64,20 @@ struct padata_list { }; /** - * struct padata_queue - The percpu padata queues. +* struct padata_serial_queue - The percpu padata serial queue +* +* @serial: List to wait for serialization after reordering. +* @work: work struct for serialization. +* @pd: Backpointer to the internal control structure. +*/ +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +/** + * struct padata_parallel_queue - The percpu padata parallel queue * * @parallel: List to wait for parallelization. * @reorder: List to wait for reordering after parallel processing. @@ -67,18 +85,28 @@ struct padata_list { * @pwork: work struct for parallelization. * @swork: work struct for serialization. * @pd: Backpointer to the internal control structure. + * @work: work struct for parallelization. * @num_obj: Number of objects that are processed by this cpu. * @cpu_index: Index of the cpu. */ -struct padata_queue { - struct padata_list parallel; - struct padata_list reorder; - struct padata_list serial; - struct work_struct pwork; - struct work_struct swork; - struct parallel_data *pd; - atomic_t num_obj; - int cpu_index; +struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; + struct parallel_data *pd; + struct work_struct work; + atomic_t num_obj; + int cpu_index; +}; + +/** + * struct padata_cpumask - The cpumasks for the parallel/serial workers + * + * @pcpu: cpumask for the parallel workers. + * @cbcpu: cpumask for the serial (callback) workers. + */ +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; }; /** @@ -86,25 +114,29 @@ struct padata_queue { * that depends on the cpumask in use. * * @pinst: padata instance. - * @queue: percpu padata queues. + * @pqueue: percpu padata queues used for parallelization. + * @squeue: percpu padata queues used for serialuzation. * @seq_nr: The sequence number that will be attached to the next object. * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. - * @cpumask: cpumask in use. + * @cpumask: The cpumasks in use for parallel and serial workers. * @lock: Reorder lock. + * @processed: Number of already processed objects. * @timer: Reorder timer. 
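A minimal sketch of a padata user against the reworked interface declared further down: allocate an instance over all possible CPUs, submit an object for parallel processing, and complete it from the serial callback. The parallel/serial members embedded in struct padata_priv, the callback bodies and the choice of callback CPU are used here purely for illustration:

#include <linux/padata.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_job {
        struct padata_priv padata;      /* must be embedded in the user's object */
        /* ... user data ... */
};

static void example_parallel(struct padata_priv *padata)
{
        /* CPU-intensive work runs here on one of the parallel workers */
        padata_do_serial(padata);       /* queue for in-order completion */
}

static void example_serial(struct padata_priv *padata)
{
        /* runs in submission order on a serial (cbcpu) worker */
        kfree(container_of(padata, struct example_job, padata));
}

static struct padata_instance *example_init(struct workqueue_struct *wq)
{
        struct padata_instance *pinst = padata_alloc_possible(wq);

        if (pinst && padata_start(pinst) < 0) {         /* can fail now */
                padata_free(pinst);
                return NULL;
        }
        return pinst;
}

static int example_submit(struct padata_instance *pinst)
{
        struct example_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

        if (!job)
                return -ENOMEM;
        job->padata.parallel = example_parallel;
        job->padata.serial = example_serial;
        /* the callback CPU must be part of the serial (cbcpu) mask;
         * 0 is only for illustration */
        return padata_do_parallel(pinst, &job->padata, 0);
}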
*/ struct parallel_data { - struct padata_instance *pinst; - struct padata_queue *queue; - atomic_t seq_nr; - atomic_t reorder_objects; - atomic_t refcnt; - unsigned int max_seq_nr; - cpumask_var_t cpumask; - spinlock_t lock; - struct timer_list timer; + struct padata_instance *pinst; + struct padata_parallel_queue *pqueue; + struct padata_serial_queue *squeue; + atomic_t seq_nr; + atomic_t reorder_objects; + atomic_t refcnt; + unsigned int max_seq_nr; + struct padata_cpumask cpumask; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; + struct timer_list timer; }; /** @@ -113,31 +145,48 @@ struct parallel_data { * @cpu_notifier: cpu hotplug notifier. * @wq: The workqueue in use. * @pd: The internal control structure. - * @cpumask: User supplied cpumask. + * @cpumask: User supplied cpumasks for parallel and serial works. + * @cpumask_change_notifier: Notifiers chain for user-defined notify + * callbacks that will be called when either @pcpu or @cbcpu + * or both cpumasks change. + * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. */ struct padata_instance { - struct notifier_block cpu_notifier; - struct workqueue_struct *wq; - struct parallel_data *pd; - cpumask_var_t cpumask; - struct mutex lock; - u8 flags; -#define PADATA_INIT 1 -#define PADATA_RESET 2 + struct notifier_block cpu_notifier; + struct workqueue_struct *wq; + struct parallel_data *pd; + struct padata_cpumask cpumask; + struct blocking_notifier_head cpumask_change_notifier; + struct kobject kobj; + struct mutex lock; + u8 flags; +#define PADATA_INIT 1 +#define PADATA_RESET 2 +#define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc(const struct cpumask *cpumask, - struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc_possible( + struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask); extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu); extern void padata_do_serial(struct padata_priv *padata); -extern int padata_set_cpumask(struct padata_instance *pinst, +extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_add_cpu(struct padata_instance *pinst, int cpu); -extern int padata_remove_cpu(struct padata_instance *pinst, int cpu); -extern void padata_start(struct padata_instance *pinst); +extern int padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask); +extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); +extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); +extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); +extern int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5b59f35dcb8f..6fa317801e1c 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -128,7 +128,6 @@ enum pageflags { /* SLUB */ PG_slub_frozen = PG_active, - PG_slub_debug = PG_error, }; #ifndef __GENERATING_BOUNDS_H @@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, 
swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) __PAGEFLAG(SlobFree, slob_free) __PAGEFLAG(SlubFrozen, slub_frozen) -__PAGEFLAG(SlubDebug, slub_debug) /* * Private page markings that may be used by the filesystem that owns the page diff --git a/include/linux/pci.h b/include/linux/pci.h index 7cb00845f150..b1d17956a153 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -270,6 +270,8 @@ struct pci_dev { unsigned int d1_support:1; /* Low power state D1 is supported */ unsigned int d2_support:1; /* Low power state D2 is supported */ unsigned int no_d1d2:1; /* Only allow D0 and D3 */ + unsigned int mmio_always_on:1; /* disallow turning off io/mem + decoding during bar sizing */ unsigned int wakeup_prepared:1; unsigned int d3_delay; /* D3->D0 transition time in ms */ @@ -288,6 +290,7 @@ struct pci_dev { */ unsigned int irq; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ + resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */ /* These fields are used by common fixups */ unsigned int transparent:1; /* Transparent PCI bridge */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3bedcc149c84..c81eec4d3c35 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1494,6 +1494,9 @@ #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 #define PCI_DEVICE_ID_SBE_WANXL400 0x0104 +#define PCI_SUBDEVICE_ID_SBE_T3E3 0x0009 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P0 0x0901 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P1 0x0902 #define PCI_VENDOR_ID_TOSHIBA 0x1179 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101 @@ -2054,7 +2057,6 @@ #define PCI_DEVICE_ID_NX2_57711E 0x1650 #define PCI_DEVICE_ID_TIGON3_5705 0x1653 #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 -#define PCI_DEVICE_ID_TIGON3_5720 0x1658 #define PCI_DEVICE_ID_TIGON3_5721 0x1659 #define PCI_DEVICE_ID_TIGON3_5722 0x165a #define PCI_DEVICE_ID_TIGON3_5723 0x165b @@ -2068,13 +2070,11 @@ #define PCI_DEVICE_ID_TIGON3_5754M 0x1672 #define PCI_DEVICE_ID_TIGON3_5755M 0x1673 #define PCI_DEVICE_ID_TIGON3_5756 0x1674 -#define PCI_DEVICE_ID_TIGON3_5750 0x1676 #define PCI_DEVICE_ID_TIGON3_5751 0x1677 #define PCI_DEVICE_ID_TIGON3_5715 0x1678 #define PCI_DEVICE_ID_TIGON3_5715S 0x1679 #define PCI_DEVICE_ID_TIGON3_5754 0x167a #define PCI_DEVICE_ID_TIGON3_5755 0x167b -#define PCI_DEVICE_ID_TIGON3_5750M 0x167c #define PCI_DEVICE_ID_TIGON3_5751M 0x167d #define PCI_DEVICE_ID_TIGON3_5751F 0x167e #define PCI_DEVICE_ID_TIGON3_5787F 0x167f @@ -2264,6 +2264,7 @@ #define PCI_DEVICE_ID_TDI_EHCI 0x0101 #define PCI_VENDOR_ID_FREESCALE 0x1957 +#define PCI_DEVICE_ID_MPC8308 0xc006 #define PCI_DEVICE_ID_MPC8315E 0x00b4 #define PCI_DEVICE_ID_MPC8315 0x00b5 #define PCI_DEVICE_ID_MPC8314E 0x00b6 @@ -2324,9 +2325,11 @@ #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 #define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 +#define PCI_DEVICE_ID_JMICRON_JMB364 0x2364 #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 +#define PCI_DEVICE_ID_JMICRON_JMB369 0x2369 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 @@ -2772,3 +2775,6 @@ #define PCI_DEVICE_ID_RME_DIGI32 0x9896 #define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 #define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 + +#define PCI_VENDOR_ID_XEN 0x5853 +#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 diff --git a/include/linux/percpu.h 
b/include/linux/percpu.h index d3a38d687104..b8b9084527b1 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -45,6 +45,16 @@ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) /* + * Percpu allocator can serve percpu allocations before slab is + * initialized which allows slab to depend on the percpu allocator. + * The following two parameters decide how much resource to + * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or + * larger than PERCPU_DYNAMIC_EARLY_SIZE. + */ +#define PERCPU_DYNAMIC_EARLY_SLOTS 128 +#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10) + +/* * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy * back on the first chunk for dynamic percpu allocation if arch is * manually allocating and mapping it for faster access (as a part of @@ -104,16 +114,11 @@ extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units); extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); -extern struct pcpu_alloc_info * __init pcpu_build_alloc_info( - size_t reserved_size, ssize_t dyn_size, - size_t atom_size, - pcpu_fc_cpu_distance_fn_t cpu_distance_fn); - extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr); #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK -extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, +extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_alloc_fn_t alloc_fn, @@ -140,6 +145,7 @@ extern bool is_kernel_percpu_address(unsigned long addr); #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void __init setup_per_cpu_areas(void); #endif +extern void __init percpu_init_late(void); #else /* CONFIG_SMP */ @@ -153,6 +159,8 @@ static inline bool is_kernel_percpu_address(unsigned long addr) static inline void __init setup_per_cpu_areas(void) { } +static inline void __init percpu_init_late(void) { } + static inline void *pcpu_lpage_remapped(void *kaddr) { return NULL; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 5d0266d94985..716f99b682c1 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -214,8 +214,9 @@ struct perf_event_attr { * See also PERF_RECORD_MISC_EXACT_IP */ precise_ip : 2, /* skid constraint */ + mmap_data : 1, /* non-exec mmap data */ - __reserved_1 : 47; + __reserved_1 : 46; union { __u32 wakeup_events; /* wakeup every n events */ @@ -461,6 +462,7 @@ enum perf_callchain_context { #ifdef CONFIG_PERF_EVENTS # include <asm/perf_event.h> +# include <asm/local64.h> #endif struct perf_guest_info_callbacks { @@ -531,14 +533,16 @@ struct hw_perf_event { struct hrtimer hrtimer; }; #ifdef CONFIG_HAVE_HW_BREAKPOINT - /* breakpoint */ - struct arch_hw_breakpoint info; + struct { /* breakpoint */ + struct arch_hw_breakpoint info; + struct list_head bp_list; + }; #endif }; - atomic64_t prev_count; + local64_t prev_count; u64 sample_period; u64 last_period; - atomic64_t period_left; + local64_t period_left; u64 interrupts; u64 freq_time_stamp; @@ -548,7 +552,10 @@ struct hw_perf_event { struct perf_event; -#define PERF_EVENT_TXN_STARTED 1 +/* + * Common implementation detail of pmu::{start,commit,cancel}_txn + */ +#define PERF_EVENT_TXN 0x1 /** * struct pmu - generic performance monitoring unit @@ -562,14 +569,28 @@ struct pmu { void (*unthrottle) (struct perf_event *event); /* - * group events scheduling is treated as a transaction, - * add group events as a whole and perform one schedulability 
test. - * If test fails, roll back the whole group + * Group events scheduling is treated as a transaction, add group + * events as a whole and perform one schedulability test. If the test + * fails, roll back the whole group */ + /* + * Start the transaction, after this ->enable() doesn't need + * to do schedulability tests. + */ void (*start_txn) (const struct pmu *pmu); - void (*cancel_txn) (const struct pmu *pmu); + /* + * If ->start_txn() disabled the ->enable() schedulability test + * then ->commit_txn() is required to perform one. On success + * the transaction is closed. On error the transaction is kept + * open until ->cancel_txn() is called. + */ int (*commit_txn) (const struct pmu *pmu); + /* + * Will cancel the transaction, assumes ->disable() is called for + * each successfull ->enable() during the transaction. + */ + void (*cancel_txn) (const struct pmu *pmu); }; /** @@ -584,7 +605,9 @@ enum perf_event_active_state { struct file; -struct perf_mmap_data { +#define PERF_BUFFER_WRITABLE 0x01 + +struct perf_buffer { atomic_t refcount; struct rcu_head rcu_head; #ifdef CONFIG_PERF_USE_VMALLOC @@ -650,7 +673,8 @@ struct perf_event { enum perf_event_active_state state; unsigned int attach_state; - atomic64_t count; + local64_t count; + atomic64_t child_count; /* * These are the total time in nanoseconds that the event @@ -709,7 +733,7 @@ struct perf_event { atomic_t mmap_count; int mmap_locked; struct user_struct *mmap_user; - struct perf_mmap_data *data; + struct perf_buffer *buffer; /* poll related */ wait_queue_head_t waitq; @@ -807,7 +831,7 @@ struct perf_cpu_context { struct perf_output_handle { struct perf_event *event; - struct perf_mmap_data *data; + struct perf_buffer *buffer; unsigned long wakeup; unsigned long size; void *addr; @@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); -extern void -perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); +#ifndef perf_arch_fetch_caller_regs +static inline void +perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } +#endif /* * Take a snapshot of the regs. 
Skip ip and frame pointer to @@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); * - bp for callchains * - eflags, for future purposes, just in case */ -static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) +static inline void perf_fetch_caller_regs(struct pt_regs *regs) { - unsigned long ip; - memset(regs, 0, sizeof(*regs)); - switch (skip) { - case 1 : - ip = CALLER_ADDR0; - break; - case 2 : - ip = CALLER_ADDR1; - break; - case 3 : - ip = CALLER_ADDR2; - break; - case 4: - ip = CALLER_ADDR3; - break; - /* No need to support further for now */ - default: - ip = 0; - } - - return perf_arch_fetch_caller_regs(regs, ip, skip); + perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } static inline void @@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) struct pt_regs hot_regs; if (!regs) { - perf_fetch_caller_regs(&hot_regs, 1); + perf_fetch_caller_regs(&hot_regs); regs = &hot_regs; } __perf_sw_event(event_id, nr, nmi, regs, addr); } } -extern void __perf_event_mmap(struct vm_area_struct *vma); - -static inline void perf_event_mmap(struct vm_area_struct *vma) -{ - if (vma->vm_flags & VM_EXEC) - __perf_event_mmap(vma); -} - +extern void perf_event_mmap(struct vm_area_struct *vma); extern struct perf_guest_info_callbacks *perf_guest_cbs; extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); @@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void) extern void perf_event_init(void); extern void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, struct pt_regs *regs, - struct hlist_head *head); + struct hlist_head *head, int rctx); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags @@ -1068,7 +1067,7 @@ static inline void perf_event_disable(struct perf_event *event) { } #define perf_cpu_notifier(fn) \ do { \ static struct notifier_block fn##_nb __cpuinitdata = \ - { .notifier_call = fn, .priority = 20 }; \ + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ (void *)(unsigned long)smp_processor_id()); \ fn(&fn##_nb, (unsigned long)CPU_STARTING, \ diff --git a/include/linux/phonet.h b/include/linux/phonet.h index e5126cff9b2a..24426c3d6b5a 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h @@ -56,7 +56,7 @@ struct phonethdr { __be16 pn_length; __u8 pn_robj; __u8 pn_sobj; -} __attribute__((packed)); +} __packed; /* Common Phonet payload header */ struct phonetmsg { @@ -98,7 +98,7 @@ struct sockaddr_pn { __u8 spn_dev; __u8 spn_resource; __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; -} __attribute__ ((packed)); +} __packed; /* Well known address */ #define PN_DEV_PC 0x10 diff --git a/include/linux/phy.h b/include/linux/phy.h index 987e111f7b11..6b0a782c6224 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -234,6 +234,8 @@ enum phy_state { PHY_RESUMING }; +struct sk_buff; + /* phy_device: An instance of a PHY * * drv: Pointer to the driver for this PHY instance @@ -402,6 +404,26 @@ struct phy_driver { /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); + /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */ + int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr); + + /* + * Requests a Rx timestamp for 'skb'. 
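A sketch of a PHY driver filling in these new time stamping hooks; the names, the deferral helper and the filtering policy are illustrative, and the PTP_CLASS_ constants come from the linux/ptp_classify.h header added later in this patch:

#include <linux/phy.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/uaccess.h>

static int my_phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
        struct hwtstamp_config cfg;

        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
                return -EFAULT;
        /* program the PHY's time stamping engine from cfg here ... */
        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static bool my_phy_rxtstamp(struct phy_device *dev, struct sk_buff *skb, int type)
{
        if ((type & PTP_CLASS_VMASK) != PTP_CLASS_V2)
                return false;           /* not ours, let the stack deliver it */
        my_defer_rx_skb(dev, skb);      /* assumed helper: netif_rx() once stamped */
        return true;
}

static struct phy_driver my_phy_driver = {
        /* ... the usual phy_driver fields ... */
        .hwtstamp = my_phy_hwtstamp,
        .rxtstamp = my_phy_rxtstamp,
};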
If the skb is accepted, + * the phy driver promises to deliver it using netif_rx() as + * soon as a timestamp becomes available. One of the + * PTP_CLASS_ values is passed in 'type'. The function must + * return true if the skb is accepted for delivery. + */ + bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + /* + * Requests a Tx timestamp for 'skb'. The phy driver promises + * to deliver it to the socket's error queue as soon as a + * timestamp becomes available. One of the PTP_CLASS_ values + * is passed in 'type'. + */ + void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + struct device_driver driver; }; #define to_phy_driver(d) container_of(d, struct phy_driver, driver) @@ -498,7 +520,7 @@ void phy_stop_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_mii_ioctl(struct phy_device *phydev, - struct mii_ioctl_data *mii_data, int cmd); + struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id); diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 5417944d3687..d7ecad0093bb 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -43,10 +43,64 @@ extern struct resource *platform_get_resource_byname(struct platform_device *, u extern int platform_get_irq_byname(struct platform_device *, const char *); extern int platform_add_devices(struct platform_device **, int); -extern struct platform_device *platform_device_register_simple(const char *, int id, - const struct resource *, unsigned int); -extern struct platform_device *platform_device_register_data(struct device *, - const char *, int, const void *, size_t); +extern struct platform_device *platform_device_register_resndata( + struct device *parent, const char *name, int id, + const struct resource *res, unsigned int num, + const void *data, size_t size); + +/** + * platform_device_register_simple - add a platform-level device and its resources + * @name: base name of the device we're adding + * @id: instance id + * @res: set of resources that needs to be allocated for the device + * @num: number of resources + * + * This function creates a simple platform device that requires minimal + * resource and memory management. Canned release function freeing memory + * allocated for the device allows drivers using such devices to be + * unloaded without waiting for the last reference to the device to be + * dropped. + * + * This interface is primarily intended for use with legacy drivers which + * probe hardware directly. Because such drivers create sysfs device nodes + * themselves, rather than letting system infrastructure handle such device + * enumeration tasks, they don't fully conform to the Linux driver model. + * In particular, when such drivers are built as modules, they can't be + * "hotplugged". + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
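A typical board-file style caller of the wrapper documented above, sketched with made-up resource values:

#include <linux/platform_device.h>
#include <linux/err.h>

static struct resource my_res[] = {
        {
                .start  = 0xfe000000,   /* illustrative address */
                .end    = 0xfe000fff,
                .flags  = IORESOURCE_MEM,
        },
};

static int __init my_board_init(void)
{
        struct platform_device *pdev;

        pdev = platform_device_register_simple("my-device", -1,
                                                my_res, ARRAY_SIZE(my_res));
        return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}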
+ */ +static inline struct platform_device *platform_device_register_simple( + const char *name, int id, + const struct resource *res, unsigned int num) +{ + return platform_device_register_resndata(NULL, name, id, + res, num, NULL, 0); +} + +/** + * platform_device_register_data - add a platform-level device with platform-specific data + * @parent: parent device for the device we're adding + * @name: base name of the device we're adding + * @id: instance id + * @data: platform specific data for this platform device + * @size: size of platform specific data + * + * This function creates a simple platform device that requires minimal + * resource and memory management. Canned release function freeing memory + * allocated for the device allows drivers using such devices to be + * unloaded without waiting for the last reference to the device to be + * dropped. + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +static inline struct platform_device *platform_device_register_data( + struct device *parent, const char *name, int id, + const void *data, size_t size) +{ + return platform_device_register_resndata(parent, name, id, + NULL, 0, data, size); +} extern struct platform_device *platform_device_alloc(const char *name, int id); extern int platform_device_add_resources(struct platform_device *pdev, diff --git a/include/linux/plist.h b/include/linux/plist.h index 6898985e7b38..7254eda078e5 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -260,6 +260,23 @@ static inline int plist_node_empty(const struct plist_node *node) #endif /** + * plist_last_entry - get the struct for the last entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_struct within the struct + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_last_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_last(head), type, member); \ +}) +#else +# define plist_last_entry(head, type, member) \ + container_of(plist_last(head), type, member) +#endif + +/** * plist_first - return the first node (and thus, highest priority) * @head: the &struct plist_head pointer * @@ -271,4 +288,16 @@ static inline struct plist_node *plist_first(const struct plist_head *head) struct plist_node, plist.node_list); } +/** + * plist_last - return the last node (and thus, lowest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. 
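A short sketch of the new lowest-priority accessors alongside the existing plist_first()/plist_first_entry(); struct my_req is illustrative:

#include <linux/plist.h>

struct my_req {
        struct plist_node node;
        int payload;
};

/* fetch the lowest priority request, or NULL if the list is empty */
static struct my_req *my_lowest_req(struct plist_head *head)
{
        if (plist_head_empty(head))
                return NULL;
        return plist_last_entry(head, struct my_req, node);
}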
+ */ +static inline struct plist_node *plist_last(const struct plist_head *head) +{ + return list_entry(head->node_list.prev, + struct plist_node, plist.node_list); +} + #endif diff --git a/include/linux/pm.h b/include/linux/pm.h index 8e258c727971..52e8c55ff314 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -457,6 +457,7 @@ struct dev_pm_info { #ifdef CONFIG_PM_SLEEP struct list_head entry; struct completion completion; + unsigned long wakeup_count; #endif #ifdef CONFIG_PM_RUNTIME struct timer_list suspend_timer; @@ -476,9 +477,15 @@ struct dev_pm_info { enum rpm_request request; enum rpm_status runtime_status; int runtime_error; + unsigned long active_jiffies; + unsigned long suspended_jiffies; + unsigned long accounting_timestamp; #endif }; +extern void update_pm_runtime_accounting(struct device *dev); + + /* * The PM_EVENT_ messages are also used by drivers implementing the legacy * suspend framework, based on the ->suspend() and ->resume() callbacks common @@ -552,6 +559,11 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); } while (0) extern void device_pm_wait_for_dev(struct device *sub, struct device *dev); + +/* drivers/base/power/wakeup.c */ +extern void pm_wakeup_event(struct device *dev, unsigned int msec); +extern void pm_stay_awake(struct device *dev); +extern void pm_relax(void); #else /* !CONFIG_PM_SLEEP */ #define device_pm_lock() do {} while (0) @@ -565,6 +577,10 @@ static inline int dpm_suspend_start(pm_message_t state) #define suspend_report_result(fn, ret) do {} while (0) static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {} + +static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} +static inline void pm_stay_awake(struct device *dev) {} +static inline void pm_relax(void) {} #endif /* !CONFIG_PM_SLEEP */ /* How to reorder dpm_list after device_move() */ diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 8ba440e5eb7f..77cbddb3784c 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h @@ -1,8 +1,10 @@ +#ifndef _LINUX_PM_QOS_PARAMS_H +#define _LINUX_PM_QOS_PARAMS_H /* interface for the pm_qos_power infrastructure of the linux kernel. * * Mark Gross <mgross@linux.intel.com> */ -#include <linux/list.h> +#include <linux/plist.h> #include <linux/notifier.h> #include <linux/miscdevice.h> @@ -14,9 +16,12 @@ #define PM_QOS_NUM_CLASSES 4 #define PM_QOS_DEFAULT_VALUE -1 -struct pm_qos_request_list; +struct pm_qos_request_list { + struct plist_node list; + int pm_qos_class; +}; -struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value); +void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value); void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, s32 new_value); void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); @@ -24,4 +29,6 @@ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); int pm_qos_request(int pm_qos_class); int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_request_active(struct pm_qos_request_list *req); +#endif diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 22d64c18056c..76aca48722ae 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -29,8 +29,11 @@ #ifdef CONFIG_PM -/* changes to device_may_wakeup take effect on the next pm state change. 
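With the pm_qos change above, the request storage now lives in the caller rather than being allocated by the core; a driver-side sketch (the 50 usec figure and the open/close hooks are only an example):

#include <linux/pm_qos_params.h>

static struct pm_qos_request_list my_qos_req;

static void my_open(void)
{
        /* ask for no more than 50 usec of CPU DMA latency while in use */
        pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 50);
}

static void my_close(void)
{
        if (pm_qos_request_active(&my_qos_req))
                pm_qos_remove_request(&my_qos_req);
}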
- * by default, devices should wakeup if they can. +/* Changes to device_may_wakeup take effect on the next pm state change. + * + * By default, most devices should leave wakeup disabled. The exceptions + * are devices that everyone expects to be wakeup sources: keyboards, + * power buttons, possibly network interfaces, etc. */ static inline void device_init_wakeup(struct device *dev, bool val) { @@ -59,7 +62,7 @@ static inline bool device_may_wakeup(struct device *dev) #else /* !CONFIG_PM */ -/* For some reason the next two routines work even without CONFIG_PM */ +/* For some reason the following routines work even without CONFIG_PM */ static inline void device_init_wakeup(struct device *dev, bool val) { dev->power.can_wakeup = val; @@ -67,6 +70,7 @@ static inline void device_init_wakeup(struct device *dev, bool val) static inline void device_set_wakeup_capable(struct device *dev, bool capable) { + dev->power.can_wakeup = capable; } static inline bool device_can_wakeup(struct device *dev) diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 7c4193eb0072..1bc1338b817b 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -414,6 +414,7 @@ struct pnp_protocol { int (*disable) (struct pnp_dev *dev); /* protocol specific suspend/resume */ + bool (*can_wakeup) (struct pnp_dev *dev); int (*suspend) (struct pnp_dev * dev, pm_message_t state); int (*resume) (struct pnp_dev * dev); diff --git a/include/linux/power/jz4740-battery.h b/include/linux/power/jz4740-battery.h new file mode 100644 index 000000000000..19c9610c720a --- /dev/null +++ b/include/linux/power/jz4740-battery.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2009, Jiejing Zhang <kzjeef@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __JZ4740_BATTERY_H +#define __JZ4740_BATTERY_H + +struct jz_battery_platform_data { + struct power_supply_info info; + int gpio_charge; /* GPIO port of Charger state */ + int gpio_charge_active_low; +}; + +#endif diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h new file mode 100644 index 000000000000..943a85ab0020 --- /dev/null +++ b/include/linux/ptp_classify.h @@ -0,0 +1,126 @@ +/* + * PTP 1588 support + * + * This file implements a BPF that recognizes PTP event messages. + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
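Putting the new pm.h wakeup helpers and the revised device_may_wakeup policy together, a driver for a genuine wakeup source (a power button, say) might look roughly like this sketch:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>
#include <linux/pm.h>

static int my_button_probe(struct platform_device *pdev)
{
        /* buttons are one of the few devices that should default to enabled */
        device_init_wakeup(&pdev->dev, true);
        return 0;
}

static irqreturn_t my_button_irq(int irq, void *dev_id)
{
        struct device *dev = dev_id;

        /* hold off suspend for ~100 ms while the key event is processed */
        pm_wakeup_event(dev, 100);
        return IRQ_HANDLED;
}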
+ */ + +#ifndef _PTP_CLASSIFY_H_ +#define _PTP_CLASSIFY_H_ + +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/filter.h> +#ifdef __KERNEL__ +#include <linux/in.h> +#else +#include <netinet/in.h> +#endif + +#define PTP_CLASS_NONE 0x00 /* not a PTP event message */ +#define PTP_CLASS_V1 0x01 /* protocol version 1 */ +#define PTP_CLASS_V2 0x02 /* protocol version 2 */ +#define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ +#define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ +#define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ +#define PTP_CLASS_L2 0x30 /* event in a L2 packet */ +#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged L2 packet */ +#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */ + +#define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) +#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /*probably DNE*/ +#define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4) +#define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) +#define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) +#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) + +#define PTP_EV_PORT 319 + +#define OFF_ETYPE 12 +#define OFF_IHL 14 +#define OFF_FRAG 20 +#define OFF_PROTO4 23 +#define OFF_NEXT 6 +#define OFF_UDP_DST 2 + +#define IP6_HLEN 40 +#define UDP_HLEN 8 + +#define RELOFF_DST4 (ETH_HLEN + OFF_UDP_DST) +#define OFF_DST6 (ETH_HLEN + IP6_HLEN + OFF_UDP_DST) +#define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN) + +#define OP_AND (BPF_ALU | BPF_AND | BPF_K) +#define OP_JEQ (BPF_JMP | BPF_JEQ | BPF_K) +#define OP_JSET (BPF_JMP | BPF_JSET | BPF_K) +#define OP_LDB (BPF_LD | BPF_B | BPF_ABS) +#define OP_LDH (BPF_LD | BPF_H | BPF_ABS) +#define OP_LDHI (BPF_LD | BPF_H | BPF_IND) +#define OP_LDX (BPF_LDX | BPF_B | BPF_MSH) +#define OP_OR (BPF_ALU | BPF_OR | BPF_K) +#define OP_RETA (BPF_RET | BPF_A) +#define OP_RETK (BPF_RET | BPF_K) + +static inline int ptp_filter_init(struct sock_filter *f, int len) +{ + if (OP_LDH == f[0].code) + return sk_chk_filter(f, len); + else + return 0; +} + +#define PTP_FILTER \ + {OP_LDH, 0, 0, OFF_ETYPE }, /* */ \ + {OP_JEQ, 0, 12, ETH_P_IP }, /* f goto L20 */ \ + {OP_LDB, 0, 0, OFF_PROTO4 }, /* */ \ + {OP_JEQ, 0, 9, IPPROTO_UDP }, /* f goto L10 */ \ + {OP_LDH, 0, 0, OFF_FRAG }, /* */ \ + {OP_JSET, 7, 0, 0x1fff }, /* t goto L11 */ \ + {OP_LDX, 0, 0, OFF_IHL }, /* */ \ + {OP_LDHI, 0, 0, RELOFF_DST4 }, /* */ \ + {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L12 */ \ + {OP_LDHI, 0, 0, ETH_HLEN + UDP_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_IPV4 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L1x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ +/*L20*/ {OP_JEQ, 0, 9, ETH_P_IPV6 }, /* f goto L40 */ \ + {OP_LDB, 0, 0, ETH_HLEN + OFF_NEXT }, /* */ \ + {OP_JEQ, 0, 6, IPPROTO_UDP }, /* f goto L30 */ \ + {OP_LDH, 0, 0, OFF_DST6 }, /* */ \ + {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L31 */ \ + {OP_LDH, 0, 0, OFF_PTP6 }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ +/*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \ + {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ + {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \ + {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \ + {OP_LDH, 0, 0, ETH_HLEN }, /* */ 
\ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L6x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, + +#endif diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index aa36793b48bd..d50ba858cfe0 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -28,6 +28,12 @@ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) #if defined(CONFIG_QUOTA) +#define quota_error(sb, fmt, args...) \ + __quota_error((sb), __func__, fmt , ## args) + +extern void __quota_error(struct super_block *sb, const char *func, + const char *fmt, ...); + /* * declaration of quota_function calls in kernel. */ @@ -145,11 +151,6 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type) !sb_has_quota_suspended(sb, type); } -static inline unsigned sb_any_quota_active(struct super_block *sb) -{ - return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb); -} - /* * Operations supported for diskquotas. */ @@ -194,11 +195,6 @@ static inline int sb_has_quota_active(struct super_block *sb, int type) return 0; } -static inline int sb_any_quota_active(struct super_block *sb) -{ - return 0; -} - static inline void dquot_initialize(struct inode *inode) { } @@ -270,7 +266,7 @@ static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr) static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr) { __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL); - mark_inode_dirty(inode); + mark_inode_dirty_sync(inode); } static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) @@ -279,7 +275,7 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) ret = dquot_alloc_space_nodirty(inode, nr); if (!ret) - mark_inode_dirty(inode); + mark_inode_dirty_sync(inode); return ret; } @@ -309,7 +305,7 @@ static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr) ret = dquot_prealloc_block_nodirty(inode, nr); if (!ret) - mark_inode_dirty(inode); + mark_inode_dirty_sync(inode); return ret; } @@ -325,7 +321,7 @@ static inline int dquot_claim_block(struct inode *inode, qsize_t nr) ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits); if (!ret) - mark_inode_dirty(inode); + mark_inode_dirty_sync(inode); return ret; } @@ -337,7 +333,7 @@ static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr) static inline void dquot_free_space(struct inode *inode, qsize_t nr) { dquot_free_space_nodirty(inode, nr); - mark_inode_dirty(inode); + mark_inode_dirty_sync(inode); } static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr) diff --git a/include/linux/rar_register.h b/include/linux/rar_register.h new file mode 100644 index 000000000000..ffa805780f85 --- /dev/null +++ b/include/linux/rar_register.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General + * Public License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. 
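The PTP_FILTER program above is meant to be instantiated once per user and run over received frames; a sketch, assuming the classic three-argument sk_run_filter() of this kernel generation and that skb->data points at the Ethernet header when the filter runs:

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

static struct sock_filter ptp_filter[] = { PTP_FILTER };

static int my_classifier_init(void)
{
        /* validate the BPF program once at start-up */
        return ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter));
}

static unsigned int my_classify(struct sk_buff *skb)
{
        /* returns PTP_CLASS_NONE or one of the PTP_CLASS_* combinations */
        return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter));
}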
+ * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * The full GNU General Public License is included in this + * distribution in the file called COPYING. + */ + + +#ifndef _RAR_REGISTER_H +#define _RAR_REGISTER_H + +#include <linux/types.h> + +/* following are used both in drivers as well as user space apps */ + +#define RAR_TYPE_VIDEO 0 +#define RAR_TYPE_AUDIO 1 +#define RAR_TYPE_IMAGE 2 +#define RAR_TYPE_DATA 3 + +#ifdef __KERNEL__ + +struct rar_device; + +int register_rar(int num, + int (*callback)(unsigned long data), unsigned long data); +void unregister_rar(int num); +int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end); +int rar_lock(int rar_index); + +#endif /* __KERNEL__ */ +#endif /* _RAR_REGISTER_H */ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index fe1872e5b37e..7066acb2c530 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -110,7 +110,6 @@ struct rb_node struct rb_root { struct rb_node *rb_node; - void (*augment_cb)(struct rb_node *node); }; @@ -130,9 +129,7 @@ static inline void rb_set_color(struct rb_node *rb, int color) rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; } -#define RB_ROOT (struct rb_root) { NULL, NULL, } -#define RB_AUGMENT_ROOT(x) (struct rb_root) { NULL, x} - +#define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) @@ -142,6 +139,14 @@ static inline void rb_set_color(struct rb_node *rb, int color) extern void rb_insert_color(struct rb_node *, struct rb_root *); extern void rb_erase(struct rb_node *, struct rb_root *); +typedef void (*rb_augment_f)(struct rb_node *node, void *data); + +extern void rb_augment_insert(struct rb_node *node, + rb_augment_f func, void *data); +extern struct rb_node *rb_augment_erase_begin(struct rb_node *node); +extern void rb_augment_erase_end(struct rb_node *node, + rb_augment_f func, void *data); + /* Find logical next and previous nodes in a tree */ extern struct rb_node *rb_next(const struct rb_node *); extern struct rb_node *rb_prev(const struct rb_node *); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b653b4aaa8a6..9fbc54a2585d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -40,6 +40,7 @@ #include <linux/seqlock.h> #include <linux/lockdep.h> #include <linux/completion.h> +#include <linux/debugobjects.h> #ifdef CONFIG_RCU_TORTURE_TEST extern int rcutorture_runnable; /* for sysctl */ @@ -79,6 +80,16 @@ extern void rcu_init(void); (ptr)->next = NULL; (ptr)->func = NULL; \ } while (0) +/* + * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic + * initialization and destruction of rcu_head on the stack. rcu_head structures + * allocated dynamically in the heap or defined statically don't need any + * initialization. 
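The augment callbacks above replace the old per-root augment_cb pointer; a user now calls them explicitly around the normal insert/erase sequence. A sketch with an invented subtree_max field:

#include <linux/rbtree.h>
#include <linux/types.h>

struct my_node {
        struct rb_node  rb;
        u64             val;
        u64             subtree_max;    /* augmented per-subtree value */
};

static void my_augment_cb(struct rb_node *node, void *data)
{
        struct my_node *n = rb_entry(node, struct my_node, rb);

        /* recompute n->subtree_max from n->val and both children here */
        n->subtree_max = n->val;
}

static void my_insert_done(struct my_node *new, struct rb_root *root)
{
        /* after the usual rb_link_node() + rb_insert_color() ... */
        rb_augment_insert(&new->rb, my_augment_cb, NULL);
}

static void my_erase(struct my_node *node, struct rb_root *root)
{
        struct rb_node *deepest = rb_augment_erase_begin(&node->rb);

        rb_erase(&node->rb, root);
        rb_augment_erase_end(deepest, my_augment_cb, NULL);
}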
+ */ +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +extern void init_rcu_head_on_stack(struct rcu_head *head); +extern void destroy_rcu_head_on_stack(struct rcu_head *head); +#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ static inline void init_rcu_head_on_stack(struct rcu_head *head) { } @@ -86,6 +97,7 @@ static inline void init_rcu_head_on_stack(struct rcu_head *head) static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } +#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -517,4 +529,74 @@ extern void call_rcu(struct rcu_head *head, extern void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head)); +/* + * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally + * by call_rcu() and rcu callback execution, and are therefore not part of the + * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors. + */ + +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +# define STATE_RCU_HEAD_READY 0 +# define STATE_RCU_HEAD_QUEUED 1 + +extern struct debug_obj_descr rcuhead_debug_descr; + +static inline void debug_rcu_head_queue(struct rcu_head *head) +{ + debug_object_activate(head, &rcuhead_debug_descr); + debug_object_active_state(head, &rcuhead_debug_descr, + STATE_RCU_HEAD_READY, + STATE_RCU_HEAD_QUEUED); +} + +static inline void debug_rcu_head_unqueue(struct rcu_head *head) +{ + debug_object_active_state(head, &rcuhead_debug_descr, + STATE_RCU_HEAD_QUEUED, + STATE_RCU_HEAD_READY); + debug_object_deactivate(head, &rcuhead_debug_descr); +} +#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ +static inline void debug_rcu_head_queue(struct rcu_head *head) +{ +} + +static inline void debug_rcu_head_unqueue(struct rcu_head *head) +{ +} +#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ + +#ifndef CONFIG_PROVE_RCU +#define __do_rcu_dereference_check(c) do { } while (0) +#endif /* #ifdef CONFIG_PROVE_RCU */ + +#define __rcu_dereference_index_check(p, c) \ + ({ \ + typeof(p) _________p1 = ACCESS_ONCE(p); \ + __do_rcu_dereference_check(c); \ + smp_read_barrier_depends(); \ + (_________p1); \ + }) + +/** + * rcu_dereference_index_check() - rcu_dereference for indices with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * Similar to rcu_dereference_check(), but omits the sparse checking. + * This allows rcu_dereference_index_check() to be used on integers, + * which can then be used as array indices. Attempting to use + * rcu_dereference_check() on an integer will give compiler warnings + * because the sparse address-space mechanism relies on dereferencing + * the RCU-protected pointer. Dereferencing integers is not something + * that even gcc will put up with. + * + * Note that this function does not implicitly check for RCU read-side + * critical sections. If this function gains lots of uses, it might + * make sense to provide versions for each flavor of RCU, but it does + * not make sense as of early 2010. 
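A sketch of the on-stack case those init/destroy helpers exist for; the waiter structure and callback are illustrative, mirroring what a synchronize-style primitive does internally:

#include <linux/rcupdate.h>
#include <linux/completion.h>

struct my_rcu_waiter {
        struct rcu_head         head;
        struct completion       done;
};

static void my_rcu_done(struct rcu_head *head)
{
        struct my_rcu_waiter *w = container_of(head, struct my_rcu_waiter, head);

        complete(&w->done);
}

static void my_wait_for_readers(void)
{
        struct my_rcu_waiter w;

        init_completion(&w.done);
        init_rcu_head_on_stack(&w.head);        /* no-op unless DEBUG_OBJECTS_RCU_HEAD */
        call_rcu(&w.head, my_rcu_done);
        wait_for_completion(&w.done);
        destroy_rcu_head_on_stack(&w.head);
}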
+ */ +#define rcu_dereference_index_check(p, c) \ + __rcu_dereference_index_check((p), (c)) + #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rds.h b/include/linux/rds.h index cab4994c2f63..24bce3ded9ea 100644 --- a/include/linux/rds.h +++ b/include/linux/rds.h @@ -100,7 +100,7 @@ struct rds_info_counter { u_int8_t name[32]; u_int64_t value; -} __attribute__((packed)); +} __packed; #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 @@ -115,7 +115,7 @@ struct rds_info_connection { __be32 faddr; u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */ u_int8_t flags; -} __attribute__((packed)); +} __packed; struct rds_info_flow { __be32 laddr; @@ -123,7 +123,7 @@ struct rds_info_flow { u_int32_t bytes; __be16 lport; __be16 fport; -} __attribute__((packed)); +} __packed; #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 @@ -136,7 +136,7 @@ struct rds_info_message { __be16 lport; __be16 fport; u_int8_t flags; -} __attribute__((packed)); +} __packed; struct rds_info_socket { u_int32_t sndbuf; @@ -146,7 +146,7 @@ struct rds_info_socket { __be16 connected_port; u_int32_t rcvbuf; u_int64_t inum; -} __attribute__((packed)); +} __packed; struct rds_info_tcp_socket { __be32 local_addr; @@ -158,7 +158,7 @@ struct rds_info_tcp_socket { u_int32_t last_sent_nxt; u_int32_t last_expected_una; u_int32_t last_seen_una; -} __attribute__((packed)); +} __packed; #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h new file mode 100644 index 000000000000..4892f591bab1 --- /dev/null +++ b/include/linux/regulator/tps6507x.h @@ -0,0 +1,32 @@ +/* + * tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X + * + * Copyright (C) 2010 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_TPS6507X +#define REGULATOR_TPS6507X + +/** + * tps6507x_reg_platform_data - platform data for tps6507x + * @defdcdc_default: Defines whether DCDC high or the low register controls + * output voltage by default. Valid for DCDC2 and DCDC3 outputs only. 
+ */ +struct tps6507x_reg_platform_data { + bool defdcdc_default; +}; + +#endif diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 3b603f474186..ba394163dea1 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -360,7 +360,7 @@ int is_reiserfs_jr(struct reiserfs_super_block *rs); /* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */ #define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024) -// reiserfs internal error code (used by search_by_key adn fix_nodes)) +/* reiserfs internal error code (used by search_by_key and fix_nodes)) */ #define CARRY_ON 0 #define REPEAT_SEARCH -1 #define IO_ERROR -2 diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index fbc8cb0d48c3..58d44491880f 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -282,6 +282,7 @@ enum rtattr_type_t { RTA_SESSION, /* no longer used */ RTA_MP_ALGO, /* no longer used */ RTA_TABLE, + RTA_MARK, __RTA_MAX }; diff --git a/include/linux/sched.h b/include/linux/sched.h index 747fcaedddb7..9591907c4f79 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -214,6 +214,7 @@ extern char ___assert_task_state[1 - 2*!!( #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +#define task_is_dead(task) ((task)->exit_state != 0) #define task_is_stopped_or_traced(task) \ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) #define task_contributes_to_load(task) \ @@ -271,19 +272,10 @@ extern int runqueue_is_locked(int cpu); extern cpumask_var_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) -extern int select_nohz_load_balancer(int cpu); -extern int get_nohz_load_balancer(void); -extern int nohz_ratelimit(int cpu); +extern void select_nohz_load_balancer(int stop_tick); +extern int get_nohz_timer_target(void); #else -static inline int select_nohz_load_balancer(int cpu) -{ - return 0; -} - -static inline int nohz_ratelimit(int cpu) -{ - return 0; -} +static inline void select_nohz_load_balancer(int stop_tick) { } #endif /* @@ -315,20 +307,16 @@ extern void scheduler_tick(void); extern void sched_show_task(struct task_struct *p); -#ifdef CONFIG_DETECT_SOFTLOCKUP -extern void softlockup_tick(void); +#ifdef CONFIG_LOCKUP_DETECTOR extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); -extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); +extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; extern int softlockup_thresh; #else -static inline void softlockup_tick(void) -{ -} static inline void touch_softlockup_watchdog(void) { } @@ -804,7 +792,7 @@ enum cpu_idle_type { #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ - +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ enum powersavings_balance_level { @@ -839,6 +827,8 @@ static inline int sd_balance_for_package_power(void) return SD_PREFER_SIBLING; } +extern int __weak arch_sd_sibiling_asym_packing(void); + /* * Optimise SD flags for power savings: 
* SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings. @@ -860,7 +850,7 @@ struct sched_group { * CPU power of this group, SCHED_LOAD_SCALE being max power for a * single CPU. */ - unsigned int cpu_power; + unsigned int cpu_power, cpu_power_orig; /* * The CPUs this group covers. @@ -1696,6 +1686,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ @@ -1790,20 +1781,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) #endif /* - * Architectures can set this to 1 if they have specified - * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, - * but then during bootup it turns out that sched_clock() - * is reliable after all: + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. */ -#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -extern int sched_clock_stable; -#endif - -/* ftrace calls sched_clock() directly */ extern unsigned long long notrace sched_clock(void); +/* + * See the comment in kernel/sched_clock.c + */ +extern u64 cpu_clock(int cpu); +extern u64 local_clock(void); +extern u64 sched_clock_cpu(int cpu); + extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) @@ -1818,17 +1812,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } #else +/* + * Architectures can set this to 1 if they have specified + * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, + * but then during bootup it turns out that sched_clock() + * is reliable after all: + */ +extern int sched_clock_stable; + extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); #endif -/* - * For kernel-internal use: high-speed (but slightly incorrect) per-cpu - * clock constructed from sched_clock(): - */ -extern unsigned long long cpu_clock(int cpu); - extern unsigned long long task_sched_runtime(struct task_struct *task); extern unsigned long long thread_group_sched_runtime(struct task_struct *task); @@ -2434,18 +2430,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ -#ifdef CONFIG_TRACING -extern void -__trace_special(void *__tr, void *__data, - unsigned long arg1, unsigned long arg2, unsigned long arg3); -#else -static inline void -__trace_special(void *__tr, void *__data, - unsigned long arg1, unsigned long arg2, unsigned long arg3) -{ -} -#endif - extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); diff --git a/include/linux/sctp.h b/include/linux/sctp.h index c20d3ce673c0..c11a28706fa4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -61,7 +61,7 @@ typedef struct sctphdr { __be16 dest; __be32 vtag; __le32 checksum; -} __attribute__((packed)) 
sctp_sctphdr_t; +} __packed sctp_sctphdr_t; #ifdef __KERNEL__ #include <linux/skbuff.h> @@ -77,7 +77,7 @@ typedef struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; -} __attribute__((packed)) sctp_chunkhdr_t; +} __packed sctp_chunkhdr_t; /* Section 3.2. Chunk Type Values. @@ -167,7 +167,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 }; typedef struct sctp_paramhdr { __be16 type; __be16 length; -} __attribute__((packed)) sctp_paramhdr_t; +} __packed sctp_paramhdr_t; typedef enum { @@ -228,12 +228,12 @@ typedef struct sctp_datahdr { __be16 ssn; __be32 ppid; __u8 payload[0]; -} __attribute__((packed)) sctp_datahdr_t; +} __packed sctp_datahdr_t; typedef struct sctp_data_chunk { sctp_chunkhdr_t chunk_hdr; sctp_datahdr_t data_hdr; -} __attribute__((packed)) sctp_data_chunk_t; +} __packed sctp_data_chunk_t; /* DATA Chuck Specific Flags */ enum { @@ -259,78 +259,78 @@ typedef struct sctp_inithdr { __be16 num_inbound_streams; __be32 initial_tsn; __u8 params[0]; -} __attribute__((packed)) sctp_inithdr_t; +} __packed sctp_inithdr_t; typedef struct sctp_init_chunk { sctp_chunkhdr_t chunk_hdr; sctp_inithdr_t init_hdr; -} __attribute__((packed)) sctp_init_chunk_t; +} __packed sctp_init_chunk_t; /* Section 3.3.2.1. IPv4 Address Parameter (5) */ typedef struct sctp_ipv4addr_param { sctp_paramhdr_t param_hdr; struct in_addr addr; -} __attribute__((packed)) sctp_ipv4addr_param_t; +} __packed sctp_ipv4addr_param_t; /* Section 3.3.2.1. IPv6 Address Parameter (6) */ typedef struct sctp_ipv6addr_param { sctp_paramhdr_t param_hdr; struct in6_addr addr; -} __attribute__((packed)) sctp_ipv6addr_param_t; +} __packed sctp_ipv6addr_param_t; /* Section 3.3.2.1 Cookie Preservative (9) */ typedef struct sctp_cookie_preserve_param { sctp_paramhdr_t param_hdr; __be32 lifespan_increment; -} __attribute__((packed)) sctp_cookie_preserve_param_t; +} __packed sctp_cookie_preserve_param_t; /* Section 3.3.2.1 Host Name Address (11) */ typedef struct sctp_hostname_param { sctp_paramhdr_t param_hdr; uint8_t hostname[0]; -} __attribute__((packed)) sctp_hostname_param_t; +} __packed sctp_hostname_param_t; /* Section 3.3.2.1 Supported Address Types (12) */ typedef struct sctp_supported_addrs_param { sctp_paramhdr_t param_hdr; __be16 types[0]; -} __attribute__((packed)) sctp_supported_addrs_param_t; +} __packed sctp_supported_addrs_param_t; /* Appendix A. 
ECN Capable (32768) */ typedef struct sctp_ecn_capable_param { sctp_paramhdr_t param_hdr; -} __attribute__((packed)) sctp_ecn_capable_param_t; +} __packed sctp_ecn_capable_param_t; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ typedef struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; -} __attribute__((packed)) sctp_adaptation_ind_param_t; +} __packed sctp_adaptation_ind_param_t; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ typedef struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; -} __attribute__((packed)) sctp_supported_ext_param_t; +} __packed sctp_supported_ext_param_t; /* AUTH Section 3.1 Random */ typedef struct sctp_random_param { sctp_paramhdr_t param_hdr; __u8 random_val[0]; -} __attribute__((packed)) sctp_random_param_t; +} __packed sctp_random_param_t; /* AUTH Section 3.2 Chunk List */ typedef struct sctp_chunks_param { sctp_paramhdr_t param_hdr; __u8 chunks[0]; -} __attribute__((packed)) sctp_chunks_param_t; +} __packed sctp_chunks_param_t; /* AUTH Section 3.3 HMAC Algorithm */ typedef struct sctp_hmac_algo_param { sctp_paramhdr_t param_hdr; __be16 hmac_ids[0]; -} __attribute__((packed)) sctp_hmac_algo_param_t; +} __packed sctp_hmac_algo_param_t; /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP @@ -342,13 +342,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t; typedef struct sctp_cookie_param { sctp_paramhdr_t p; __u8 body[0]; -} __attribute__((packed)) sctp_cookie_param_t; +} __packed sctp_cookie_param_t; /* Section 3.3.3.1 Unrecognized Parameters (8) */ typedef struct sctp_unrecognized_param { sctp_paramhdr_t param_hdr; sctp_paramhdr_t unrecognized; -} __attribute__((packed)) sctp_unrecognized_param_t; +} __packed sctp_unrecognized_param_t; @@ -363,7 +363,7 @@ typedef struct sctp_unrecognized_param { typedef struct sctp_gap_ack_block { __be16 start; __be16 end; -} __attribute__((packed)) sctp_gap_ack_block_t; +} __packed sctp_gap_ack_block_t; typedef __be32 sctp_dup_tsn_t; @@ -378,12 +378,12 @@ typedef struct sctp_sackhdr { __be16 num_gap_ack_blocks; __be16 num_dup_tsns; sctp_sack_variable_t variable[0]; -} __attribute__((packed)) sctp_sackhdr_t; +} __packed sctp_sackhdr_t; typedef struct sctp_sack_chunk { sctp_chunkhdr_t chunk_hdr; sctp_sackhdr_t sack_hdr; -} __attribute__((packed)) sctp_sack_chunk_t; +} __packed sctp_sack_chunk_t; /* RFC 2960. 
Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): @@ -395,12 +395,12 @@ typedef struct sctp_sack_chunk { typedef struct sctp_heartbeathdr { sctp_paramhdr_t info; -} __attribute__((packed)) sctp_heartbeathdr_t; +} __packed sctp_heartbeathdr_t; typedef struct sctp_heartbeat_chunk { sctp_chunkhdr_t chunk_hdr; sctp_heartbeathdr_t hb_hdr; -} __attribute__((packed)) sctp_heartbeat_chunk_t; +} __packed sctp_heartbeat_chunk_t; /* For the abort and shutdown ACK we must carry the init tag in the @@ -409,7 +409,7 @@ typedef struct sctp_heartbeat_chunk { */ typedef struct sctp_abort_chunk { sctp_chunkhdr_t uh; -} __attribute__((packed)) sctp_abort_chunk_t; +} __packed sctp_abort_chunk_t; /* For the graceful shutdown we must carry the tag (in common header) @@ -417,12 +417,12 @@ typedef struct sctp_abort_chunk { */ typedef struct sctp_shutdownhdr { __be32 cum_tsn_ack; -} __attribute__((packed)) sctp_shutdownhdr_t; +} __packed sctp_shutdownhdr_t; struct sctp_shutdown_chunk_t { sctp_chunkhdr_t chunk_hdr; sctp_shutdownhdr_t shutdown_hdr; -} __attribute__ ((packed)); +} __packed; /* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ @@ -430,12 +430,12 @@ typedef struct sctp_errhdr { __be16 cause; __be16 length; __u8 variable[0]; -} __attribute__((packed)) sctp_errhdr_t; +} __packed sctp_errhdr_t; typedef struct sctp_operr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_errhdr_t err_hdr; -} __attribute__((packed)) sctp_operr_chunk_t; +} __packed sctp_operr_chunk_t; /* RFC 2960 3.3.10 - Operation Error * @@ -525,7 +525,7 @@ typedef struct sctp_ecnehdr { typedef struct sctp_ecne_chunk { sctp_chunkhdr_t chunk_hdr; sctp_ecnehdr_t ence_hdr; -} __attribute__((packed)) sctp_ecne_chunk_t; +} __packed sctp_ecne_chunk_t; /* RFC 2960. Appendix A. Explicit Congestion Notification. * Congestion Window Reduced (CWR) (13) @@ -537,7 +537,7 @@ typedef struct sctp_cwrhdr { typedef struct sctp_cwr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_cwrhdr_t cwr_hdr; -} __attribute__((packed)) sctp_cwr_chunk_t; +} __packed sctp_cwr_chunk_t; /* PR-SCTP * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) @@ -588,17 +588,17 @@ typedef struct sctp_cwr_chunk { struct sctp_fwdtsn_skip { __be16 stream; __be16 ssn; -} __attribute__((packed)); +} __packed; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; struct sctp_fwdtsn_skip skip[0]; -} __attribute((packed)); +} __packed; struct sctp_fwdtsn_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_fwdtsn_hdr fwdtsn_hdr; -} __attribute((packed)); +} __packed; /* ADDIP @@ -636,17 +636,17 @@ struct sctp_fwdtsn_chunk { typedef struct sctp_addip_param { sctp_paramhdr_t param_hdr; __be32 crr_id; -} __attribute__((packed)) sctp_addip_param_t; +} __packed sctp_addip_param_t; typedef struct sctp_addiphdr { __be32 serial; __u8 params[0]; -} __attribute__((packed)) sctp_addiphdr_t; +} __packed sctp_addiphdr_t; typedef struct sctp_addip_chunk { sctp_chunkhdr_t chunk_hdr; sctp_addiphdr_t addip_hdr; -} __attribute__((packed)) sctp_addip_chunk_t; +} __packed sctp_addip_chunk_t; /* AUTH * Section 4.1 Authentication Chunk (AUTH) @@ -701,11 +701,11 @@ typedef struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; __u8 hmac[0]; -} __attribute__((packed)) sctp_authhdr_t; +} __packed sctp_authhdr_t; typedef struct sctp_auth_chunk { sctp_chunkhdr_t chunk_hdr; sctp_authhdr_t auth_hdr; -} __attribute__((packed)) sctp_auth_chunk_t; +} __packed sctp_auth_chunk_t; #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/security.h b/include/linux/security.h index 0c8819170463..723a93df756a 100644 --- a/include/linux/security.h 
+++ b/include/linux/security.h @@ -470,8 +470,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @path_truncate: * Check permission before truncating a file. * @path contains the path structure for the file. - * @length is the new length of the file. - * @time_attrs is the flags passed to do_truncate(). * Return 0 if permission is granted. * @inode_getattr: * Check permission before obtaining file attributes. @@ -1412,8 +1410,7 @@ struct security_operations { int (*path_rmdir) (struct path *dir, struct dentry *dentry); int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode, unsigned int dev); - int (*path_truncate) (struct path *path, loff_t length, - unsigned int time_attrs); + int (*path_truncate) (struct path *path); int (*path_symlink) (struct path *dir, struct dentry *dentry, const char *old_name); int (*path_link) (struct dentry *old_dentry, struct path *new_dir, @@ -2806,8 +2803,7 @@ int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode); int security_path_rmdir(struct path *dir, struct dentry *dentry); int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, unsigned int dev); -int security_path_truncate(struct path *path, loff_t length, - unsigned int time_attrs); +int security_path_truncate(struct path *path); int security_path_symlink(struct path *dir, struct dentry *dentry, const char *old_name); int security_path_link(struct dentry *old_dentry, struct path *new_dir, @@ -2841,8 +2837,7 @@ static inline int security_path_mknod(struct path *dir, struct dentry *dentry, return 0; } -static inline int security_path_truncate(struct path *path, loff_t length, - unsigned int time_attrs) +static inline int security_path_truncate(struct path *path) { return 0; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f89e7fd59a4c..d20d9e7a9bbd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -169,6 +169,7 @@ struct skb_shared_hwtstamps { * @software: generate software time stamp * @in_progress: device driver is going to provide * hardware time stamp + * @prevent_sk_orphan: make sk reference available on driver level * @flags: all shared_tx flags * * These flags are attached to packets as part of the @@ -178,7 +179,8 @@ union skb_shared_tx { struct { __u8 hardware:1, software:1, - in_progress:1; + in_progress:1, + prevent_sk_orphan:1; }; __u8 flags; }; @@ -202,10 +204,11 @@ struct skb_shared_info { */ atomic_t dataref; - skb_frag_t frags[MAX_SKB_FRAGS]; /* Intermediate layers must ensure that destructor_arg * remains valid until skb destructor */ void * destructor_arg; + /* must be last field, see pskb_expand_head() */ + skb_frag_t frags[MAX_SKB_FRAGS]; }; /* We divide dataref into two halves. The higher 16 bits hold references @@ -1414,12 +1417,14 @@ static inline int skb_network_offset(const struct sk_buff *skb) * * Various parts of the networking layer expect at least 32 bytes of * headroom, you should not reduce this. - * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span - * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes + * + * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) + * to reduce average number of cache lines per packet. 
+ * get_rps_cpus() for example only access one 64 bytes aligned block : * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) */ #ifndef NET_SKB_PAD -#define NET_SKB_PAD 64 +#define NET_SKB_PAD max(32, L1_CACHE_BYTES) #endif extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); @@ -1931,6 +1936,36 @@ static inline ktime_t net_invalid_timestamp(void) return ktime_set(0, 0); } +extern void skb_timestamping_init(void); + +#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING + +extern void skb_clone_tx_timestamp(struct sk_buff *skb); +extern bool skb_defer_rx_timestamp(struct sk_buff *skb); + +#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ + +static inline void skb_clone_tx_timestamp(struct sk_buff *skb) +{ +} + +static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) +{ + return false; +} + +#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ + +/** + * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps + * + * @skb: clone of the the original outgoing packet + * @hwtstamps: hardware time stamps + * + */ +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); + /** * skb_tstamp_tx - queue clone of skb with send time stamps * @orig_skb: the original outgoing packet @@ -1945,6 +1980,28 @@ static inline ktime_t net_invalid_timestamp(void) extern void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps); +static inline void sw_tx_timestamp(struct sk_buff *skb) +{ + union skb_shared_tx *shtx = skb_tx(skb); + if (shtx->software && !shtx->in_progress) + skb_tstamp_tx(skb, NULL); +} + +/** + * skb_tx_timestamp() - Driver hook for transmit timestamping + * + * Ethernet MAC Drivers should call this function in their hard_xmit() + * function as soon as possible after giving the sk_buff to the MAC + * hardware, but before freeing the sk_buff. + * + * @skb: A socket buffer. + */ +static inline void skb_tx_timestamp(struct sk_buff *skb) +{ + skb_clone_tx_timestamp(skb); + sw_tx_timestamp(skb); +} + extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); @@ -2132,7 +2189,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb) /* LRO sets gso_size but not gso_type, whereas if GSO is really * wanted then gso_type will be set. */ struct skb_shared_info *shinfo = skb_shinfo(skb); - if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) { + if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && + unlikely(shinfo->gso_type == 0)) { __skb_warn_lro_forwarding(skb); return true; } diff --git a/include/linux/slab.h b/include/linux/slab.h index 49d1247cd6d9..59260e21bdf5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, * allocator where we care about the real place the memory allocation * request comes from. */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) @@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); * standard allocator where we care about the real place the memory * allocation request comes from. 
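The skb_tx_timestamp() helper above is intended to be called from a MAC driver's transmit path; a sketch of the placement, with the queueing helper and private struct assumed:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);        /* illustrative */

        my_hw_queue_frame(priv, skb);   /* assumed: hand the frame to the MAC */

        /* grab the software and/or PHY transmit timestamp while the
         * skb is still owned by the driver, before it is freed */
        skb_tx_timestamp(skb);

        return NETDEV_TX_OK;
}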
*/ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 1812dac8c496..1acfa73ce2ac 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -14,7 +14,8 @@ #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ #include <linux/compiler.h> -#include <linux/kmemtrace.h> + +#include <trace/events/kmem.h> #ifndef ARCH_KMALLOC_MINALIGN /* diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h deleted file mode 100644 index 13337bf6c3f5..000000000000 --- a/include/linux/slow-work.h +++ /dev/null @@ -1,163 +0,0 @@ -/* Worker thread pool for slow items, such as filesystem lookups or mkdirs - * - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - * - * See Documentation/slow-work.txt - */ - -#ifndef _LINUX_SLOW_WORK_H -#define _LINUX_SLOW_WORK_H - -#ifdef CONFIG_SLOW_WORK - -#include <linux/sysctl.h> -#include <linux/timer.h> - -struct slow_work; -#ifdef CONFIG_SLOW_WORK_DEBUG -struct seq_file; -#endif - -/* - * The operations used to support slow work items - */ -struct slow_work_ops { - /* owner */ - struct module *owner; - - /* get a ref on a work item - * - return 0 if successful, -ve if not - */ - int (*get_ref)(struct slow_work *work); - - /* discard a ref to a work item */ - void (*put_ref)(struct slow_work *work); - - /* execute a work item */ - void (*execute)(struct slow_work *work); - -#ifdef CONFIG_SLOW_WORK_DEBUG - /* describe a work item for debugfs */ - void (*desc)(struct slow_work *work, struct seq_file *m); -#endif -}; - -/* - * A slow work item - * - A reference is held on the parent object by the thread pool when it is - * queued - */ -struct slow_work { - struct module *owner; /* the owning module */ - unsigned long flags; -#define SLOW_WORK_PENDING 0 /* item pending (further) execution */ -#define SLOW_WORK_EXECUTING 1 /* item currently executing */ -#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ -#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ -#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ -#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ - const struct slow_work_ops *ops; /* operations table for this item */ - struct list_head link; /* link in queue */ -#ifdef CONFIG_SLOW_WORK_DEBUG - struct timespec mark; /* jiffies at which queued or exec begun */ -#endif -}; - -struct delayed_slow_work { - struct slow_work work; - struct timer_list timer; -}; - -/** - * slow_work_init - Initialise a slow work item - * @work: The work item to initialise - * @ops: The operations to use to handle the slow work item - * - * Initialise a slow work item. 
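The kmalloc_track_caller() interface that the slab.h hunk above extends to CONFIG_SLAB plus CONFIG_TRACING exists for thin allocation wrappers, so slab debugging and tracing attribute the allocation to the wrapper's caller rather than to the wrapper itself. A small sketch under that assumption; foo_dup() is a made-up helper modelled on kstrdup()/kmemdup():

#include <linux/slab.h>
#include <linux/string.h>

static void *foo_dup(const void *src, size_t len, gfp_t gfp)
{
	/* _RET_IP_ inside this function is foo_dup()'s caller, so the
	 * allocation is recorded against that caller, not foo_dup(). */
	void *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}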
- */ -static inline void slow_work_init(struct slow_work *work, - const struct slow_work_ops *ops) -{ - work->flags = 0; - work->ops = ops; - INIT_LIST_HEAD(&work->link); -} - -/** - * slow_work_init - Initialise a delayed slow work item - * @work: The work item to initialise - * @ops: The operations to use to handle the slow work item - * - * Initialise a delayed slow work item. - */ -static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, - const struct slow_work_ops *ops) -{ - init_timer(&dwork->timer); - slow_work_init(&dwork->work, ops); -} - -/** - * vslow_work_init - Initialise a very slow work item - * @work: The work item to initialise - * @ops: The operations to use to handle the slow work item - * - * Initialise a very slow work item. This item will be restricted such that - * only a certain number of the pool threads will be able to execute items of - * this type. - */ -static inline void vslow_work_init(struct slow_work *work, - const struct slow_work_ops *ops) -{ - work->flags = 1 << SLOW_WORK_VERY_SLOW; - work->ops = ops; - INIT_LIST_HEAD(&work->link); -} - -/** - * slow_work_is_queued - Determine if a slow work item is on the work queue - * work: The work item to test - * - * Determine if the specified slow-work item is on the work queue. This - * returns true if it is actually on the queue. - * - * If the item is executing and has been marked for requeue when execution - * finishes, then false will be returned. - * - * Anyone wishing to wait for completion of execution can wait on the - * SLOW_WORK_EXECUTING bit. - */ -static inline bool slow_work_is_queued(struct slow_work *work) -{ - unsigned long flags = work->flags; - return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING); -} - -extern int slow_work_enqueue(struct slow_work *work); -extern void slow_work_cancel(struct slow_work *work); -extern int slow_work_register_user(struct module *owner); -extern void slow_work_unregister_user(struct module *owner); - -extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, - unsigned long delay); - -static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) -{ - slow_work_cancel(&dwork->work); -} - -extern bool slow_work_sleep_till_thread_needed(struct slow_work *work, - signed long *_timeout); - -#ifdef CONFIG_SYSCTL -extern ctl_table slow_work_sysctls[]; -#endif - -#endif /* CONFIG_SLOW_WORK */ -#endif /* _LINUX_SLOW_WORK_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 4ba59cfc1f75..6447a723ecb1 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -10,9 +10,10 @@ #include <linux/gfp.h> #include <linux/workqueue.h> #include <linux/kobject.h> -#include <linux/kmemtrace.h> #include <linux/kmemleak.h> +#include <trace/events/kmem.h> + enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 52797714ade7..ebb0c80ffd6e 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -229,6 +229,7 @@ enum LINUX_MIB_TCPBACKLOGDROP, LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ LINUX_MIB_TCPDEFERACCEPTDROP, + LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ __LINUX_MIB_MAX }; diff --git a/include/linux/socket.h b/include/linux/socket.h index 032a19eb61b1..a2fada9becb6 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -24,6 +24,9 @@ struct __kernel_sockaddr_storage { #include <linux/types.h> /* pid_t */ #include 
<linux/compiler.h> /* __user */ +struct pid; +struct cred; + #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -309,6 +312,8 @@ struct ucred { #define IPX_TYPE 1 #ifdef __KERNEL__ +extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred); + extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index b4ae570d3c98..92bd0839d5b4 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -48,11 +48,12 @@ struct ads7846_platform_data { * state if get_pendown_state == NULL */ int (*get_pendown_state)(void); - int (*filter_init) (struct ads7846_platform_data *pdata, + int (*filter_init) (const struct ads7846_platform_data *pdata, void **filter_data); int (*filter) (void *filter_data, int data_idx, int *val); void (*filter_cleanup)(void *filter_data); void (*wait_for_sync)(void); bool wakeup; + unsigned long irq_flags; }; diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index a2608bff9c78..623b704fdc42 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -167,7 +167,7 @@ struct ssb_device { * is an optimization. */ const struct ssb_bus_ops *ops; - struct device *dev; + struct device *dev, *dma_dev; struct ssb_bus *bus; struct ssb_device_id id; @@ -470,14 +470,6 @@ extern u32 ssb_dma_translation(struct ssb_device *dev); #define SSB_DMA_TRANSLATION_MASK 0xC0000000 #define SSB_DMA_TRANSLATION_SHIFT 30 -extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask); - -extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp_flags); -extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - gfp_t gfp_flags); - static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) { #ifdef CONFIG_SSB_DEBUG @@ -486,155 +478,6 @@ static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) #endif /* DEBUG */ } -static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - return pci_dma_mapping_error(dev->bus->host_pci, addr); -#endif - break; - case SSB_BUSTYPE_SSB: - return dma_mapping_error(dev->dev, addr); - default: - break; - } - __ssb_dma_not_implemented(dev); - return -ENOSYS; -} - -static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p, - size_t size, enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - return pci_map_single(dev->bus->host_pci, p, size, dir); -#endif - break; - case SSB_BUSTYPE_SSB: - return dma_map_single(dev->dev, p, size, dir); - default: - break; - } - __ssb_dma_not_implemented(dev); - return 0; -} - -static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_unmap_single(dev->dev, dma_addr, size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev, - dma_addr_t 
dma_addr, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, - size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev, - dma_addr_t dma_addr, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, - size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_for_device(dev->dev, dma_addr, size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev, - dma_addr_t dma_addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - /* Just sync everything. That's all the PCI API can do. */ - pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, - offset + size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset, - size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev, - dma_addr_t dma_addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - /* Just sync everything. That's all the PCI API can do. 
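Since the ssb_dma_* wrappers are being deleted above and struct ssb_device gains a dma_dev pointer, the replacement pattern is presumably the generic DMA API on that device. A hedged sketch only; the function name and the error convention are illustrative:

#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

static dma_addr_t foo_map_tx(struct ssb_device *sdev, void *buf, size_t len)
{
	/* Map through the generic DMA API on the new dma_dev member
	 * instead of the removed ssb_dma_map_single() wrapper. */
	dma_addr_t mapping = dma_map_single(sdev->dma_dev, buf, len,
					    DMA_TO_DEVICE);

	if (dma_mapping_error(sdev->dma_dev, mapping))
		return 0;	/* treated as "mapping failed" in this sketch */

	return mapping;
}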
*/ - pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, - offset + size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_range_for_device(dev->dev, dma_addr, offset, - size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - - #ifdef CONFIG_SSB_PCIHOST /* PCI-host wrapper driver */ extern int ssb_pcihost_register(struct pci_driver *driver); diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 87d7ec0bf779..5bbc447175dc 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -61,13 +61,7 @@ struct rpc_cred { /* * Client authentication handle */ -#define RPC_CREDCACHE_HASHBITS 4 -#define RPC_CREDCACHE_NR (1 << RPC_CREDCACHE_HASHBITS) -struct rpc_cred_cache { - struct hlist_head hashtable[RPC_CREDCACHE_NR]; - spinlock_t lock; -}; - +struct rpc_cred_cache; struct rpc_authops; struct rpc_auth { unsigned int au_cslack; /* call cred size estimate */ @@ -112,7 +106,7 @@ struct rpc_credops { void (*crdestroy)(struct rpc_cred *); int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); - void (*crbind)(struct rpc_task *, struct rpc_cred *, int); + struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int); __be32 * (*crmarshal)(struct rpc_task *, __be32 *); int (*crrefresh)(struct rpc_task *); __be32 * (*crvalidate)(struct rpc_task *, __be32 *); @@ -125,11 +119,12 @@ struct rpc_credops { extern const struct rpc_authops authunix_ops; extern const struct rpc_authops authnull_ops; -void __init rpc_init_authunix(void); -void __init rpc_init_generic_auth(void); -void __init rpcauth_init_module(void); +int __init rpc_init_authunix(void); +int __init rpc_init_generic_auth(void); +int __init rpcauth_init_module(void); void __exit rpcauth_remove_module(void); void __exit rpc_destroy_generic_auth(void); +void rpc_destroy_authunix(void); struct rpc_cred * rpc_lookup_cred(void); struct rpc_cred * rpc_lookup_machine_cred(void); @@ -140,10 +135,8 @@ void rpcauth_release(struct rpc_auth *); struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); -void rpcauth_bindcred(struct rpc_task *, struct rpc_cred *, int); -void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); +struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); void put_rpccred(struct rpc_cred *); -void rpcauth_unbindcred(struct rpc_task *); __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); __be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); int rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj); diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 6f52b4d7c447..7bf3e84b92f4 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -192,6 +192,7 @@ extern int cache_check(struct cache_detail *detail, extern void cache_flush(void); extern void cache_purge(struct cache_detail *detail); #define NEVER (0x7FFFFFFF) +extern void __init cache_initialize(void); extern int cache_register(struct cache_detail *cd); extern void cache_unregister(struct cache_detail *cd); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 8ed9642a5a76..569dc722a600 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -131,6 +131,7 @@ struct rpc_clnt 
*rpc_bind_new_program(struct rpc_clnt *, struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); +void rpc_task_release_client(struct rpc_task *); int rpcb_register(u32, u32, int, unsigned short); int rpcb_v4_register(const u32 program, const u32 version, @@ -148,8 +149,8 @@ int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags); struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags); -void rpc_restart_call_prepare(struct rpc_task *); -void rpc_restart_call(struct rpc_task *); +int rpc_restart_call_prepare(struct rpc_task *); +int rpc_restart_call(struct rpc_task *); void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); size_t rpc_max_payload(struct rpc_clnt *); void rpc_force_rebind(struct rpc_clnt *); diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 7be4f3a6d246..88513fd8e208 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -213,6 +213,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, const struct rpc_call_ops *ops); void rpc_put_task(struct rpc_task *); void rpc_exit_task(struct rpc_task *); +void rpc_exit(struct rpc_task *, int); void rpc_release_calldata(const struct rpc_call_ops *, void *); void rpc_killall_tasks(struct rpc_clnt *); void rpc_execute(struct rpc_task *); @@ -241,12 +242,6 @@ void rpc_destroy_mempool(void); extern struct workqueue_struct *rpciod_workqueue; void rpc_prepare_task(struct rpc_task *task); -static inline void rpc_exit(struct rpc_task *task, int status) -{ - task->tk_status = status; - task->tk_action = rpc_exit_task; -} - static inline int rpc_wait_for_completion_task(struct rpc_task *task) { return __rpc_wait_for_completion_task(task, NULL); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index b51470302399..ff5a77b28c50 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -64,6 +64,7 @@ struct rpc_rqst { * This is the private part */ struct rpc_task * rq_task; /* RPC task data */ + struct rpc_cred * rq_cred; /* Bound cred */ __be32 rq_xid; /* request XID */ int rq_cong; /* has incremented xprt->cong */ u32 rq_seqno; /* gss seq no. used on req. */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index bc7d6bb4cd8e..4af270ec2204 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -61,14 +61,15 @@ typedef int __bitwise suspend_state_t; * before device drivers' late suspend callbacks are executed. It returns * 0 on success or a negative error code otherwise, in which case the * system cannot enter the desired sleep state (@prepare_late(), @enter(), - * @wake(), and @finish() will not be called in that case). + * and @wake() will not be called in that case). * * @prepare_late: Finish preparing the platform for entering the system sleep * state indicated by @begin(). * @prepare_late is called before disabling nonboot CPUs and after * device drivers' late suspend callbacks have been executed. It returns * 0 on success or a negative error code otherwise, in which case the - * system cannot enter the desired sleep state (@enter() and @wake()). + * system cannot enter the desired sleep state (@enter() will not be + * executed). * * @enter: Enter the system sleep state indicated by @begin() or represented by * the argument if @begin() is not implemented. @@ -81,14 +82,15 @@ typedef int __bitwise suspend_state_t; * resume callbacks are executed. 
* This callback is optional, but should be implemented by the platforms * that implement @prepare_late(). If implemented, it is always called - * after @enter(), even if @enter() fails. + * after @prepare_late and @enter(), even if one of them fails. * * @finish: Finish wake-up of the platform. * @finish is called right prior to calling device drivers' regular suspend * callbacks. * This callback is optional, but should be implemented by the platforms * that implement @prepare(). If implemented, it is always called after - * @enter() and @wake(), if implemented, even if any of them fails. + * @enter() and @wake(), even if any of them fails. It is executed after + * a failing @prepare. * * @end: Called by the PM core right after resuming devices, to indicate to * the platform that the system has returned to the working state or @@ -286,6 +288,13 @@ extern int unregister_pm_notifier(struct notifier_block *nb); { .notifier_call = fn, .priority = pri }; \ register_pm_notifier(&fn##_nb); \ } + +/* drivers/base/power/wakeup.c */ +extern bool events_check_enabled; + +extern bool pm_check_wakeup_events(void); +extern bool pm_get_wakeup_count(unsigned long *count); +extern bool pm_save_wakeup_count(unsigned long count); #else /* !CONFIG_PM_SLEEP */ static inline int register_pm_notifier(struct notifier_block *nb) diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 81a4e213c6cf..8c0e349f4a6c 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -23,6 +23,29 @@ extern int swiotlb_force; #define IO_TLB_SHIFT 11 extern void swiotlb_init(int verbose); +extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); + +/* + * Enumeration for sync targets + */ +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; +extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, + phys_addr_t phys, size_t size, + enum dma_data_direction dir); + +extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, + size_t size, enum dma_data_direction dir); + +extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, + size_t size, enum dma_data_direction dir, + enum dma_sync_target target); + +/* Accessory functions. 
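To make the suspend.h ordering clarifications above concrete, here is a sketch of a platform's suspend ops wiring. The foo_pm_* hooks are hypothetical stubs and the field names assume the usual struct platform_suspend_ops layout; only the call order (begin, prepare, prepare_late, enter, wake, finish, end) matters here. Such a table would typically be registered with suspend_set_ops(&foo_pm_ops).

#include <linux/suspend.h>

static int foo_pm_begin(suspend_state_t state)		{ return 0; }
static int foo_pm_prepare_late(void)			{ return 0; }
static int foo_pm_enter(suspend_state_t state)		{ return 0; }
static void foo_pm_wake(void)				{ }
static void foo_pm_finish(void)				{ }

static struct platform_suspend_ops foo_pm_ops = {
	.valid		= suspend_valid_only_mem,
	.begin		= foo_pm_begin,
	.prepare_late	= foo_pm_prepare_late,	/* skipped if prepare() fails */
	.enter		= foo_pm_enter,		/* skipped if prepare_late() fails */
	.wake		= foo_pm_wake,		/* runs even if prepare_late()/enter() fail */
	.finish		= foo_pm_finish,	/* runs after wake(); also after a failing prepare() */
};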
*/ +extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, + enum dma_data_direction dir); extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, @@ -42,11 +65,11 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, - int direction); + enum dma_data_direction dir); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, - int direction); + enum dma_data_direction dir); extern int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7f614ce274a9..a6bfd1367d2a 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -124,7 +124,8 @@ extern struct trace_event_functions enter_syscall_print_funcs; extern struct trace_event_functions exit_syscall_print_funcs; #define SYSCALL_TRACE_ENTER_EVENT(sname) \ - static struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata \ + __attribute__((__aligned__(4))) __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_enter_##sname; \ static struct ftrace_event_call __used \ @@ -138,7 +139,8 @@ extern struct trace_event_functions exit_syscall_print_funcs; } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ - static struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata \ + __attribute__((__aligned__(4))) __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_exit_##sname; \ static struct ftrace_event_call __used \ @@ -165,7 +167,6 @@ extern struct trace_event_functions exit_syscall_print_funcs; .enter_event = &event_enter_##sname, \ .exit_event = &event_exit_##sname, \ .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ - .exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \ }; #define SYSCALL_DEFINE0(sname) \ @@ -180,7 +181,6 @@ extern struct trace_event_functions exit_syscall_print_funcs; .enter_event = &event_enter__##sname, \ .exit_event = &event_exit__##sname, \ .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \ - .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \ }; \ asmlinkage long sys_##sname(void) #else diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index f2694eb4dd3d..3c92121ba9af 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -22,14 +22,8 @@ struct kobject; struct module; enum kobj_ns_type; -/* FIXME - * The *owner field is no longer used. - * x86 tree has been cleaned up. The owner - * attribute is still left for other arches. 
- */ struct attribute { const char *name; - struct module *owner; mode_t mode; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key *key; @@ -136,8 +130,8 @@ int __must_check sysfs_create_file(struct kobject *kobj, const struct attribute *attr); int __must_check sysfs_create_files(struct kobject *kobj, const struct attribute **attr); -int __must_check sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, - mode_t mode); +int __must_check sysfs_chmod_file(struct kobject *kobj, + const struct attribute *attr, mode_t mode); void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); @@ -225,7 +219,7 @@ static inline int sysfs_create_files(struct kobject *kobj, } static inline int sysfs_chmod_file(struct kobject *kobj, - struct attribute *attr, mode_t mode) + const struct attribute *attr, mode_t mode) { return 0; } diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 4496322e28dd..609e8ca5f534 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -45,6 +45,7 @@ struct sysrq_key_op { */ void handle_sysrq(int key, struct tty_struct *tty); +void __handle_sysrq(int key, struct tty_struct *tty, int check_mask); int register_sysrq_key(int key, struct sysrq_key_op *op); int unregister_sysrq_key(int key, struct sysrq_key_op *op); struct sysrq_key_op *__sysrq_get_key_op(int key); diff --git a/include/linux/time.h b/include/linux/time.h index ea3559f0b3f2..cb34e35fabac 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -76,9 +76,25 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon, const unsigned int min, const unsigned int sec); extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); + +/* + * timespec_add_safe assumes both values are positive and checks + * for overflow. It will return TIME_T_MAX if the result would be + * smaller than either of the arguments.
+ */ extern struct timespec timespec_add_safe(const struct timespec lhs, const struct timespec rhs); + +static inline struct timespec timespec_add(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + return ts_delta; +} + /* * sub = lhs - rhs, in normalized form */ @@ -97,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs, #define timespec_valid(ts) \ (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) -extern struct timespec xtime; -extern struct timespec wall_to_monotonic; extern seqlock_t xtime_lock; extern void read_persistent_clock(struct timespec *ts); @@ -110,7 +124,8 @@ extern int timekeeping_suspended; unsigned long get_seconds(void); struct timespec current_kernel_time(void); -struct timespec __current_kernel_time(void); /* does not hold xtime_lock */ +struct timespec __current_kernel_time(void); /* does not take xtime_lock */ +struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */ struct timespec get_monotonic_coarse(void); #define CURRENT_TIME (current_kernel_time()) diff --git a/include/linux/timer.h b/include/linux/timer.h index ea965b857a50..38cf093ef62c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -100,6 +100,13 @@ void init_timer_deferrable_key(struct timer_list *timer, setup_timer_on_stack_key((timer), #timer, &__key, \ (fn), (data)); \ } while (0) +#define setup_deferrable_timer_on_stack(timer, fn, data) \ + do { \ + static struct lock_class_key __key; \ + setup_deferrable_timer_on_stack_key((timer), #timer, \ + &__key, (fn), \ + (data)); \ + } while (0) #else #define init_timer(timer)\ init_timer_key((timer), NULL, NULL) @@ -111,6 +118,8 @@ void init_timer_deferrable_key(struct timer_list *timer, setup_timer_key((timer), NULL, NULL, (fn), (data)) #define setup_timer_on_stack(timer, fn, data)\ setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) +#define setup_deferrable_timer_on_stack(timer, fn, data)\ + setup_deferrable_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) #endif #ifdef CONFIG_DEBUG_OBJECTS_TIMERS @@ -150,6 +159,12 @@ static inline void setup_timer_on_stack_key(struct timer_list *timer, init_timer_on_stack_key(timer, name, key); } +extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key, + void (*function)(unsigned long), + unsigned long data); + /** * timer_pending - is a timer pending? 
* @timer: the timer in question diff --git a/include/linux/topology.h b/include/linux/topology.h index c44df50a05ab..b572e432d2f3 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -103,6 +103,7 @@ int arch_update_cpu_topology(void); | 1*SD_SHARE_PKG_RESOURCES \ | 0*SD_SERIALIZE \ | 0*SD_PREFER_SIBLING \ + | arch_sd_sibling_asym_packing() \ , \ .last_balance = jiffies, \ .balance_interval = 1, \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 931078b73226..7802a243ee13 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -552,6 +552,9 @@ static inline void tty_audit_push_task(struct task_struct *tsk, } #endif +/* tty_io.c */ +extern int __init tty_init(void); + /* tty_ioctl.c */ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); diff --git a/include/linux/types.h b/include/linux/types.h index 23d237a075e2..01a082f56ef4 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -8,7 +8,10 @@ #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] - +#else +#ifndef __EXPORTED_HEADERS__ +#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" +#endif /* __EXPORTED_HEADERS__ */ #endif #include <linux/posix_types.h> @@ -197,6 +200,18 @@ typedef struct { } atomic64_t; #endif +struct list_head { + struct list_head *next, *prev; +}; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h new file mode 100644 index 000000000000..fa261a0da280 --- /dev/null +++ b/include/linux/u64_stats_sync.h @@ -0,0 +1,140 @@ +#ifndef _LINUX_U64_STATS_SYNC_H +#define _LINUX_U64_STATS_SYNC_H + +/* + * To properly implement 64bits network statistics on 32bit and 64bit hosts, + * we provide a synchronization point, that is a noop on 64bit or UP kernels. + * + * Key points : + * 1) Use a seqcount on SMP 32bits, with low overhead. + * 2) Whole thing is a noop on 64bit arches or UP kernels. + * 3) Write side must ensure mutual exclusion or one seqcount update could + * be lost, thus blocking readers forever. + * If this synchronization point is not a mutex, but a spinlock or + * spinlock_bh() or disable_bh() : + * 3.1) Write side should not sleep. + * 3.2) Write side should not allow preemption. + * 3.3) If applicable, interrupts should be disabled. + * + * 4) If reader fetches several counters, there is no guarantee the whole values + * are consistent (remember point 1) : this is a noop on 64bit arches anyway) + * + * 5) readers are allowed to sleep or be preempted/interrupted : They perform + * pure reads. But if they have to fetch many values, it's better to not allow + * preemptions/interruptions to avoid many retries. + * + * 6) If counter might be written by an interrupt, readers should block interrupts. + * (On UP, there is no seqcount_t protection, a reader allowing interrupts could + * read partial values) + * + * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and + * u64_stats_fetch_retry_bh() helpers + * + * Usage : + * + * Stats producer (writer) should use following template granted it already got + * an exclusive access to counters (a lock is already taken, or per cpu + * data is used [in a non preemptable context]) + * + * spin_lock_bh(...) or other synchronization to get exclusive access + * ... 
+ * u64_stats_update_begin(&stats->syncp); + * stats->bytes64 += len; // non atomic operation + * stats->packets64++; // non atomic operation + * u64_stats_update_end(&stats->syncp); + * + * While a consumer (reader) should use following template to get consistent + * snapshot for each variable (but no guarantee on several ones) + * + * u64 tbytes, tpackets; + * unsigned int start; + * + * do { + * start = u64_stats_fetch_begin(&stats->syncp); + * tbytes = stats->bytes64; // non atomic operation + * tpackets = stats->packets64; // non atomic operation + * } while (u64_stats_fetch_retry(&stats->syncp, start)); + * + * + * Example of use in drivers/net/loopback.c, using per_cpu containers, + * in BH disabled context. + */ +#include <linux/seqlock.h> + +struct u64_stats_sync { +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + seqcount_t seq; +#endif +}; + +static void inline u64_stats_update_begin(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_begin(&syncp->seq); +#endif +} + +static void inline u64_stats_update_end(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_end(&syncp->seq); +#endif +} + +static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_begin(&syncp->seq); +#else +#if BITS_PER_LONG==32 + preempt_disable(); +#endif + return 0; +#endif +} + +static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_retry(&syncp->seq, start); +#else +#if BITS_PER_LONG==32 + preempt_enable(); +#endif + return false; +#endif +} + +/* + * In case softirq handlers can update u64 counters, readers can use following helpers + * - SMP 32bit arches use seqcount protection, irq safe. + * - UP 32bit must disable BH. + * - 64bit have no problem atomically reading u64 values, irq safe. + */ +static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_begin(&syncp->seq); +#else +#if BITS_PER_LONG==32 + local_bh_disable(); +#endif + return 0; +#endif +} + +static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, + unsigned int start) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_retry(&syncp->seq, start); +#else +#if BITS_PER_LONG==32 + local_bh_enable(); +#endif + return false; +#endif +} + +#endif /* _LINUX_U64_STATS_SYNC_H */ diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 383b94ba8c20..964cb603f7c7 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -18,6 +18,21 @@ /* v1.0 and v2.0 of this standard have many things in common. For the rest * of the definitions, please refer to audio.h */ +/* + * bmControl field decoders + * + * From the USB Audio spec v2.0: + * + * bmaControls() is a (ch+1)-element array of 4-byte bitmaps, + * each containing a set of bit pairs. If a Control is present, + * it must be Host readable. If a certain Control is not + * present then the bit pair must be set to 0b00. + * If a Control is present but read-only, the bit pair must be + * set to 0b01. If a Control is also Host programmable, the bit + * pair must be set to 0b11. The value 0b10 is not allowed. 
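Returning to the new u64_stats_sync.h above, a minimal sketch of the intended embedding, following the writer/reader template in the header comment; the foo_stats structure and field names are made up:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
	u64			rx_bytes;
	u64			rx_packets;
	struct u64_stats_sync	syncp;
};

/* Writer: the caller already has exclusive access (lock held or per-cpu,
 * non-preemptible context), as the header comment requires. */
static void foo_stats_rx(struct foo_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);
}

/* Reader: retries until it sees a consistent snapshot of the counter. */
static u64 foo_stats_read_bytes(const struct foo_stats *stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return bytes;
}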
+ * + */ + static inline bool uac2_control_is_readable(u32 bmControls, u8 control) { return (bmControls >> (control * 2)) & 0x1; @@ -121,7 +136,7 @@ struct uac2_feature_unit_descriptor { /* 4.9.2 Class-Specific AS Interface Descriptor */ -struct uac_as_header_descriptor_v2 { +struct uac2_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index c51200c715e5..a54b8255d75f 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -39,8 +39,8 @@ #define UAC_MIXER_UNIT 0x04 #define UAC_SELECTOR_UNIT 0x05 #define UAC_FEATURE_UNIT 0x06 -#define UAC_PROCESSING_UNIT_V1 0x07 -#define UAC_EXTENSION_UNIT_V1 0x08 +#define UAC1_PROCESSING_UNIT 0x07 +#define UAC1_EXTENSION_UNIT 0x08 /* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ #define UAC_AS_GENERAL 0x01 @@ -151,7 +151,7 @@ /* Terminal Control Selectors */ /* 4.3.2 Class-Specific AC Interface Descriptor */ -struct uac_ac_header_descriptor_v1 { +struct uac1_ac_header_descriptor { __u8 bLength; /* 8 + n */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ @@ -165,7 +165,7 @@ struct uac_ac_header_descriptor_v1 { /* As above, but more useful for defining your own descriptors: */ #define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ -struct uac_ac_header_descriptor_v1_##n { \ +struct uac1_ac_header_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ @@ -205,7 +205,7 @@ struct uac_input_terminal_descriptor { #define UAC_TERMINAL_CS_COPY_PROTECT_CONTROL 0x01 /* 4.3.2.2 Output Terminal Descriptor */ -struct uac_output_terminal_descriptor_v1 { +struct uac1_output_terminal_descriptor { __u8 bLength; /* in bytes: 9 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ @@ -395,7 +395,7 @@ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_desc } /* 4.5.2 Class-Specific AS Interface Descriptor */ -struct uac_as_header_descriptor_v1 { +struct uac1_as_header_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* AS_GENERAL */ diff --git a/include/linux/usb/video.h b/include/linux/usb/video.h index be436d9ee479..3b3b95e01f71 100644 --- a/include/linux/usb/video.h +++ b/include/linux/usb/video.h @@ -160,5 +160,409 @@ #define UVC_STATUS_TYPE_CONTROL 1 #define UVC_STATUS_TYPE_STREAMING 2 +/* 2.4.3.3. Payload Header Information */ +#define UVC_STREAM_EOH (1 << 7) +#define UVC_STREAM_ERR (1 << 6) +#define UVC_STREAM_STI (1 << 5) +#define UVC_STREAM_RES (1 << 4) +#define UVC_STREAM_SCR (1 << 3) +#define UVC_STREAM_PTS (1 << 2) +#define UVC_STREAM_EOF (1 << 1) +#define UVC_STREAM_FID (1 << 0) + +/* 4.1.2. Control Capabilities */ +#define UVC_CONTROL_CAP_GET (1 << 0) +#define UVC_CONTROL_CAP_SET (1 << 1) +#define UVC_CONTROL_CAP_DISABLED (1 << 2) +#define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3) +#define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4) + +/* ------------------------------------------------------------------------ + * UVC structures + */ + +/* All UVC descriptors have these 3 fields at the beginning */ +struct uvc_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; +} __attribute__((packed)); + +/* 3.7.2. 
Video Control Interface Header Descriptor */ +struct uvc_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u16 bcdUVC; + __u16 wTotalLength; + __u32 dwClockFrequency; + __u8 bInCollection; + __u8 baInterfaceNr[]; +} __attribute__((__packed__)); + +#define UVC_DT_HEADER_SIZE(n) (12+(n)) + +#define UVC_HEADER_DESCRIPTOR(n) \ + uvc_header_descriptor_##n + +#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \ +struct UVC_HEADER_DESCRIPTOR(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u16 bcdUVC; \ + __u16 wTotalLength; \ + __u32 dwClockFrequency; \ + __u8 bInCollection; \ + __u8 baInterfaceNr[n]; \ +} __attribute__ ((packed)) + +/* 3.7.2.1. Input Terminal Descriptor */ +struct uvc_input_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; +} __attribute__((__packed__)); + +#define UVC_DT_INPUT_TERMINAL_SIZE 8 + +/* 3.7.2.2. Output Terminal Descriptor */ +struct uvc_output_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 bSourceID; + __u8 iTerminal; +} __attribute__((__packed__)); + +#define UVC_DT_OUTPUT_TERMINAL_SIZE 9 + +/* 3.7.2.3. Camera Terminal Descriptor */ +struct uvc_camera_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; + __u16 wObjectiveFocalLengthMin; + __u16 wObjectiveFocalLengthMax; + __u16 wOcularFocalLength; + __u8 bControlSize; + __u8 bmControls[3]; +} __attribute__((__packed__)); + +#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n)) + +/* 3.7.2.4. Selector Unit Descriptor */ +struct uvc_selector_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 bNrInPins; + __u8 baSourceID[0]; + __u8 iSelector; +} __attribute__((__packed__)); + +#define UVC_DT_SELECTOR_UNIT_SIZE(n) (6+(n)) + +#define UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ + uvc_selector_unit_descriptor_##n + +#define DECLARE_UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ +struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bUnitID; \ + __u8 bNrInPins; \ + __u8 baSourceID[n]; \ + __u8 iSelector; \ +} __attribute__ ((packed)) + +/* 3.7.2.5. Processing Unit Descriptor */ +struct uvc_processing_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 bSourceID; + __u16 wMaxMultiplier; + __u8 bControlSize; + __u8 bmControls[2]; + __u8 iProcessing; +} __attribute__((__packed__)); + +#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n)) + +/* 3.7.2.6. 
Extension Unit Descriptor */ +struct uvc_extension_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 guidExtensionCode[16]; + __u8 bNumControls; + __u8 bNrInPins; + __u8 baSourceID[0]; + __u8 bControlSize; + __u8 bmControls[0]; + __u8 iExtension; +} __attribute__((__packed__)); + +#define UVC_DT_EXTENSION_UNIT_SIZE(p, n) (24+(p)+(n)) + +#define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ + uvc_extension_unit_descriptor_##p_##n + +#define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ +struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bUnitID; \ + __u8 guidExtensionCode[16]; \ + __u8 bNumControls; \ + __u8 bNrInPins; \ + __u8 baSourceID[p]; \ + __u8 bControlSize; \ + __u8 bmControls[n]; \ + __u8 iExtension; \ +} __attribute__ ((packed)) + +/* 3.8.2.2. Video Control Interrupt Endpoint Descriptor */ +struct uvc_control_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u16 wMaxTransferSize; +} __attribute__((__packed__)); + +#define UVC_DT_CONTROL_ENDPOINT_SIZE 5 + +/* 3.9.2.1. Input Header Descriptor */ +struct uvc_input_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __u16 wTotalLength; + __u8 bEndpointAddress; + __u8 bmInfo; + __u8 bTerminalLink; + __u8 bStillCaptureMethod; + __u8 bTriggerSupport; + __u8 bTriggerUsage; + __u8 bControlSize; + __u8 bmaControls[]; +} __attribute__((__packed__)); + +#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p)) + +#define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ + uvc_input_header_descriptor_##n_##p + +#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ +struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __u16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bmInfo; \ + __u8 bTerminalLink; \ + __u8 bStillCaptureMethod; \ + __u8 bTriggerSupport; \ + __u8 bTriggerUsage; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ +} __attribute__ ((packed)) + +/* 3.9.2.2. Output Header Descriptor */ +struct uvc_output_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __u16 wTotalLength; + __u8 bEndpointAddress; + __u8 bTerminalLink; + __u8 bControlSize; + __u8 bmaControls[]; +} __attribute__((__packed__)); + +#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p)) + +#define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ + uvc_output_header_descriptor_##n_##p + +#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ +struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __u16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bTerminalLink; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ +} __attribute__ ((packed)) + +/* 3.9.2.6. Color matching descriptor */ +struct uvc_color_matching_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bColorPrimaries; + __u8 bTransferCharacteristics; + __u8 bMatrixCoefficients; +} __attribute__((__packed__)); + +#define UVC_DT_COLOR_MATCHING_SIZE 6 + +/* 4.3.1.1. 
Video Probe and Commit Controls */ +struct uvc_streaming_control { + __u16 bmHint; + __u8 bFormatIndex; + __u8 bFrameIndex; + __u32 dwFrameInterval; + __u16 wKeyFrameRate; + __u16 wPFrameRate; + __u16 wCompQuality; + __u16 wCompWindowSize; + __u16 wDelay; + __u32 dwMaxVideoFrameSize; + __u32 dwMaxPayloadTransferSize; + __u32 dwClockFrequency; + __u8 bmFramingInfo; + __u8 bPreferedVersion; + __u8 bMinVersion; + __u8 bMaxVersion; +} __attribute__((__packed__)); + +/* Uncompressed Payload - 3.1.1. Uncompressed Video Format Descriptor */ +struct uvc_format_uncompressed { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFormatIndex; + __u8 bNumFrameDescriptors; + __u8 guidFormat[16]; + __u8 bBitsPerPixel; + __u8 bDefaultFrameIndex; + __u8 bAspectRatioX; + __u8 bAspectRatioY; + __u8 bmInterfaceFlags; + __u8 bCopyProtect; +} __attribute__((__packed__)); + +#define UVC_DT_FORMAT_UNCOMPRESSED_SIZE 27 + +/* Uncompressed Payload - 3.1.2. Uncompressed Video Frame Descriptor */ +struct uvc_frame_uncompressed { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __u16 wWidth; + __u16 wHeight; + __u32 dwMinBitRate; + __u32 dwMaxBitRate; + __u32 dwMaxVideoFrameBufferSize; + __u32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __u32 dwFrameInterval[]; +} __attribute__((__packed__)); + +#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n)) + +#define UVC_FRAME_UNCOMPRESSED(n) \ + uvc_frame_uncompressed_##n + +#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \ +struct UVC_FRAME_UNCOMPRESSED(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __u16 wWidth; \ + __u16 wHeight; \ + __u32 dwMinBitRate; \ + __u32 dwMaxBitRate; \ + __u32 dwMaxVideoFrameBufferSize; \ + __u32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __u32 dwFrameInterval[n]; \ +} __attribute__ ((packed)) + +/* MJPEG Payload - 3.1.1. MJPEG Video Format Descriptor */ +struct uvc_format_mjpeg { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFormatIndex; + __u8 bNumFrameDescriptors; + __u8 bmFlags; + __u8 bDefaultFrameIndex; + __u8 bAspectRatioX; + __u8 bAspectRatioY; + __u8 bmInterfaceFlags; + __u8 bCopyProtect; +} __attribute__((__packed__)); + +#define UVC_DT_FORMAT_MJPEG_SIZE 11 + +/* MJPEG Payload - 3.1.2. 
MJPEG Video Frame Descriptor */ +struct uvc_frame_mjpeg { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __u16 wWidth; + __u16 wHeight; + __u32 dwMinBitRate; + __u32 dwMaxBitRate; + __u32 dwMaxVideoFrameBufferSize; + __u32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __u32 dwFrameInterval[]; +} __attribute__((__packed__)); + +#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n)) + +#define UVC_FRAME_MJPEG(n) \ + uvc_frame_mjpeg_##n + +#define DECLARE_UVC_FRAME_MJPEG(n) \ +struct UVC_FRAME_MJPEG(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __u16 wWidth; \ + __u16 wHeight; \ + __u32 dwMinBitRate; \ + __u32 dwMaxBitRate; \ + __u32 dwMaxVideoFrameBufferSize; \ + __u32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __u32 dwFrameInterval[n]; \ +} __attribute__ ((packed)) + #endif /* __LINUX_USB_VIDEO_H */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index cc4f45361dbb..8178156711f9 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -36,6 +36,9 @@ static inline void put_user_ns(struct user_namespace *ns) kref_put(&ns->kref, free_user_ns); } +uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid); +gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid); + #else static inline struct user_namespace *get_user_ns(struct user_namespace *ns) @@ -52,6 +55,17 @@ static inline void put_user_ns(struct user_namespace *ns) { } +static inline uid_t user_ns_map_uid(struct user_namespace *to, + const struct cred *cred, uid_t uid) +{ + return uid; +} +static inline gid_t user_ns_map_gid(struct user_namespace *to, + const struct cred *cred, gid_t gid) +{ + return gid; +} + #endif #endif /* _LINUX_USER_H */ diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index c9a975976995..814f294d4cd0 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -29,6 +29,7 @@ */ #ifndef LINUX_VGA_H +#define LINUX_VGA_H #include <asm/vga.h> diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index 5cf11765146b..395c38a47adb 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h @@ -4,6 +4,7 @@ * compatible drivers/servers. 
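The DECLARE_UVC_*(n) macros added to usb/video.h above let a driver declare fixed-size instances of the otherwise variable-length descriptors. A sketch of declaring a VideoControl header that references one streaming interface; the foo_ name and the field values are illustrative, and UVC_VC_HEADER is assumed to be the header subtype constant defined earlier in that file:

#include <linux/kernel.h>
#include <linux/usb/ch9.h>
#include <linux/usb/video.h>

DECLARE_UVC_HEADER_DESCRIPTOR(1);	/* defines struct uvc_header_descriptor_1 */

static const struct UVC_HEADER_DESCRIPTOR(1) foo_uvc_control_header = {
	.bLength		= UVC_DT_HEADER_SIZE(1),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VC_HEADER,
	.bcdUVC			= cpu_to_le16(0x0100),	/* multi-byte fields are little-endian on the wire */
	.wTotalLength		= 0,			/* usually filled in at runtime */
	.dwClockFrequency	= cpu_to_le32(48000000),
	.bInCollection		= 1,
	.baInterfaceNr[0]	= 1,			/* VideoStreaming interface number, made up */
};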
*/ #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +#include <linux/types.h> /* The feature bitmap for virtio 9P */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 227c2a585e4f..de05e96e0a70 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -30,7 +30,7 @@ struct vm_struct { unsigned long flags; struct page **pages; unsigned int nr_pages; - unsigned long phys_addr; + phys_addr_t phys_addr; void *caller; }; diff --git a/include/linux/wlp.h b/include/linux/wlp.h index ac95ce6606ac..c76fe2392506 100644 --- a/include/linux/wlp.h +++ b/include/linux/wlp.h @@ -300,7 +300,7 @@ struct wlp_ie { __le16 cycle_param; __le16 acw_anchor_addr; u8 wssid_hash_list[]; -} __attribute__((packed)); +} __packed; static inline int wlp_ie_hash_length(struct wlp_ie *ie) { @@ -324,7 +324,7 @@ static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length) */ struct wlp_nonce { u8 data[16]; -} __attribute__((packed)); +} __packed; /** * WLP UUID @@ -336,7 +336,7 @@ struct wlp_nonce { */ struct wlp_uuid { u8 data[16]; -} __attribute__((packed)); +} __packed; /** @@ -348,7 +348,7 @@ struct wlp_dev_type { u8 OUI[3]; u8 OUIsubdiv; __le16 subID; -} __attribute__((packed)); +} __packed; /** * WLP frame header @@ -357,7 +357,7 @@ struct wlp_dev_type { struct wlp_frame_hdr { __le16 mux_hdr; /* WLP_PROTOCOL_ID */ enum wlp_frame_type type:8; -} __attribute__((packed)); +} __packed; /** * WLP attribute field header @@ -368,7 +368,7 @@ struct wlp_frame_hdr { struct wlp_attr_hdr { __le16 type; __le16 length; -} __attribute__((packed)); +} __packed; /** * Device information commonly used together @@ -401,13 +401,13 @@ struct wlp_device_info { struct wlp_attr_##name { \ struct wlp_attr_hdr hdr; \ type name; \ -} __attribute__((packed)); +} __packed; #define wlp_attr_array(type, name) \ struct wlp_attr_##name { \ struct wlp_attr_hdr hdr; \ type name[]; \ -} __attribute__((packed)); +} __packed; /** * WLP association attribute fields @@ -483,7 +483,7 @@ struct wlp_wss_info { struct wlp_attr_accept_enrl accept; struct wlp_attr_wss_sec_status sec_stat; struct wlp_attr_wss_bcast bcast; -} __attribute__((packed)); +} __packed; /* WLP WSS Information */ wlp_attr_array(struct wlp_wss_info, wss_info) @@ -520,7 +520,7 @@ wlp_attr(u8, wlp_assc_err) struct wlp_frame_std_abbrv_hdr { struct wlp_frame_hdr hdr; u8 tag; -} __attribute__((packed)); +} __packed; /** * WLP association frames @@ -533,7 +533,7 @@ struct wlp_frame_assoc { struct wlp_attr_version version; struct wlp_attr_msg_type msg_type; u8 attr[]; -} __attribute__((packed)); +} __packed; /* Ethernet to dev address mapping */ struct wlp_eda { diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9466e860d8c2..4f9d277bcd9a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -9,6 +9,7 @@ #include <linux/linkage.h> #include <linux/bitops.h> #include <linux/lockdep.h> +#include <linux/threads.h> #include <asm/atomic.h> struct workqueue_struct; @@ -22,12 +23,59 @@ typedef void (*work_func_t)(struct work_struct *work); */ #define work_data_bits(work) ((unsigned long *)(&(work)->data)) +enum { + WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ + WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */ + WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */ +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */ + WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ +#else + WORK_STRUCT_COLOR_SHIFT = 3, /* 
color for workqueue flushing */ +#endif + + WORK_STRUCT_COLOR_BITS = 4, + + WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, + WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, + WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, +#else + WORK_STRUCT_STATIC = 0, +#endif + + /* + * The last color is no color used for works which don't + * participate in workqueue flushing. + */ + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, + WORK_NO_COLOR = WORK_NR_COLORS, + + /* special cpu IDs */ + WORK_CPU_UNBOUND = NR_CPUS, + WORK_CPU_NONE = NR_CPUS + 1, + WORK_CPU_LAST = WORK_CPU_NONE, + + /* + * Reserve 7 bits off of cwq pointer w/ debugobjects turned + * off. This makes cwqs aligned to 128 bytes which isn't too + * excessive while allowing 15 workqueue flush colors. + */ + WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + + WORK_STRUCT_COLOR_BITS, + + WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, + WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, + WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS, + + /* bit mask for work_busy() return values */ + WORK_BUSY_PENDING = 1 << 0, + WORK_BUSY_RUNNING = 1 << 1, +}; + struct work_struct { atomic_long_t data; -#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ -#define WORK_STRUCT_STATIC 1 /* static initializer (debugobjects) */ -#define WORK_STRUCT_FLAG_MASK (3UL) -#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) struct list_head entry; work_func_t func; #ifdef CONFIG_LOCKDEP @@ -35,8 +83,9 @@ struct work_struct { #endif }; -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) -#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(2) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) +#define WORK_DATA_STATIC_INIT() \ + ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC) struct delayed_work { struct work_struct work; @@ -96,9 +145,14 @@ struct execute_work { #ifdef CONFIG_DEBUG_OBJECTS_WORK extern void __init_work(struct work_struct *work, int onstack); extern void destroy_work_on_stack(struct work_struct *work); +static inline unsigned int work_static(struct work_struct *work) +{ + return *work_data_bits(work) & WORK_STRUCT_STATIC; +} #else static inline void __init_work(struct work_struct *work, int onstack) { } static inline void destroy_work_on_stack(struct work_struct *work) { } +static inline unsigned int work_static(struct work_struct *work) { return 0; } #endif /* @@ -162,7 +216,7 @@ static inline void destroy_work_on_stack(struct work_struct *work) { } * @work: The work item in question */ #define work_pending(work) \ - test_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) /** * delayed_work_pending - Find out whether a delayable work item is currently @@ -177,16 +231,56 @@ static inline void destroy_work_on_stack(struct work_struct *work) { } * @work: The work item in question */ #define work_clear_pending(work) \ - clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) + +enum { + WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ + WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ + WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ + WQ_RESCUER = 1 << 3, /* has an rescue worker */ + WQ_HIGHPRI = 1 << 4, /* high priority */ + WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ + + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? 
*/ + WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ + WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, +}; +/* unbound wq's aren't per-cpu, scale max_active according to #cpus */ +#define WQ_UNBOUND_MAX_ACTIVE \ + max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU) + +/* + * System-wide workqueues which are always present. + * + * system_wq is the one used by schedule[_delayed]_work[_on](). + * Multi-CPU multi-threaded. There are users which expect relatively + * short queue flush time. Don't queue works which can run for too + * long. + * + * system_long_wq is similar to system_wq but may host long running + * works. Queue flushing might take relatively long. + * + * system_nrt_wq is non-reentrant and guarantees that any given work + * item is never executed in parallel by multiple CPUs. Queue + * flushing might take relatively long. + * + * system_unbound_wq is unbound workqueue. Workers are not bound to + * any specific CPU, not concurrency managed, and all queued works are + * executed immediately as long as max_active limit is not reached and + * resources are available. + */ +extern struct workqueue_struct *system_wq; +extern struct workqueue_struct *system_long_wq; +extern struct workqueue_struct *system_nrt_wq; +extern struct workqueue_struct *system_unbound_wq; extern struct workqueue_struct * -__create_workqueue_key(const char *name, int singlethread, - int freezeable, int rt, struct lock_class_key *key, - const char *lock_name); +__alloc_workqueue_key(const char *name, unsigned int flags, int max_active, + struct lock_class_key *key, const char *lock_name); #ifdef CONFIG_LOCKDEP -#define __create_workqueue(name, singlethread, freezeable, rt) \ +#define alloc_workqueue(name, flags, max_active) \ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ @@ -196,20 +290,20 @@ __create_workqueue_key(const char *name, int singlethread, else \ __lock_name = #name; \ \ - __create_workqueue_key((name), (singlethread), \ - (freezeable), (rt), &__key, \ - __lock_name); \ + __alloc_workqueue_key((name), (flags), (max_active), \ + &__key, __lock_name); \ }) #else -#define __create_workqueue(name, singlethread, freezeable, rt) \ - __create_workqueue_key((name), (singlethread), (freezeable), (rt), \ - NULL, NULL) +#define alloc_workqueue(name, flags, max_active) \ + __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) #endif -#define create_workqueue(name) __create_workqueue((name), 0, 0, 0) -#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1) -#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0) -#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0) +#define create_workqueue(name) \ + alloc_workqueue((name), WQ_RESCUER, 1) +#define create_freezeable_workqueue(name) \ + alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) +#define create_singlethread_workqueue(name) \ + alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) extern void destroy_workqueue(struct workqueue_struct *wq); @@ -231,16 +325,19 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay) extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); extern int schedule_on_each_cpu(work_func_t func); -extern int current_is_keventd(void); extern int keventd_up(void); -extern void init_workqueues(void); int execute_in_process_context(work_func_t fn, struct execute_work *); extern int flush_work(struct work_struct *work); - extern int 
cancel_work_sync(struct work_struct *work); +extern void workqueue_set_max_active(struct workqueue_struct *wq, + int max_active); +extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); +extern unsigned int work_cpu(struct work_struct *work); +extern unsigned int work_busy(struct work_struct *work); + /* * Kill off a pending schedule_delayed_work(). Note that the work callback * function may still be running on return from cancel_delayed_work(), unless @@ -297,4 +394,15 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) #else long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); #endif /* CONFIG_SMP */ + +#ifdef CONFIG_FREEZER +extern void freeze_workqueues_begin(void); +extern bool freeze_workqueues_busy(void); +extern void thaw_workqueues(void); +#endif /* CONFIG_FREEZER */ + +#ifdef CONFIG_LOCKDEP +int in_workqueue_context(struct workqueue_struct *wq); +#endif + #endif diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d63ef8f9609f..c24eca71e80c 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -27,10 +27,6 @@ enum writeback_sync_modes { * in a manner such that unspecified fields are set to zero. */ struct writeback_control { - struct backing_dev_info *bdi; /* If !NULL, only write back this - queue */ - struct super_block *sb; /* if !NULL, only write inodes from - this super_block */ enum writeback_sync_modes sync_mode; unsigned long *older_than_this; /* If !NULL, only write back inodes older than this */ @@ -66,7 +62,8 @@ int inode_wait(void *); void writeback_inodes_sb(struct super_block *); int writeback_inodes_sb_if_idle(struct super_block *); void sync_inodes_sb(struct super_block *); -void writeback_inodes_wbc(struct writeback_control *wbc); +void writeback_inodes_wb(struct bdi_writeback *wb, + struct writeback_control *wbc); long wb_do_writeback(struct bdi_writeback *wb, int force_wait); void wakeup_flusher_threads(long nr_pages); diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 0cfa1e9c4cc1..f1e5bde4b35a 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -33,6 +33,20 @@ #define XATTR_USER_PREFIX "user." #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1) +/* Security namespace */ +#define XATTR_SELINUX_SUFFIX "selinux" +#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX + +#define XATTR_SMACK_SUFFIX "SMACK64" +#define XATTR_SMACK_IPIN "SMACK64IPIN" +#define XATTR_SMACK_IPOUT "SMACK64IPOUT" +#define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX +#define XATTR_NAME_SMACKIPIN XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN +#define XATTR_NAME_SMACKIPOUT XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT + +#define XATTR_CAPS_SUFFIX "capability" +#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX + struct inode; struct dentry; |
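
The workqueue.h changes above replace the old __create_workqueue() family with alloc_workqueue(), which takes WQ_* flags plus a max_active limit, and export the always-present system workqueues (system_wq, system_long_wq, system_nrt_wq, system_unbound_wq). The following is a minimal, illustrative sketch of how a driver might use the new interface; it is not part of the patch, and the names my_wq, my_work and my_work_fn are hypothetical.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	pr_info("example work item ran\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_example_init(void)
{
	/*
	 * Same flag combination the new create_freezeable_workqueue()
	 * wrapper uses: unbound, frozen across suspend, with a rescuer,
	 * and at most one work item active at a time.
	 */
	my_wq = alloc_workqueue("my_wq",
				WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1);
	if (!my_wq)
		return -ENOMEM;

	/* Long-running or reclaim-critical items go on the private queue... */
	queue_work(my_wq, &my_work);

	/*
	 * ...while short items could simply use schedule_work(), which
	 * now queues on the shared system_wq documented in the header.
	 */
	return 0;
}

static void __exit my_example_exit(void)
{
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}

module_init(my_example_init);
module_exit(my_example_exit);
MODULE_LICENSE("GPL");

Note that concurrency is now bounded by max_active rather than by a dedicated thread; create_singlethread_workqueue() is kept as a wrapper around alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) for existing callers.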
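
The new WORK_STRUCT_* enum also spells out how the atomic data word of a work_struct is split: the low WORK_STRUCT_FLAG_BITS bits hold the flags (pending, linked, static, flush color) and, when WORK_STRUCT_CWQ is set, the remaining bits hold a cwq pointer, which is why cwqs end up 128-byte aligned when debugobjects is off. Below is a sketch of how those masks compose; the helpers are illustrative only, the real accessors live in kernel/workqueue.c and may differ.

#include <linux/types.h>
#include <linux/workqueue.h>

/* True if the work item is currently pending execution. */
static inline int example_work_pending(struct work_struct *work)
{
	return test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work));
}

/*
 * Recover the cwq pointer packed above the flag bits.  Only meaningful
 * while WORK_STRUCT_CWQ is set; otherwise the word carries a CPU number
 * shifted by WORK_STRUCT_FLAG_BITS (WORK_STRUCT_NO_CPU when unused).
 */
static inline void *example_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (!(data & WORK_STRUCT_CWQ))
		return NULL;

	return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
}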
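
The xattr.h hunk adds central definitions for the security-namespace attribute names used by SELinux, Smack and file capabilities. They are built by compile-time string concatenation with XATTR_SECURITY_PREFIX ("security.", defined earlier in the same header), so XATTR_NAME_SELINUX expands to "security.selinux", XATTR_NAME_SMACK to "security.SMACK64" and XATTR_NAME_CAPS to "security.capability". A trivial, hypothetical use follows; the function name is illustrative, not taken from any in-tree LSM.

#include <linux/string.h>
#include <linux/xattr.h>

/* Hypothetical helper: does @name refer to the SELinux security xattr? */
static int example_is_selinux_xattr(const char *name)
{
	return strcmp(name, XATTR_NAME_SELINUX) == 0;	/* "security.selinux" */
}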