Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cpufreq.h    |   6
-rw-r--r--  include/linux/kobject.h    |   2
-rw-r--r--  include/linux/mm.h         |   6
-rw-r--r--  include/linux/mmzone.h     |  38
-rw-r--r--  include/linux/page-flags.h | 149
-rw-r--r--  include/linux/pagemap.h    |  45
-rw-r--r--  include/linux/pci_ids.h    |   1
-rw-r--r--  include/linux/rcupdate.h   |  24
-rw-r--r--  include/linux/rtc.h        |   4
-rw-r--r--  include/linux/sched.h      |   2
-rw-r--r--  include/linux/security.h   |  38
-rw-r--r--  include/linux/smp.h        |   2
-rw-r--r--  include/linux/sunrpc/svc.h |   4
-rw-r--r--  include/linux/swap.h       |   1
-rw-r--r--  include/linux/vmstat.h     | 215
15 files changed, 326 insertions(+), 211 deletions(-)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 466fbe9e4899..35e137636b0b 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -100,8 +100,10 @@ struct cpufreq_policy {
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
-#define CPUFREQ_SHARED_TYPE_ALL (0) /* All dependent CPUs should set freq */
-#define CPUFREQ_SHARED_TYPE_ANY (1) /* Freq can be set from any dependent CPU */
+#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
+#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
+#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
+#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU */
/******************** cpufreq transition notifiers *******************/
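How a driver would consume the expanded coordination map: a minimal sketch, assuming policy->shared_type carries one of these values (as in the ACPI processor driver of this era); example_set_coordination() and hw_coord are hypothetical.

	/* Sketch: pick a shared type from firmware coordination info.
	 * hw_coord is a hypothetical flag meaning "hardware coordinates
	 * frequency across the dependent CPUs by itself". */
	static void example_set_coordination(struct cpufreq_policy *policy,
					     int hw_coord)
	{
		if (hw_coord)
			policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else
			policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	}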
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2d229327959e..0503b2ed8bae 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -46,6 +46,8 @@ enum kobject_action {
KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
+ KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */
+ KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */
};
struct kobject {
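The new actions plug into the existing uevent machinery; a hedged sketch, where dock_kobj is a hypothetical, already-registered kobject:

	/* Sketch: tell user space a device docked or undocked. */
	kobject_uevent(dock_kobj, KOBJ_DOCK);
	/* ... later, on eject ... */
	kobject_uevent(dock_kobj, KOBJ_UNDOCK);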
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c41a1299b8cf..75179529e399 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
set_page_section(page, pfn_to_section_nr(pfn));
}
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d6120fa69116..27e748eb72b0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,6 +46,27 @@ struct zone_padding {
#define ZONE_PADDING(name)
#endif
+enum zone_stat_item {
+ NR_ANON_PAGES, /* Mapped anonymous pages */
+ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
+ only modified from process context */
+ NR_FILE_PAGES,
+ NR_SLAB, /* Pages used by slab allocator */
+ NR_PAGETABLE, /* used for pagetables */
+ NR_FILE_DIRTY,
+ NR_WRITEBACK,
+ NR_UNSTABLE_NFS, /* NFS unstable pages */
+ NR_BOUNCE,
+#ifdef CONFIG_NUMA
+ NUMA_HIT, /* allocated in intended node */
+ NUMA_MISS, /* allocated in non intended node */
+ NUMA_FOREIGN, /* was intended here, hit elsewhere */
+ NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
+ NUMA_LOCAL, /* allocation from local node */
+ NUMA_OTHER, /* allocation from other node */
+#endif
+ NR_VM_ZONE_STAT_ITEMS };
+
struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
@@ -55,13 +76,8 @@ struct per_cpu_pages {
struct per_cpu_pageset {
struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
-#ifdef CONFIG_NUMA
- unsigned long numa_hit; /* allocated in intended node */
- unsigned long numa_miss; /* allocated in non intended node */
- unsigned long numa_foreign; /* was intended here, hit elsewhere */
- unsigned long interleave_hit; /* interleaver prefered this zone */
- unsigned long local_node; /* allocation from local node */
- unsigned long other_node; /* allocation from other node */
+#ifdef CONFIG_SMP
+ s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;
@@ -165,12 +181,8 @@ struct zone {
/* A count of how many reclaimers are scanning this zone */
atomic_t reclaim_in_progress;
- /*
- * timestamp (in jiffies) of the last zone reclaim that did not
- * result in freeing of pages. This is used to avoid repeated scans
- * if all memory in the zone is in use.
- */
- unsigned long last_unsuccessful_zone_reclaim;
+ /* Zone statistics */
+ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
/*
* prev_priority holds the scanning priority for this zone. It is
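With the counters folded into zone->vm_stat[], a reader samples them through the new vmstat.h helpers instead of walking per-cpu pagesets; a minimal sketch (zone assumed valid, e.g. from a pgdat walk):

	/* Sketch: per-zone and global views of the same counter. */
	unsigned long zone_dirty = zone_page_state(zone, NR_FILE_DIRTY);
	unsigned long all_dirty  = global_page_state(NR_FILE_DIRTY);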
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0c076d58c676..5748642e9f36 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -5,12 +5,8 @@
#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H
-#include <linux/percpu.h>
-#include <linux/cache.h>
#include <linux/types.h>
-#include <asm/pgtable.h>
-
/*
* Various page->flags bits:
*
@@ -103,134 +99,6 @@
#endif
/*
- * Global page accounting. One instance per CPU. Only unsigned longs are
- * allowed.
- *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- * any time safely (which protects the instance from modification by
- * interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- * disabled.
- * - The __xxx_page_state variants can be used if the field is only
- * modified from process context and protected from preemption, or only
- * modified from interrupt context. In this case, the field should be
- * commented here.
- */
-struct page_state {
- unsigned long nr_dirty; /* Dirty writeable pages */
- unsigned long nr_writeback; /* Pages under writeback */
- unsigned long nr_unstable; /* NFS unstable pages */
- unsigned long nr_page_table_pages;/* Pages used for pagetables */
- unsigned long nr_mapped; /* mapped into pagetables.
- * only modified from process context */
- unsigned long nr_slab; /* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
-
- /*
- * The below are zeroed by get_page_state(). Use get_full_page_state()
- * to add up all these.
- */
- unsigned long pgpgin; /* Disk reads */
- unsigned long pgpgout; /* Disk writes */
- unsigned long pswpin; /* swap reads */
- unsigned long pswpout; /* swap writes */
-
- unsigned long pgalloc_high; /* page allocations */
- unsigned long pgalloc_normal;
- unsigned long pgalloc_dma32;
- unsigned long pgalloc_dma;
-
- unsigned long pgfree; /* page freeings */
- unsigned long pgactivate; /* pages moved inactive->active */
- unsigned long pgdeactivate; /* pages moved active->inactive */
-
- unsigned long pgfault; /* faults (major+minor) */
- unsigned long pgmajfault; /* faults (major only) */
-
- unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
- unsigned long pgrefill_normal;
- unsigned long pgrefill_dma32;
- unsigned long pgrefill_dma;
-
- unsigned long pgsteal_high; /* total highmem pages reclaimed */
- unsigned long pgsteal_normal;
- unsigned long pgsteal_dma32;
- unsigned long pgsteal_dma;
-
- unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
- unsigned long pgscan_kswapd_normal;
- unsigned long pgscan_kswapd_dma32;
- unsigned long pgscan_kswapd_dma;
-
- unsigned long pgscan_direct_high;/* total highmem pages scanned */
- unsigned long pgscan_direct_normal;
- unsigned long pgscan_direct_dma32;
- unsigned long pgscan_direct_dma;
-
- unsigned long pginodesteal; /* pages reclaimed via inode freeing */
- unsigned long slabs_scanned; /* slab objects scanned */
- unsigned long kswapd_steal; /* pages reclaimed by kswapd */
- unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
- unsigned long pageoutrun; /* kswapd's calls to page reclaim */
- unsigned long allocstall; /* direct reclaim calls */
-
- unsigned long pgrotated; /* pages rotated to tail of the LRU */
- unsigned long nr_bounce; /* pages for bounce buffers */
-};
-
-extern void get_page_state(struct page_state *ret);
-extern void get_page_state_node(struct page_state *ret, int node);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long read_page_state_offset(unsigned long offset);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
- read_page_state_offset(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta) \
- mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta) \
- __mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member) mod_page_state(member, 1UL)
-#define dec_page_state(member) mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta) mod_page_state(member, (delta))
-#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member) __mod_page_state(member, 1UL)
-#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta) __mod_page_state(member, (delta))
-#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member) \
-({ \
- unsigned offset; \
- if (is_highmem(zone)) \
- offset = offsetof(struct page_state, member##_high); \
- else if (is_normal(zone)) \
- offset = offsetof(struct page_state, member##_normal); \
- else if (is_dma32(zone)) \
- offset = offsetof(struct page_state, member##_dma32); \
- else \
- offset = offsetof(struct page_state, member##_dma); \
- offset; \
-})
-
-#define __mod_page_state_zone(zone, member, delta) \
- do { \
- __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
-
-#define mod_page_state_zone(zone, member, delta) \
- do { \
- mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
-
-/*
* Manipulation of page state flags
*/
#define PageLocked(page) \
@@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
+#ifdef CONFIG_S390
+#define SetPageUptodate(_page) \
+ do { \
+ struct page *__page = (_page); \
+ if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
+ page_test_and_clear_dirty(_page); \
+ } while (0)
+#else
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
#endif
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
@@ -306,7 +181,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
do { \
if (!test_and_set_bit(PG_writeback, \
&(page)->flags)) \
- inc_page_state(nr_writeback); \
+ inc_zone_page_state(page, NR_WRITEBACK); \
} while (0)
#define TestSetPageWriteback(page) \
({ \
@@ -314,14 +189,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
ret = test_and_set_bit(PG_writeback, \
&(page)->flags); \
if (!ret) \
- inc_page_state(nr_writeback); \
+ inc_zone_page_state(page, NR_WRITEBACK); \
ret; \
})
#define ClearPageWriteback(page) \
do { \
if (test_and_clear_bit(PG_writeback, \
&(page)->flags)) \
- dec_page_state(nr_writeback); \
+ dec_zone_page_state(page, NR_WRITEBACK); \
} while (0)
#define TestClearPageWriteback(page) \
({ \
@@ -329,7 +204,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
ret = test_and_clear_bit(PG_writeback, \
&(page)->flags); \
if (ret) \
- dec_page_state(nr_writeback); \
+ dec_zone_page_state(page, NR_WRITEBACK); \
ret; \
})
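The flag macros now carry the accounting themselves, so callers need no separate page-state bookkeeping; a sketch, assuming page is a locked pagecache page:

	SetPageWriteback(page);    /* sets PG_writeback, bumps NR_WRITEBACK */
	/* ... I/O in flight ... */
	ClearPageWriteback(page);  /* clears PG_writeback, drops NR_WRITEBACK */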
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7141aa..0a2f5d27f60e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
- long *local;
-
- local = &__get_cpu_var(nr_pagecache_local);
- *local += count;
- if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
- atomic_add(*local, &nr_pagecache);
- *local = 0;
- }
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
- atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
- int ret = atomic_read(&nr_pagecache);
- if (unlikely(ret < 0))
- ret = 0;
- return ret;
-}
-
/*
* Return byte-offset into filesystem object for page.
*/
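The removed pagecache_acct()/get_page_cache_size() pair is superseded by the zone counters; a former get_page_cache_size() caller would now read NR_FILE_PAGES, a sketch:

	/* Sketch: global pagecache size from the vmstat counters. */
	unsigned long pagecache = global_page_state(NR_FILE_PAGES);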
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9ae6b1a75366..b093479a531d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -729,6 +729,7 @@
#define PCI_DEVICE_ID_TI_4450 0x8011
#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
#define PCI_DEVICE_ID_TI_X515 0x8036
+#define PCI_DEVICE_ID_TI_XX12 0x8039
#define PCI_DEVICE_ID_TI_1130 0xac12
#define PCI_DEVICE_ID_TI_1031 0xac13
#define PCI_DEVICE_ID_TI_1131 0xac15
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 48dfe00070c7..b4ca73d65891 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -163,14 +163,22 @@ extern int rcu_needs_cpu(int cpu);
*
* It is illegal to block while in an RCU read-side critical section.
*/
-#define rcu_read_lock() preempt_disable()
+#define rcu_read_lock() \
+ do { \
+ preempt_disable(); \
+ __acquire(RCU); \
+ } while(0)
/**
* rcu_read_unlock - marks the end of an RCU read-side critical section.
*
* See rcu_read_lock() for more information.
*/
-#define rcu_read_unlock() preempt_enable()
+#define rcu_read_unlock() \
+ do { \
+ __release(RCU); \
+ preempt_enable(); \
+ } while(0)
/*
* So where is rcu_write_lock()? It does not exist, as there is no
@@ -193,14 +201,22 @@ extern int rcu_needs_cpu(int cpu);
* can use just rcu_read_lock().
*
*/
-#define rcu_read_lock_bh() local_bh_disable()
+#define rcu_read_lock_bh() \
+ do { \
+ local_bh_disable(); \
+ __acquire(RCU_BH); \
+ } while(0)
/*
* rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
-#define rcu_read_unlock_bh() local_bh_enable()
+#define rcu_read_unlock_bh() \
+ do { \
+ __release(RCU_BH); \
+ local_bh_enable(); \
+ } while(0)
/**
* rcu_dereference - fetch an RCU-protected pointer in an
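The __acquire/__release annotations are no-ops at run time but let sparse flag unbalanced read-side sections; usage is unchanged. A sketch, where gbl_foo and do_something() are hypothetical:

	static void reader(void)
	{
		struct foo *p;

		rcu_read_lock();
		p = rcu_dereference(gbl_foo);	/* RCU-protected pointer */
		if (p)
			do_something(p);
		rcu_read_unlock();		/* no blocking in between */
	}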
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 36e2bf4b4315..5371e4e74595 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -34,8 +34,8 @@ struct rtc_time {
* alarm API.
*/
struct rtc_wkalrm {
- unsigned char enabled; /* 0 = alarm disable, 1 = alarm disabled */
- unsigned char pending; /* 0 = alarm pending, 1 = alarm not pending */
+ unsigned char enabled; /* 0 = alarm disabled, 1 = alarm enabled */
+ unsigned char pending; /* 0 = alarm not pending, 1 = alarm pending */
struct rtc_time time; /* time the alarm is set to */
};
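A sketch matching the corrected semantics (field values per the fixed comments):

	struct rtc_wkalrm alrm = {
		.enabled = 1,	/* 1 = alarm enabled */
		.time	 = { .tm_hour = 6, .tm_min = 0, .tm_sec = 0 },
	};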
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 821f0481ebe1..aaf723308ed4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1153,7 +1153,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
+extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
diff --git a/include/linux/security.h b/include/linux/security.h
index c7ea15716dce..f75303831d09 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -567,6 +567,9 @@ struct swap_info_struct;
* @p.
* @p contains the task_struct for the process.
* Return 0 if permission is granted.
+ * @task_getsecid:
+ * Retrieve the security identifier of the process @p.
* @p contains the task_struct for the process; its security identifier is placed into @secid.
* @task_setgroups:
* Check permission before setting the supplementary group set of the
* current process.
@@ -582,6 +585,10 @@ struct swap_info_struct;
* @p contains the task_struct of process.
* @ioprio contains the new ioprio value
* Return 0 if permission is granted.
* @task_getioprio:
+ * Check permission before getting the ioprio value of @p.
+ * @p contains the task_struct of process.
+ * Return 0 if permission is granted.
* @task_setrlimit:
* Check permission before setting the resource limits of the current
* process for @resource to @new_rlim. The old resource limit values can
@@ -615,6 +622,7 @@ struct swap_info_struct;
* @p contains the task_struct for process.
* @info contains the signal information.
* @sig contains the signal value.
+ * @secid contains the sid of the process where the signal originated
* Return 0 if permission is granted.
* @task_wait:
* Check permission before allowing a process to reap a child process @p
@@ -1219,16 +1227,18 @@ struct security_operations {
int (*task_setpgid) (struct task_struct * p, pid_t pgid);
int (*task_getpgid) (struct task_struct * p);
int (*task_getsid) (struct task_struct * p);
+ void (*task_getsecid) (struct task_struct * p, u32 * secid);
int (*task_setgroups) (struct group_info *group_info);
int (*task_setnice) (struct task_struct * p, int nice);
int (*task_setioprio) (struct task_struct * p, int ioprio);
+ int (*task_getioprio) (struct task_struct * p);
int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
int (*task_setscheduler) (struct task_struct * p, int policy,
struct sched_param * lp);
int (*task_getscheduler) (struct task_struct * p);
int (*task_movememory) (struct task_struct * p);
int (*task_kill) (struct task_struct * p,
- struct siginfo * info, int sig);
+ struct siginfo * info, int sig, u32 secid);
int (*task_wait) (struct task_struct * p);
int (*task_prctl) (int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
@@ -1839,6 +1849,11 @@ static inline int security_task_getsid (struct task_struct *p)
return security_ops->task_getsid (p);
}
+static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+{
+ security_ops->task_getsecid (p, secid);
+}
+
static inline int security_task_setgroups (struct group_info *group_info)
{
return security_ops->task_setgroups (group_info);
@@ -1854,6 +1869,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
return security_ops->task_setioprio (p, ioprio);
}
+static inline int security_task_getioprio (struct task_struct *p)
+{
+ return security_ops->task_getioprio (p);
+}
+
static inline int security_task_setrlimit (unsigned int resource,
struct rlimit *new_rlim)
{
@@ -1878,9 +1898,10 @@ static inline int security_task_movememory (struct task_struct *p)
}
static inline int security_task_kill (struct task_struct *p,
- struct siginfo *info, int sig)
+ struct siginfo *info, int sig,
+ u32 secid)
{
- return security_ops->task_kill (p, info, sig);
+ return security_ops->task_kill (p, info, sig, secid);
}
static inline int security_task_wait (struct task_struct *p)
@@ -2491,6 +2512,9 @@ static inline int security_task_getsid (struct task_struct *p)
return 0;
}
+static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+{ }
+
static inline int security_task_setgroups (struct group_info *group_info)
{
return 0;
@@ -2506,6 +2530,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
return 0;
}
+static inline int security_task_getioprio (struct task_struct *p)
+{
+ return 0;
+}
+
static inline int security_task_setrlimit (unsigned int resource,
struct rlimit *new_rlim)
{
@@ -2530,7 +2559,8 @@ static inline int security_task_movememory (struct task_struct *p)
}
static inline int security_task_kill (struct task_struct *p,
- struct siginfo *info, int sig)
+ struct siginfo *info, int sig,
+ u32 secid)
{
return 0;
}
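A hedged sketch of the intended caller flow for the secid plumbing: capture the sender's secid up front, then pass it to the kill hook so the check still holds even if the sender exits first (example_send_sig() is illustrative):

	static int example_send_sig(struct task_struct *p,
				    struct siginfo *info, int sig)
	{
		u32 secid;

		security_task_getsecid(current, &secid);
		return security_task_kill(p, info, sig, secid);
	}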
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c93c3fe4308c..837e8bce1349 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -125,4 +125,6 @@ static inline void smp_send_reschedule(int cpu) { }
#define put_cpu() preempt_enable()
#define put_cpu_no_resched() preempt_enable_no_resched()
+void smp_setup_processor_id(void);
+
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 503564384545..7b27c09b5604 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -159,7 +159,9 @@ struct svc_rqst {
* determine what device number
* to report (real or virtual)
*/
-
+ int rq_sendfile_ok; /* turned off in gss privacy
+ * to prevent encrypting page
+ * cache pages */
wait_queue_head_t rq_wait; /* synchronization */
};
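A sketch of how a transmit path might honour the new flag; svc_sendfile() and svc_copy_send() are hypothetical helpers:

	if (rqstp->rq_sendfile_ok)
		err = svc_sendfile(rqstp);	/* zero-copy from page cache */
	else
		err = svc_copy_send(rqstp);	/* copy first, so gss privacy
						 * never encrypts cache pages */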
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c41e2d6d1acc..cf6ca6e377bd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,7 +189,6 @@ extern long vm_total_pages;
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
-extern int zone_reclaim_interval;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
new file mode 100644
index 000000000000..3e0daf54133e
--- /dev/null
+++ b/include/linux/vmstat.h
@@ -0,0 +1,215 @@
+#ifndef _LINUX_VMSTAT_H
+#define _LINUX_VMSTAT_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/config.h>
+#include <linux/mmzone.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
+#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
+
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+ FOR_ALL_ZONES(PGALLOC),
+ PGFREE, PGACTIVATE, PGDEACTIVATE,
+ PGFAULT, PGMAJFAULT,
+ FOR_ALL_ZONES(PGREFILL),
+ FOR_ALL_ZONES(PGSTEAL),
+ FOR_ALL_ZONES(PGSCAN_KSWAPD),
+ FOR_ALL_ZONES(PGSCAN_DIRECT),
+ PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+ PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+ NR_VM_EVENT_ITEMS
+};
+
+struct vm_event_state {
+ unsigned long event[NR_VM_EVENT_ITEMS];
+};
+
+DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+
+static inline void __count_vm_event(enum vm_event_item item)
+{
+ __get_cpu_var(vm_event_states.event[item])++;
+}
+
+static inline void count_vm_event(enum vm_event_item item)
+{
+ get_cpu_var(vm_event_states.event[item])++;
+ put_cpu();
+}
+
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+ __get_cpu_var(vm_event_states.event[item]) += delta;
+}
+
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+ get_cpu_var(vm_event_states.event[item]) += delta;
+ put_cpu();
+}
+
+extern void all_vm_events(unsigned long *);
+extern void vm_events_fold_cpu(int cpu);
+
+#else
+
+/* Disable counters */
+#define get_cpu_vm_events(e) 0L
+#define count_vm_event(e) do { } while (0)
+#define count_vm_events(e,d) do { } while (0)
+#define __count_vm_event(e) do { } while (0)
+#define __count_vm_events(e,d) do { } while (0)
+#define vm_events_fold_cpu(x) do { } while (0)
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
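Event counter usage, a sketch: callers in any context use the plain variants, while the __ variants assume preemption is already disabled (nr_pages is hypothetical):

	count_vm_event(PGFAULT);		/* any context */
	__count_vm_events(PGPGOUT, nr_pages);	/* preempt already off */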
+
+#define __count_zone_vm_events(item, zone, delta) \
+ __count_vm_events(item##_DMA + zone_idx(zone), delta)
+
+/*
+ * Zone based page accounting with per cpu differentials.
+ */
+extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+static inline void zone_page_state_add(long x, struct zone *zone,
+ enum zone_stat_item item)
+{
+ atomic_long_add(x, &zone->vm_stat[item]);
+ atomic_long_add(x, &vm_stat[item]);
+}
+
+static inline unsigned long global_page_state(enum zone_stat_item item)
+{
+ long x = atomic_long_read(&vm_stat[item]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
+static inline unsigned long zone_page_state(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
+#ifdef CONFIG_NUMA
+/*
+ * Determine the per node value of a stat item. This function
+ * is called frequently in a NUMA machine, so try to be as
+ * frugal as possible.
+ */
+static inline unsigned long node_page_state(int node,
+ enum zone_stat_item item)
+{
+ struct zone *zones = NODE_DATA(node)->node_zones;
+
+ return
+#ifndef CONFIG_DMA_IS_NORMAL
+#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
+ zone_page_state(&zones[ZONE_DMA32], item) +
+#endif
+ zone_page_state(&zones[ZONE_NORMAL], item) +
+#endif
+#ifdef CONFIG_HIGHMEM
+ zone_page_state(&zones[ZONE_HIGHMEM], item) +
+#endif
+ zone_page_state(&zones[ZONE_DMA], item);
+}
+
+extern void zone_statistics(struct zonelist *, struct zone *);
+
+#else
+
+#define node_page_state(node, item) global_page_state(item)
+#define zone_statistics(_zl,_z) do { } while (0)
+
+#endif /* CONFIG_NUMA */
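A sketch of the per-node view; on !NUMA the #define above collapses it to the global value (nid assumed to be an online node id):

	unsigned long mapped = node_page_state(nid, NR_FILE_MAPPED);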
+
+#define __add_zone_page_state(__z, __i, __d) \
+ __mod_zone_page_state(__z, __i, __d)
+#define __sub_zone_page_state(__z, __i, __d) \
+ __mod_zone_page_state(__z, __i,-(__d))
+
+#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
+#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
+
+static inline void zap_zone_vm_stats(struct zone *zone)
+{
+ memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
+}
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
+#ifdef CONFIG_SMP
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __inc_zone_page_state(struct page *, enum zone_stat_item);
+void __dec_zone_page_state(struct page *, enum zone_stat_item);
+
+void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void inc_zone_page_state(struct page *, enum zone_stat_item);
+void dec_zone_page_state(struct page *, enum zone_stat_item);
+
+void refresh_cpu_vm_stats(int);
+void refresh_vm_stats(void);
+
+#else /* CONFIG_SMP */
+
+/*
+ * We do not maintain differentials in a single processor configuration.
+ * The functions directly modify the zone and global counters.
+ */
+static inline void __mod_zone_page_state(struct zone *zone,
+ enum zone_stat_item item, int delta)
+{
+ zone_page_state_add(delta, zone, item);
+}
+
+static inline void __inc_zone_page_state(struct page *page,
+ enum zone_stat_item item)
+{
+ atomic_long_inc(&page_zone(page)->vm_stat[item]);
+ atomic_long_inc(&vm_stat[item]);
+}
+
+static inline void __dec_zone_page_state(struct page *page,
+ enum zone_stat_item item)
+{
+ atomic_long_dec(&page_zone(page)->vm_stat[item]);
+ atomic_long_dec(&vm_stat[item]);
+}
+
+/*
+ * We only use atomic operations to update counters. So there is no need to
+ * disable interrupts.
+ */
+#define inc_zone_page_state __inc_zone_page_state
+#define dec_zone_page_state __dec_zone_page_state
+#define mod_zone_page_state __mod_zone_page_state
+
+static inline void refresh_cpu_vm_stats(int cpu) { }
+static inline void refresh_vm_stats(void) { }
+#endif
+
+#endif /* _LINUX_VMSTAT_H */
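A closing sketch of a typical counter life cycle under the new API; the __ variants rely on the caller to keep preemption (and, where needed, interrupts) off:

	static void example_page_table_alloc(struct page *page)
	{
		__inc_zone_page_state(page, NR_PAGETABLE);
	}

	static void example_page_table_free(struct page *page)
	{
		__dec_zone_page_state(page, NR_PAGETABLE);
	}

	/* Readers sample the folded atomics; transiently negative sums
	 * are clamped to zero on SMP. */
	unsigned long nr_pt = global_page_state(NR_PAGETABLE);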