author     Rusty Russell <rusty@rustcorp.com.au>  2009-03-13 05:19:46 +0100
committer  Rusty Russell <rusty@rustcorp.com.au>  2009-03-13 05:19:46 +0100
commit     a70f730282019f487aa33a84e5ac9a5e89c5abd0 (patch)
tree       e6891ec5db5383c6f39617d0cc9671e1a0d1a988
parent     cpumask: use topology_core_cpumask/topology_thread_cpumask instead of cpu_cor... (diff)
cpumask: replace node_to_cpumask with cpumask_of_node.
Impact: cleanup
node_to_cpumask (and the blecherous node_to_cpumask_ptr, which
contained a declaration) are replaced now that everyone implements
cpumask_of_node.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
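
The conversion is mechanical for callers: node_to_cpumask_ptr() was a macro that expanded to a declaration, so it had to sit among a function's local variables, whereas cpumask_of_node() is an ordinary expression returning const struct cpumask *. A minimal sketch of the pattern, assuming an in-kernel build; node_cpu_count() is a hypothetical helper used only for illustration, not part of this patch:

#include <linux/cpumask.h>
#include <linux/topology.h>

/*
 * Hypothetical helper, for illustration only.  Before this patch the
 * body would have needed the declaring macro:
 *
 *	node_to_cpumask_ptr(mask, node);
 *	return cpus_weight(*mask);
 *
 * With cpumask_of_node() the node's mask can be used inline, with no
 * temporary on the caller's stack.
 */
static int node_cpu_count(int node)
{
	return cpumask_weight(cpumask_of_node(node));
}

This is the same shape as the nr_cpus_node() change in include/linux/topology.h below.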
-rw-r--r--  drivers/base/node.c       |  2
-rw-r--r--  drivers/pci/pci-driver.c  |  3
-rw-r--r--  include/linux/topology.h  |  6
-rw-r--r--  mm/page_alloc.c           |  6
-rw-r--r--  mm/quicklist.c            |  2
-rw-r--r--  mm/slab.c                 |  2
-rw-r--r--  mm/vmscan.c               |  6
-rw-r--r--  net/sunrpc/svc.c          |  3
8 files changed, 13 insertions(+), 17 deletions(-)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index f8f578a71b25..40b809742a1c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -24,7 +24,7 @@ static struct sysdev_class node_class = {
 static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
 {
 	struct node *node_dev = to_node(dev);
-	node_to_cpumask_ptr(mask, node_dev->sysdev.id);
+	const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
 	int len;
 
 	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 93eac1423585..b522f883d674 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -212,10 +212,9 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 	node = dev_to_node(&dev->dev);
 	if (node >= 0) {
 		int cpu;
-		node_to_cpumask_ptr(nodecpumask, node);
 
 		get_online_cpus();
-		cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
+		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
 		if (cpu < nr_cpu_ids)
 			error = work_on_cpu(cpu, local_pci_probe, &ddi);
 		else
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a16b9e06f2e5..16b7d6896ce9 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,11 +38,7 @@
 #endif
 
 #ifndef nr_cpus_node
-#define nr_cpus_node(node)				\
-	({						\
-		node_to_cpumask_ptr(__tmp__, node);	\
-		cpus_weight(*__tmp__);			\
-	})
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
 #endif
 
 #define for_each_node_with_cpus(node)			\
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c44ed49ca93..a92b0975b9a5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2134,7 +2134,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
+	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2155,8 +2155,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		tmp = cpumask_of_node(n);
+		if (!cpumask_empty(tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 8dbb6805ef35..e66d07d1b4ff 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages)
 	int node = numa_node_id();
 	struct zone *zones = NODE_DATA(node)->node_zones;
 	int num_cpus_on_node;
-	node_to_cpumask_ptr(cpumask_on_node, node);
+	const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
diff --git a/mm/slab.c b/mm/slab.c
index 4d00855629c4..2daaca0b4541 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,7 +1160,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
-	node_to_cpumask_ptr(mask, node);
+	const struct cpumask *mask = cpumask_of_node(node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6177e3bcd66b..cc6135586b44 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1963,7 +1963,7 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	node_to_cpumask_ptr(cpumask, pgdat->node_id);
+	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
@@ -2198,7 +2198,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
 			pg_data_t *pgdat = NODE_DATA(nid);
-			node_to_cpumask_ptr(mask, pgdat->node_id);
+			const struct cpumask *mask;
+
+			mask = cpumask_of_node(pgdat->node_id);
 
 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c51fed4d1af1..3bdd5bffaca8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -317,8 +317,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 	}
 	case SVC_POOL_PERNODE:
 	{
-		node_to_cpumask_ptr(nodecpumask, node);
-		set_cpus_allowed_ptr(task, nodecpumask);
+		set_cpus_allowed_ptr(task, cpumask_of_node(node));
 		break;
 	}
 	}
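
As a usage note on the new interface: because cpumask_of_node() needs no local temporary, it can be passed straight to affinity helpers, which is exactly what the sunrpc and pci-driver hunks above do. A hedged sketch along the same lines; start_pernode_workers() and its callback are hypothetical and not taken from this patch:

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

/*
 * Hypothetical example: start one worker kthread per online NUMA node
 * and restrict each worker to the CPUs of its node.
 */
static int start_pernode_workers(int (*fn)(void *))
{
	int node;

	for_each_online_node(node) {
		struct task_struct *task;

		task = kthread_create(fn, NULL, "worker/%d", node);
		if (IS_ERR(task))
			return PTR_ERR(task);

		/* No node_to_cpumask_ptr() temporary is needed any more. */
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		wake_up_process(task);
	}
	return 0;
}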