author     Linus Torvalds <torvalds@linux-foundation.org>   2009-06-14 22:53:22 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-06-14 22:53:22 +0200
commit     cf5046323ea254be72535648a9d090b18b8510f3 (patch)
tree       d44894722bd965b2f28a54c4dc4157f22b618c34 /drivers/net
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/jaswinder/headers-check-2.6 (diff)
parent     Merge branches 'cxgb3', 'ehca', 'misc', 'mlx4', 'mthca' and 'nes' into for-linus (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Don't double-free IRQs when falling back from MSI-X to INTx
  IB/mthca: Don't double-free IRQs when falling back from MSI-X to INTx
  IB/mlx4: Add strong ordering to local inval and fast reg work requests
  IB/ehca: Remove superfluous bitmasks from QP control block
  RDMA/cxgb3: Limit fast register size based on T3 limitations
  RDMA/cxgb3: Report correct port state and MTU
  mlx4_core: Add module parameter for number of MTTs per segment
  IB/mthca: Add module parameter for number of MTTs per segment
  RDMA/nes: Fix off-by-one bugs in reset_adapter_ne020() and init_serdes()
  infiniband: Remove void casts
  IB/ehca: Increment version number
  IB/ehca: Remove unnecessary memory operations for userspace queue pairs
  IB/ehca: Fall back to vmalloc() for big allocations
  IB/ehca: Replace vmalloc() with kmalloc() for queue allocation
Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/mlx4/eq.c       |  4
-rw-r--r--   drivers/net/mlx4/main.c     | 14
-rw-r--r--   drivers/net/mlx4/mr.c       |  6
-rw-r--r--   drivers/net/mlx4/profile.c  |  2
4 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8830dcb92ec8..ce064e324200 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
         if (eq_table->have_irq)
                 free_irq(dev->pdev->irq, dev);
         for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-                if (eq_table->eq[i].have_irq)
+                if (eq_table->eq[i].have_irq) {
                         free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+                        eq_table->eq[i].have_irq = 0;
+                }
 
         kfree(eq_table->irq_names);
 }
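
The hunk above addresses the case where MSI-X setup fails and mlx4_core falls back to INTx: mlx4_free_irqs() then runs once during the fallback and again during normal teardown, and without clearing have_irq the second pass would call free_irq() on vectors that were already released. Below is a minimal user-space sketch of that flag-guarded release pattern; fake_eq and release_vector() are made-up stand-ins for illustration, not driver code.

/*
 * Illustrative sketch only: clearing the have_irq flag makes a second
 * pass through the free path a harmless no-op.
 */
#include <stdio.h>

struct fake_eq {
        int irq;
        int have_irq;   /* set when the vector was successfully requested */
};

static void release_vector(int irq)
{
        /* Releasing the same vector twice is the bug the patch avoids. */
        printf("releasing irq %d\n", irq);
}

static void free_irqs(struct fake_eq *eq, int n)
{
        int i;

        for (i = 0; i < n; ++i)
                if (eq[i].have_irq) {
                        release_vector(eq[i].irq);
                        eq[i].have_irq = 0;     /* the fix: mark it freed */
                }
}

int main(void)
{
        struct fake_eq eq[2] = { { 32, 1 }, { 33, 1 } };

        free_irqs(eq, 2);       /* e.g. MSI-X setup failed, tearing down */
        free_irqs(eq, 2);       /* later cleanup path: now a no-op */
        return 0;
}
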
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea9689694..018348c01193 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                   "(0/1, default 0)");
 
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
                            enum mlx4_port_type *port_type)
 {
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
         dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
         dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
+        dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
         dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                                    MLX4_MTT_ENTRY_PER_SEG);
+                                                    dev->caps.mtts_per_seg);
         dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
         dev->caps.reserved_uars      = dev_cap->reserved_uars;
         dev->caps.reserved_pds       = dev_cap->reserved_pds;
-        dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+        dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
         dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
         dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
         dev->caps.flags              = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
                 return -1;
         }
 
+        if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+                printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+                return -1;
+        }
+
         return 0;
 }
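
Everything below keys off dev->caps.mtts_per_seg = 1 << log_mtts_per_seg, with the module parameter restricted to the range 1-5, i.e. 2 to 32 MTT entries per segment. Here is a small stand-alone sketch of that arithmetic; the 8-byte MTT entry size is an assumption for illustration only, since the real value comes from the device capabilities (dev_cap->mtt_entry_sz).

/*
 * Sketch of the parameter arithmetic (stand-alone, not driver code):
 * each segment covers mtts_per_seg * mtt_entry_sz bytes of the
 * translation table.
 */
#include <stdio.h>

int main(void)
{
        int mtt_entry_sz = 8;           /* assumed per-entry size in bytes */
        int log;

        for (log = 1; log <= 5; ++log) {
                int mtts_per_seg = 1 << log;

                printf("log_mtts_per_seg=%d -> %2d MTTs/segment, %3d bytes/segment\n",
                       log, mtts_per_seg, mtts_per_seg * mtt_entry_sz);
        }
        return 0;
}
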
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8bc..3b8973d19933 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
         } else
                 mtt->page_shift = page_shift;
 
-        for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+        for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
                 ++mtt->order;
 
         mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                    MLX4_MPT_PD_FLAG_RAE);
                 mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-                                                   MLX4_MTT_ENTRY_PER_SEG);
+                                                   dev->caps.mtts_per_seg);
         } else {
                 mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
         }
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
             (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
                 return -EINVAL;
 
-        if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+        if (start_index & (dev->caps.mtts_per_seg - 1))
                 return -EINVAL;
 
         mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
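
Two of the mr.c hunks rely on mtts_per_seg being a power of two: mlx4_mtt_init() picks an order such that 1 << order segments cover npages, and mlx4_write_mtt_chunk() rejects writes that do not start on a segment boundary with a simple mask. A stand-alone sketch of both calculations follows; it mirrors the logic but is not driver code, and the example values are arbitrary.

/*
 * mtt_order() mirrors the loop in mlx4_mtt_init(): the order is how many
 * times the per-segment entry count must be doubled to cover npages, so
 * 1 << order segments get allocated.  The alignment test only works
 * because mtts_per_seg is a power of two.
 */
#include <stdio.h>

static int mtt_order(int npages, int mtts_per_seg)
{
        int order = 0;
        int i;

        for (i = mtts_per_seg; i < npages; i <<= 1)
                ++order;
        return order;
}

int main(void)
{
        int mtts_per_seg = 8;   /* e.g. log_mtts_per_seg=3 */

        printf("npages=100 -> order %d (%d segments)\n",
               mtt_order(100, mtts_per_seg), 1 << mtt_order(100, mtts_per_seg));

        /* Segment-alignment check used by the write path. */
        printf("start_index=16 aligned? %s\n",
               (16 & (mtts_per_seg - 1)) ? "no" : "yes");
        printf("start_index=20 aligned? %s\n",
               (20 & (mtts_per_seg - 1)) ? "no" : "yes");
        return 0;
}
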
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca1..bd22df95adf9 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
         profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
         profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
         profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-        profile[MLX4_RES_MTT].size    = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+        profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
         profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
 
         profile[MLX4_RES_QP].num      = request->num_qp;