Diffstat (limited to 'drivers/infiniband/hw/mlx4/cq.c')
drivers/infiniband/hw/mlx4/cq.c | 325
1 file changed, 289 insertions(+), 36 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 7360bbafbe84..5e570bb0bb6f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -85,6 +85,82 @@ static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
return get_sw_cqe(cq, cq->mcq.cons_index);
}
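+/*
+ * Tune event moderation: the HCA raises a completion event after
+ * cq_count CQEs or cq_period microseconds, whichever comes first.
+ */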
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ struct mlx4_ib_cq *mcq = to_mcq(cq);
+ struct mlx4_ib_dev *dev = to_mdev(cq->device);
+
+ return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
+}
+
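+/*
+ * Allocate a kernel-owned CQE buffer and map it through the MTT so
+ * the HCA can DMA completions into it.
+ */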
+static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
+{
+ int err;
+
+ err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+ PAGE_SIZE * 2, &buf->buf);
+
+ if (err)
+ goto out;
+
+ err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
+ &buf->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+ if (err)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+ mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+ &buf->buf);
+
+out:
+ return err;
+}
+
+static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
+{
+ mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+}
+
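+/*
+ * Pin a user-space CQE buffer and build an MTT mapping for it, the
+ * user-space counterpart of mlx4_ib_alloc_cq_buf().
+ */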
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
+ struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
+ u64 buf_addr, int cqe)
+{
+ int err;
+
+ *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(*umem))
+ return PTR_ERR(*umem);
+
+ err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
+ ilog2((*umem)->page_size), &buf->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
+ if (err)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+ ib_umem_release(*umem);
+
+ return err;
+}
+
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
struct ib_ucontext *context,
struct ib_udata *udata)
@@ -92,7 +168,6 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_cq *cq;
struct mlx4_uar *uar;
- int buf_size;
int err;
if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -104,8 +179,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
entries = roundup_pow_of_two(entries + 1);
cq->ibcq.cqe = entries - 1;
- buf_size = entries * sizeof (struct mlx4_cqe);
+ mutex_init(&cq->resize_mutex);
spin_lock_init(&cq->lock);
+ cq->resize_buf = NULL;
+ cq->resize_umem = NULL;
if (context) {
struct mlx4_ib_create_cq ucmd;
@@ -115,21 +192,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
goto err_cq;
}
- cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(cq->umem)) {
- err = PTR_ERR(cq->umem);
- goto err_cq;
- }
-
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
- ilog2(cq->umem->page_size), &cq->buf.mtt);
- if (err)
- goto err_buf;
-
- err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
+ err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+ ucmd.buf_addr, entries);
if (err)
- goto err_mtt;
+ goto err_cq;
err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
&cq->db);
@@ -138,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
uar = &to_mucontext(context)->uar;
} else {
- err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
@@ -147,19 +213,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
- err = -ENOMEM;
- goto err_db;
- }
-
- err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
- &cq->buf.mtt);
+ err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
if (err)
- goto err_buf;
-
- err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
- if (err)
- goto err_mtt;
+ goto err_db;
uar = &dev->priv_uar;
}
@@ -187,16 +243,14 @@ err_dbmap:
err_mtt:
mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
-err_buf:
if (context)
ib_umem_release(cq->umem);
else
- mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
- &cq->buf.buf);
+ mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
err_db:
if (!context)
- mlx4_ib_db_free(dev, &cq->db);
+ mlx4_db_free(dev->dev, &cq->db);
err_cq:
kfree(cq);
@@ -204,6 +258,170 @@ err_cq:
return ERR_PTR(err);
}
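+/*
+ * Prepare a new kernel CQE buffer for a pending resize; only one
+ * resize may be in flight per CQ, hence -EBUSY.
+ */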
+static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+ int entries)
+{
+ int err;
+
+ if (cq->resize_buf)
+ return -EBUSY;
+
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ if (!cq->resize_buf)
+ return -ENOMEM;
+
+ err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+ if (err) {
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ return err;
+ }
+
+ cq->resize_buf->cqe = entries - 1;
+
+ return 0;
+}
+
+static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+ int entries, struct ib_udata *udata)
+{
+ struct mlx4_ib_resize_cq ucmd;
+ int err;
+
+ if (cq->resize_umem)
+ return -EBUSY;
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+ return -EFAULT;
+
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ if (!cq->resize_buf)
+ return -ENOMEM;
+
+ err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
+ &cq->resize_umem, ucmd.buf_addr, entries);
+ if (err) {
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ return err;
+ }
+
+ cq->resize_buf->cqe = entries - 1;
+
+ return 0;
+}
+
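+/*
+ * Count completions that hardware has posted but software has not
+ * yet polled, walking software-owned CQEs from the consumer index.
+ */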
+static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
+{
+ u32 i;
+
+ i = cq->mcq.cons_index;
+ while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+ ++i;
+
+ return i - cq->mcq.cons_index;
+}
+
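+/*
+ * Copy unpolled CQEs from the old buffer into the resize buffer,
+ * stopping at the special RESIZE CQE that hardware posts last. Each
+ * CQE lands one slot further along, matching the consumer index
+ * advancing past the RESIZE CQE below.
+ */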
+static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+{
+ struct mlx4_cqe *cqe;
+ int i;
+
+ i = cq->mcq.cons_index;
+ cqe = get_cqe(cq, i & cq->ibcq.cqe);
+ while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+ memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+ (i + 1) & cq->resize_buf->cqe),
+ get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+ cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+ }
+ ++cq->mcq.cons_index;
+}
+
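+/*
+ * Resize flow: allocate the new buffer, then ask hardware to switch
+ * with mlx4_cq_resize(). User CQs switch over immediately; kernel
+ * CQs switch when the RESIZE CQE is reached, here or in poll.
+ */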
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
+ struct mlx4_ib_cq *cq = to_mcq(ibcq);
+ int outst_cqe;
+ int err;
+
+ mutex_lock(&cq->resize_mutex);
+
+ if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ entries = roundup_pow_of_two(entries + 1);
+ if (entries == ibcq->cqe + 1) {
+ err = 0;
+ goto out;
+ }
+
+ if (ibcq->uobject) {
+ err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
+ if (err)
+ goto out;
+ } else {
+ /* Can't be smaller than the number of outstanding CQEs */
+ outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
+ if (entries < outst_cqe + 1) {
+ err = 0;
+ goto out;
+ }
+
+ err = mlx4_alloc_resize_buf(dev, cq, entries);
+ if (err)
+ goto out;
+ }
+
+ err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
+ if (err)
+ goto err_buf;
+
+ if (ibcq->uobject) {
+ cq->buf = cq->resize_buf->buf;
+ cq->ibcq.cqe = cq->resize_buf->cqe;
+ ib_umem_release(cq->umem);
+ cq->umem = cq->resize_umem;
+
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ cq->resize_umem = NULL;
+ } else {
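+ /*
+ * mlx4_ib_poll_one() may already have seen the RESIZE CQE
+ * and switched to the new buffer, leaving resize_buf NULL.
+ */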
+ spin_lock_irq(&cq->lock);
+ if (cq->resize_buf) {
+ mlx4_ib_cq_resize_copy_cqes(cq);
+ mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+ cq->buf = cq->resize_buf->buf;
+ cq->ibcq.cqe = cq->resize_buf->cqe;
+
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ }
+ spin_unlock_irq(&cq->lock);
+ }
+
+ goto out;
+
+err_buf:
+ if (!ibcq->uobject)
+ mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
+ cq->resize_buf->cqe);
+
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+
+ if (cq->resize_umem) {
+ ib_umem_release(cq->resize_umem);
+ cq->resize_umem = NULL;
+ }
+
+out:
+ mutex_unlock(&cq->resize_mutex);
+ return err;
+}
+
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
struct mlx4_ib_dev *dev = to_mdev(cq->device);
@@ -216,9 +434,8 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
ib_umem_release(mcq->umem);
} else {
- mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
- &mcq->buf.buf);
- mlx4_ib_db_free(dev, &mcq->db);
+ mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
+ mlx4_db_free(dev->dev, &mcq->db);
}
kfree(mcq);
@@ -297,6 +514,20 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
wc->vendor_err = cqe->vendor_err_syndrome;
}
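+/*
+ * The HW checksum is trusted only for a non-fragmented IPv4 packet
+ * with no IP options: the IP headers must have parsed OK, the packet
+ * must carry TCP or UDP, and the hardware checksum must be 0xffff.
+ */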
+static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+{
+ return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
+ MLX4_CQE_IPOIB_STATUS_IPV4F |
+ MLX4_CQE_IPOIB_STATUS_IPV4OPT |
+ MLX4_CQE_IPOIB_STATUS_IPV6 |
+ MLX4_CQE_IPOIB_STATUS_IPOK)) ==
+ cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
+ MLX4_CQE_IPOIB_STATUS_IPOK)) &&
+ (status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
+ MLX4_CQE_IPOIB_STATUS_TCP)) &&
+ checksum == cpu_to_be16(0xffff);
+}
+
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -310,6 +541,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
u32 g_mlpath_rqpn;
u16 wqe_ctr;
+repoll:
cqe = next_cqe_sw(cq);
if (!cqe)
return -EAGAIN;
@@ -332,6 +564,22 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
return -EINVAL;
}
+ /* Resize CQ in progress */
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
+ if (cq->resize_buf) {
+ struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+ mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+ cq->buf = cq->resize_buf->buf;
+ cq->ibcq.cqe = cq->resize_buf->cqe;
+
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ }
+
+ goto repoll;
+ }
+
if (!*cur_qp ||
(be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
/*
@@ -406,6 +654,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
case MLX4_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
+ case MLX4_OPCODE_LSO:
+ wc->opcode = IB_WC_LSO;
+ break;
}
} else {
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@@ -434,6 +685,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
+ wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
+ cqe->checksum);
}
return 0;