Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c | 6
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 32
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 37
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c | 1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c | 14
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 217
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 91
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 68
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 91
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 18
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 219
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 12
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h | 26
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 49
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 252
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 95
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 533
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 52
-rw-r--r--  drivers/infiniband/hw/qib/qib_debugfs.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 6
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 2
40 files changed, 1327 insertions(+), 577 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 00400c352c1a..766a71ccefed 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -604,16 +604,14 @@ static int c2_up(struct net_device *netdev)
tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
c2_port->mem_size = tx_size + rx_size;
- c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
- &c2_port->dma);
+ c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
+ &c2_port->dma);
if (c2_port->mem == NULL) {
pr_debug("Unable to allocate memory for "
"host descriptor rings\n");
return -ENOMEM;
}
- memset(c2_port->mem, 0, c2_port->mem_size);
-
/* Create the Rx host descriptor ring */
if ((ret =
c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 49e0e8533f74..1b63185b4ad4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -260,11 +260,14 @@ static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
-static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
- int msg_size)
+static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
+ size_t q_size, size_t msg_size)
{
u8 *pool_start;
+ if (q_size > SIZE_MAX / msg_size)
+ return -EINVAL;
+
pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
&mq->host_dma, GFP_KERNEL);
if (!pool_start)
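
The new q_size > SIZE_MAX / msg_size test guards the q_size * msg_size multiplication against size_t overflow before the DMA allocation. A minimal userspace sketch of the same guard; the helper name and the extra msg_size != 0 check are illustrative additions, not part of the driver:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Return non-zero if q_size * msg_size would overflow size_t. */
static int alloc_size_overflows(size_t q_size, size_t msg_size)
{
        return msg_size != 0 && q_size > SIZE_MAX / msg_size;
}

int main(void)
{
        size_t q_size = (size_t)1 << 40;
        size_t msg_size = (size_t)1 << 30;

        if (alloc_size_overflows(q_size, msg_size)) {
                fprintf(stderr, "queue size would overflow\n");
                return EXIT_FAILURE;
        }
        printf("total pool size: %zu bytes\n", q_size * msg_size);
        return EXIT_SUCCESS;
}
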
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c2fb71c182a8..fb61f6685809 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -236,10 +236,12 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
- sizeof(struct iphdr) - sizeof(struct tcphdr);
+ ((AF_INET == ep->com.remote_addr.ss_family) ?
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
+ sizeof(struct tcphdr);
ep->mss = ep->emss;
if (GET_TCPOPT_TSTAMP(opt))
- ep->emss -= 12;
+ ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
@@ -415,6 +417,7 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
return NULL;
if (!our_interface(dev, n->dev) &&
!(n->dev->flags & IFF_LOOPBACK)) {
+ neigh_release(n);
dst_release(&rt->dst);
return NULL;
}
@@ -581,11 +584,14 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
}
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx, int use_ts)
+ unsigned int *idx, int use_ts, int ipv6)
{
- unsigned short hdr_size = sizeof(struct iphdr) +
+ unsigned short hdr_size = (ipv6 ?
+ sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
sizeof(struct tcphdr) +
- (use_ts ? 12 : 0);
+ (use_ts ?
+ round_up(TCPOLEN_TIMESTAMP, 4) : 0);
unsigned short data_size = mtu - hdr_size;
cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
@@ -634,7 +640,8 @@ static int send_connect(struct c4iw_ep *ep)
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps);
+ enable_tcp_timestamps,
+ (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
wscale = compute_wscale(rcv_win);
/*
@@ -668,6 +675,7 @@ static int send_connect(struct c4iw_ep *ep)
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
opt2 |= T5_OPT_2_VALID;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
}
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -713,8 +721,6 @@ static int send_connect(struct c4iw_ep *ep)
} else {
u32 isn = (prandom_u32() & ~7UL) - 1;
- opt2 |= T5_OPT_2_VALID;
- opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
if (peer2peer)
isn += 4;
@@ -756,10 +762,10 @@ static int send_connect(struct c4iw_ep *ep)
t5_req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
- t5_req6->params = (__force __be64)cpu_to_be32(
+ t5_req6->params = cpu_to_be64(V_FILTER_TUPLE(
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
- ep->l2t));
+ ep->l2t)));
t5_req6->rsvd = cpu_to_be32(isn);
PDBG("%s snd_isn %u\n", __func__,
be32_to_cpu(t5_req6->rsvd));
@@ -1763,7 +1769,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps);
+ enable_tcp_timestamps,
+ (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
wscale = compute_wscale(rcv_win);
/*
@@ -2162,7 +2169,8 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
ep->hwtid));
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps && req->tcpopt.tstamp);
+ enable_tcp_timestamps && req->tcpopt.tstamp,
+ (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
wscale = compute_wscale(rcv_win);
/*
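
The updated set_emss()/best_mtu() arithmetic subtracts the network-header size that matches the address family and rounds the TCP timestamp option up to a 4-byte boundary instead of hard-coding 12. A standalone sketch of that header-overhead calculation; the fixed header lengths and the function name are assumptions for illustration:

#include <stdio.h>

#define IPV4_HDR_LEN   20  /* sizeof(struct iphdr)   */
#define IPV6_HDR_LEN   40  /* sizeof(struct ipv6hdr) */
#define TCP_HDR_LEN    20  /* sizeof(struct tcphdr)  */
#define TCPOLEN_TSTAMP 10  /* TCPOLEN_TIMESTAMP      */

#define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* Header overhead used when picking the best-aligned MTU. */
static int hdr_size(int ipv6, int use_ts)
{
        return (ipv6 ? IPV6_HDR_LEN : IPV4_HDR_LEN) + TCP_HDR_LEN +
               (use_ts ? ROUND_UP(TCPOLEN_TSTAMP, 4) : 0);
}

int main(void)
{
        int mtu = 1500;

        printf("IPv4+ts payload: %d\n", mtu - hdr_size(0, 1)); /* 1448 */
        printf("IPv6+ts payload: %d\n", mtu - hdr_size(1, 1)); /* 1428 */
        return 0;
}
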
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index f25df5276c22..72f1f052e88c 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -60,7 +60,7 @@ int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
-int c4iw_wr_log_size_order = 12;
+static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
"Number of entries (log2) in the work request timing log.");
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index fbe6051af254..c9df0549f51d 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -227,6 +227,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
chp = get_chp(dev, qid);
if (chp) {
+ t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index c158fcc02bca..41cd6882b648 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1105,7 +1105,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
struct c4iw_cq *schp)
{
int count;
- int flushed;
+ int rq_flushed, sq_flushed;
unsigned long flag;
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
@@ -1123,27 +1123,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
c4iw_flush_hw_cq(rchp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
- flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
- if (flushed) {
- spin_lock_irqsave(&rchp->comp_handler_lock, flag);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
- }
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, flag);
spin_lock(&qhp->lock);
if (schp != rchp)
c4iw_flush_hw_cq(schp);
- flushed = c4iw_flush_sq(qhp);
+ sq_flushed = c4iw_flush_sq(qhp);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, flag);
- if (flushed) {
- spin_lock_irqsave(&schp->comp_handler_lock, flag);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+
+ if (schp == rchp) {
+ if (t4_clear_cq_armed(&rchp->cq) &&
+ (rq_flushed || sq_flushed)) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
+ } else {
+ if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
+ if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
}
}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index df5edfa31a8f..c04e5134b30c 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -524,6 +524,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
return !wq->rq.queue[wq->rq.size].status.db_off;
}
+enum t4_cq_flags {
+ CQ_ARMED = 1,
+};
+
struct t4_cq {
struct t4_cqe *queue;
dma_addr_t dma_addr;
@@ -544,12 +548,19 @@ struct t4_cq {
u16 cidx_inc;
u8 gen;
u8 error;
+ unsigned long flags;
};
+static inline int t4_clear_cq_armed(struct t4_cq *cq)
+{
+ return test_and_clear_bit(CQ_ARMED, &cq->flags);
+}
+
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
u32 val;
+ set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
INGRESSQID(cq->cqid);
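
t4_arm_cq() now records an armed state in cq->flags and t4_clear_cq_armed() consumes it atomically, so the event and flush paths only invoke the completion handler when a notification was actually requested. A rough userspace analogue of that test-and-clear pattern, using C11 atomics in place of the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CQ_ARMED (1UL << 0)

struct cq {
        atomic_ulong flags;
};

/* Arm the CQ: record that a completion notification was requested. */
static void arm_cq(struct cq *cq)
{
        atomic_fetch_or(&cq->flags, CQ_ARMED);
}

/* Atomically consume the armed state; returns true if it was set. */
static bool clear_cq_armed(struct cq *cq)
{
        return atomic_fetch_and(&cq->flags, ~CQ_ARMED) & CQ_ARMED;
}

int main(void)
{
        struct cq cq = { .flags = 0 };

        arm_cq(&cq);
        printf("first clear: %d\n", clear_cq_armed(&cq));  /* 1: was armed */
        printf("second clear: %d\n", clear_cq_armed(&cq)); /* 0: already cleared */
        return 0;
}
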
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index e0c404bdc4a8..4977082e081f 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -82,7 +82,6 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- *dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 43f2d0424d4f..e890e5ba0e01 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -726,7 +726,7 @@ bail:
* @dd: the infinipath device
* @pkeys: the PKEY table
*/
-static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
+static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
{
struct ipath_portdata *pd;
int i;
@@ -759,6 +759,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
}
if (changed) {
u64 pkey;
+ struct ib_event event;
pkey = (u64) dd->ipath_pkeys[0] |
((u64) dd->ipath_pkeys[1] << 16) |
@@ -768,12 +769,17 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
(unsigned long long) pkey);
ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
pkey);
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev->ibdev;
+ event.element.port_num = port;
+ ib_dispatch_event(&event);
}
return 0;
}
static int recv_subn_set_pkeytable(struct ib_smp *smp,
- struct ib_device *ibdev)
+ struct ib_device *ibdev, u8 port)
{
u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
__be16 *p = (__be16 *) smp->data;
@@ -784,7 +790,7 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
for (i = 0; i < n; i++)
q[i] = be16_to_cpu(p[i]);
- if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
+ if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
smp->status |= IB_SMP_INVALID_FIELD;
return recv_subn_get_pkeytable(smp, ibdev);
@@ -1342,7 +1348,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = recv_subn_set_portinfo(smp, ibdev, port_num);
goto bail;
case IB_SMP_ATTR_PKEY_TABLE:
- ret = recv_subn_set_pkeytable(smp, ibdev);
+ ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
goto bail;
case IB_SMP_ATTR_SM_INFO:
if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index dc66c4506916..1da1252dcdb3 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
/* call with current->mm->mmap_sem held */
static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+ struct page **p)
{
unsigned long lock_limit;
size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
num_pages - got, 1, 1,
- p + got, vma);
+ p + got, NULL);
if (ret < 0)
goto bail_release;
}
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+ ret = __ipath_get_user_pages(start_page, num_pages, p);
up_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 287ad0564acd..82a7dd87089b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -891,7 +891,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler,
- NULL, NULL);
+ NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0f7027e7db13..8b72cf392b34 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,7 @@
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0 0xA0
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
return dmfs;
}
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+ int ib_ports = 0;
+ int i;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_ports++;
+
+ return ib_ports;
+}
+
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
+ int have_ib_ports;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
memset(props, 0, sizeof *props);
+ have_ib_ports = num_ib_ports(dev->dev);
+
props->fw_ver = dev->dev->caps.fw_ver;
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
- if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
- if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+ if (dev->dev->caps.max_gso_sz &&
+ (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+ (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
props->device_cap_flags |= IB_DEVICE_UD_TSO;
if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->state = IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
props->active_mtu = IB_MTU_256;
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
ndev = iboe->netdevs[port - 1];
if (!ndev)
goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
IB_PORT_ACTIVE : IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
out_unlock:
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
if (!mqp->port)
return 0;
- spin_lock(&mdev->iboe.lock);
+ spin_lock_bh(&mdev->iboe.lock);
ndev = mdev->iboe.netdevs[mqp->port - 1];
if (ndev)
dev_hold(ndev);
- spin_unlock(&mdev->iboe.lock);
+ spin_unlock_bh(&mdev->iboe.lock);
if (ndev) {
ret = 1;
@@ -910,8 +927,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
const struct default_rules *pdefault_rules = default_table;
u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
- for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
- pdefault_rules++) {
+ for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
memset(&field_types, 0, sizeof(field_types));
@@ -965,8 +981,7 @@ static int __mlx4_ib_create_default_rules(
int size = 0;
int i;
- for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
- sizeof(pdefault_rules->rules_create_list[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
int ret;
union ib_flow_spec ib_spec;
switch (pdefault_rules->rules_create_list[i]) {
@@ -1091,6 +1106,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
return err;
}
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+ u64 *reg_id)
+{
+ void *ib_flow;
+ union ib_flow_spec *ib_spec;
+ struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+ int err = 0;
+
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ ib_flow = flow_attr + 1;
+ ib_spec = (union ib_flow_spec *)ib_flow;
+
+ if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+ return 0; /* do nothing */
+
+ err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+ flow_attr->port, qp->qp_num,
+ MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+ reg_id);
+ return err;
+}
+
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
@@ -1134,12 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
&mflow->reg_id[i]);
if (err)
- goto err_free;
+ goto err_create_flow;
+ i++;
+ }
+
+ if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+ if (err)
+ goto err_create_flow;
i++;
}
return &mflow->ibflow;
+err_create_flow:
+ while (i) {
+ (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+ i--;
+ }
err_free:
kfree(mflow);
return ERR_PTR(err);
@@ -1264,11 +1315,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mutex_lock(&mqp->mutex);
ge = find_gid_entry(mqp, gid->raw);
if (ge) {
- spin_lock(&mdev->iboe.lock);
+ spin_lock_bh(&mdev->iboe.lock);
ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
if (ndev)
dev_hold(ndev);
- spin_unlock(&mdev->iboe.lock);
+ spin_unlock_bh(&mdev->iboe.lock);
if (ndev)
dev_put(ndev);
list_del(&ge->list);
@@ -1389,6 +1440,9 @@ static void update_gids_task(struct work_struct *work)
int err;
struct mlx4_dev *dev = gw->dev->dev;
+ if (!gw->dev->ib_active)
+ return;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1419,6 +1473,9 @@ static void reset_gids_task(struct work_struct *work)
int err;
struct mlx4_dev *dev = gw->dev->dev;
+ if (!gw->dev->ib_active)
+ return;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
pr_warn("reset gid table failed\n");
@@ -1553,7 +1610,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
return 0;
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
if ((netif_is_bond_master(real_dev) &&
@@ -1563,7 +1620,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
update_gid_table(ibdev, port, gid,
event == NETDEV_DOWN, 0);
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
return 0;
}
@@ -1636,13 +1693,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
new_smac = mlx4_mac_to_u64(dev->dev_addr);
read_unlock(&dev_base_lock);
+ atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+ /* no need for update QP1 and mac registration in non-SRIOV */
+ if (!mlx4_is_mfunc(ibdev->dev))
+ return;
+
mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
qp = ibdev->qp1_proxy[port - 1];
if (qp) {
int new_smac_index;
- u64 old_smac = qp->pri.smac;
+ u64 old_smac;
struct mlx4_update_qp_params update_params;
+ mutex_lock(&qp->mutex);
+ old_smac = qp->pri.smac;
if (new_smac == old_smac)
goto unlock;
@@ -1652,22 +1717,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
goto unlock;
update_params.smac_index = new_smac_index;
- if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+ if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
&update_params)) {
release_mac = new_smac;
goto unlock;
}
-
+ /* if old port was zero, no mac was yet registered for this QP */
+ if (qp->pri.smac_port)
+ release_mac = old_smac;
qp->pri.smac = new_smac;
+ qp->pri.smac_port = port;
qp->pri.smac_index = new_smac_index;
-
- release_mac = old_smac;
}
unlock:
- mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
if (release_mac != MLX4_IB_INVALID_MAC)
mlx4_unregister_mac(ibdev->dev, port, release_mac);
+ if (qp)
+ mutex_unlock(&qp->mutex);
+ mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1678,6 +1746,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
struct inet6_dev *in6_dev;
union ib_gid *pgid;
struct inet6_ifaddr *ifp;
+ union ib_gid default_gid;
#endif
union ib_gid gid;
@@ -1698,12 +1767,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
in_dev_put(in_dev);
}
#if IS_ENABLED(CONFIG_IPV6)
+ mlx4_make_default_gid(dev, &default_gid);
/* IPv6 gids */
in6_dev = in6_dev_get(dev);
if (in6_dev) {
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
+ if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+ continue;
update_gid_table(ibdev, port, pgid, 0, 0);
}
read_unlock_bh(&in6_dev->lock);
@@ -1725,24 +1797,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
struct net_device *dev;
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
int i;
+ int err = 0;
- for (i = 1; i <= ibdev->num_ports; ++i)
- if (reset_gid_table(ibdev, i))
- return -1;
+ for (i = 1; i <= ibdev->num_ports; ++i) {
+ if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = reset_gid_table(ibdev, i);
+ if (err)
+ goto out;
+ }
+ }
read_lock(&dev_base_lock);
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
for_each_netdev(&init_net, dev) {
u8 port = mlx4_ib_get_dev_port(dev, ibdev);
- if (port)
+ /* port will be non-zero only for ETH ports */
+ if (port) {
+ mlx4_ib_set_default_gid(ibdev, dev, port);
mlx4_ib_get_dev_addr(dev, ibdev, port);
+ }
}
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
read_unlock(&dev_base_lock);
-
- return 0;
+out:
+ return err;
}
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1756,7 +1837,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
enum ib_port_state port_state = IB_PORT_NOP;
struct net_device *old_master = iboe->masters[port - 1];
@@ -1788,35 +1869,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- } else {
- reset_gid_table(ibdev, port);
- }
- /* if using bonding/team and a slave port is down, we don't the bond IP
- * based gids in the table since flows that select port by gid may get
- * the down port.
- */
- if (curr_master && (port_state == IB_PORT_DOWN)) {
- reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- }
- /* if bonding is used it is possible that we add it to masters
- * only after IP address is assigned to the net bonding
- * interface.
- */
- if (curr_master && (old_master != curr_master)) {
- reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- mlx4_ib_get_dev_addr(curr_master, ibdev, port);
- }
+ if (curr_master) {
+ /* if using bonding/team and a slave port is down, we
+ * don't want the bond IP based gids in the table since
+ * flows that select port by gid may get the down port.
+ */
+ if (port_state == IB_PORT_DOWN) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev,
+ curr_netdev,
+ port);
+ } else {
+ /* gids from the upper dev (bond/team)
+ * should appear in port's gid table
+ */
+ mlx4_ib_get_dev_addr(curr_master,
+ ibdev, port);
+ }
+ }
+ /* if bonding is used it is possible that we add it to
+ * masters only after IP address is assigned to the
+ * net bonding interface.
+ */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev,
+ curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+ }
- if (!curr_master && (old_master != curr_master)) {
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev,
+ curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
+ } else {
reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
}
}
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
if (update_qps_port > 0)
mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2007,6 +2100,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_REREG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
@@ -2059,6 +2153,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
+ ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
@@ -2156,6 +2251,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_steer_free_bitmap;
}
+ for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+ atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_steer_free_bitmap;
@@ -2192,12 +2290,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
}
#endif
- for (i = 1 ; i <= ibdev->num_ports ; ++i)
- reset_gid_table(ibdev, i);
- rtnl_lock();
- mlx4_ib_scan_netdevs(ibdev, NULL, 0);
- rtnl_unlock();
- mlx4_ib_init_gid_table(ibdev);
+ if (mlx4_ib_init_gid_table(ibdev))
+ goto err_notif;
}
for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2345,6 +2439,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
struct mlx4_ib_dev *ibdev = ibdev_ptr;
int p;
+ ibdev->ib_active = false;
+ flush_workqueue(wq);
+
mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 369da3ca5d64..6eb743f65f6f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[MLX4_MAX_PORTS];
struct net_device *masters[MLX4_MAX_PORTS];
+ atomic64_t mac[MLX4_MAX_PORTS];
struct notifier_block nb;
struct notifier_block nb_inet;
struct notifier_block nb_inet6;
@@ -788,5 +789,9 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
int is_attach);
+int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+ u64 start, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index cb2a8727f3fb..8f9325cfc85d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -144,8 +144,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
+ /* Force registering the memory as writable. */
+ /* Used for memory re-registeration. HCA protects the access */
mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
+ access_flags | IB_ACCESS_LOCAL_WRITE, 0);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -183,6 +185,93 @@ err_free:
return ERR_PTR(err);
}
+int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+ u64 start, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(mr->device);
+ struct mlx4_ib_mr *mmr = to_mmr(mr);
+ struct mlx4_mpt_entry *mpt_entry;
+ struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
+ int err;
+
+ /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
+ * we assume that the calls can't run concurrently. Otherwise, a
+ * race exists.
+ */
+ err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
+
+ if (err)
+ return err;
+
+ if (flags & IB_MR_REREG_PD) {
+ err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
+ to_mpd(pd)->pdn);
+
+ if (err)
+ goto release_mpt_entry;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
+ convert_access(mr_access_flags));
+
+ if (err)
+ goto release_mpt_entry;
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ int shift;
+ int err;
+ int n;
+
+ mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
+ ib_umem_release(mmr->umem);
+ mmr->umem = ib_umem_get(mr->uobject->context, start, length,
+ mr_access_flags |
+ IB_ACCESS_LOCAL_WRITE,
+ 0);
+ if (IS_ERR(mmr->umem)) {
+ err = PTR_ERR(mmr->umem);
+ /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
+ mmr->umem = NULL;
+ goto release_mpt_entry;
+ }
+ n = ib_umem_page_count(mmr->umem);
+ shift = ilog2(mmr->umem->page_size);
+
+ err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
+ virt_addr, length, n, shift,
+ *pmpt_entry);
+ if (err) {
+ ib_umem_release(mmr->umem);
+ goto release_mpt_entry;
+ }
+ mmr->mmr.iova = virt_addr;
+ mmr->mmr.size = length;
+
+ err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
+ if (err) {
+ mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
+ ib_umem_release(mmr->umem);
+ goto release_mpt_entry;
+ }
+ }
+
+ /* If we couldn't transfer the MR to the HCA, just remember to
+ * return a failure. But dereg_mr will free the resources.
+ */
+ err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+ if (!err && flags & IB_MR_REREG_ACCESS)
+ mmr->mmr.access = mr_access_flags;
+
+release_mpt_entry:
+ mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
+
+ return err;
+}
+
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 67780452f0cf..9c5150c3cb31 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
pr_warn("modify QP %06x to RESET failed.\n",
qp->mqp.qpn);
- if (qp->pri.smac) {
+ if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
qp->pri.smac = 0;
+ qp->pri.smac_port = 0;
}
if (qp->alt.smac) {
mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
* If one was already assigned, but the new mac differs,
* unregister the old one and register the new one.
*/
- if (!smac_info->smac || smac_info->smac != smac) {
+ if ((!smac_info->smac && !smac_info->smac_port) ||
+ smac_info->smac != smac) {
/* register candidate now, unreg if needed, after success */
smac_index = mlx4_register_mac(dev->dev, port, smac);
if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
struct mlx4_qp_context *context)
{
- struct net_device *ndev;
u64 u64_mac;
int smac_index;
-
- ndev = dev->iboe.netdevs[qp->port - 1];
- if (ndev) {
- smac = ndev->dev_addr;
- u64_mac = mlx4_mac_to_u64(smac);
- } else {
- u64_mac = dev->dev->caps.def_mac[qp->port];
- }
+ u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
- if (!qp->pri.smac) {
+ if (!qp->pri.smac && !qp->pri.smac_port) {
smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
if (smac_index >= 0) {
qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
int steer_qp = 0;
int err = -EINVAL;
+ /* APM is not supported under RoCE */
+ if (attr_mask & IB_QP_ALT_PATH &&
+ rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+ IB_LINK_LAYER_ETHERNET)
+ return -ENOTSUPP;
+
context = kzalloc(sizeof *context, GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
+ if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /* set QP to receive both tunneled & non-tunneled packets */
+ if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ context->srqn = cpu_to_be32(7 << 28);
+ }
+ }
if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
int is_eth = rdma_port_get_link_layer(
@@ -1780,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_reg(dev, qp, 0);
}
- if (qp->pri.smac) {
+ if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
qp->pri.smac = 0;
+ qp->pri.smac_port = 0;
}
if (qp->alt.smac) {
mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1806,11 +1813,12 @@ out:
if (err && steer_qp)
mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
- if (qp->pri.candidate_smac) {
+ if (qp->pri.candidate_smac ||
+ (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
if (err) {
mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
} else {
- if (qp->pri.smac)
+ if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
qp->pri.smac = qp->pri.candidate_smac;
qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2083,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
return 0;
}
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+ int i;
+
+ for (i = ETH_ALEN; i; i--) {
+ dst_mac[i - 1] = src_mac & 0xff;
+ src_mac >>= 8;
+ }
+}
+
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
@@ -2197,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
}
if (is_eth) {
- u8 *smac;
struct in6_addr in6;
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2210,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
memcpy(&in6, sgid.raw, sizeof(in6));
- if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
- smac = to_mdev(sqp->qp.ibqp.device)->
- iboe.netdevs[sqp->qp.port - 1]->dev_addr;
- else /* use the src mac of the tunnel */
- smac = ah->av.eth.s_mac;
- memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+ if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+ u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+ u8 smac[ETH_ALEN];
+
+ mlx4_u64_to_smac(smac, mac);
+ memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+ } else {
+ /* use the src mac of the tunnel */
+ memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+ }
+
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
if (!is_vlan) {
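
build_mlx_header() now takes the source MAC from the per-port atomic64 cache and unpacks it with mlx4_u64_to_smac(). A self-contained sketch mirroring that unpacking; the sample MAC value is made up:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Unpack a MAC address stored in the low 48 bits of a u64,
 * most significant byte first, into a byte array. */
static void u64_to_mac(uint8_t *dst_mac, uint64_t src_mac)
{
        int i;

        for (i = ETH_ALEN; i; i--) {
                dst_mac[i - 1] = src_mac & 0xff;
                src_mac >>= 8;
        }
}

int main(void)
{
        uint8_t mac[ETH_ALEN];

        u64_to_mac(mac, 0x0002c9abcdefULL);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}
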
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index e4056279166d..10cfce5119a9 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(-EINVAL);
entries = roundup_pow_of_two(entries + 1);
- if (entries > dev->mdev->caps.max_cqes)
+ if (entries > dev->mdev->caps.gen.max_cqes)
return ERR_PTR(-EINVAL);
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int err;
u32 fsel;
- if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+ if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
return -ENOSYS;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
int uninitialized_var(cqe_size);
unsigned long flags;
- if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+ if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
pr_info("Firmware does not support resize CQ\n");
return -ENOSYS;
}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -EINVAL;
entries = roundup_pow_of_two(entries + 1);
- if (entries > dev->mdev->caps.max_cqes + 1)
+ if (entries > dev->mdev->caps.gen.max_cqes + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index b514bbb5610f..657af9a1167c 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
packet_error = be16_to_cpu(out_mad->status);
- dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+ dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
out:
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d8907b20522a..1ba6c42e4df8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -157,11 +157,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
+ struct mlx5_general_caps *gen;
int err = -ENOMEM;
int max_rq_sg;
int max_sq_sg;
u64 flags;
+ gen = &dev->mdev->caps.gen;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
@@ -183,7 +185,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_RC_RNR_NAK_GEN;
- flags = dev->mdev->caps.flags;
+ flags = gen->flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -213,30 +215,31 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
props->max_mr_size = ~0ull;
- props->page_size_cap = dev->mdev->caps.min_page_sz;
- props->max_qp = 1 << dev->mdev->caps.log_max_qp;
- props->max_qp_wr = dev->mdev->caps.max_wqes;
- max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
- max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+ props->page_size_cap = gen->min_page_sz;
+ props->max_qp = 1 << gen->log_max_qp;
+ props->max_qp_wr = gen->max_wqes;
+ max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+ max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_cq = 1 << dev->mdev->caps.log_max_cq;
- props->max_cqe = dev->mdev->caps.max_cqes - 1;
- props->max_mr = 1 << dev->mdev->caps.log_max_mkey;
- props->max_pd = 1 << dev->mdev->caps.log_max_pd;
- props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp;
- props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
+ props->max_cq = 1 << gen->log_max_cq;
+ props->max_cqe = gen->max_cqes - 1;
+ props->max_mr = 1 << gen->log_max_mkey;
+ props->max_pd = 1 << gen->log_max_pd;
+ props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp;
+ props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
+ props->max_srq = 1 << gen->log_max_srq;
+ props->max_srq_wr = gen->max_srq_wqes - 1;
+ props->local_ca_ack_delay = gen->local_ca_ack_delay;
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
- props->max_srq = 1 << dev->mdev->caps.log_max_srq;
- props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1;
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len = (unsigned int)-1;
- props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay;
+ props->local_ca_ack_delay = gen->local_ca_ack_delay;
props->atomic_cap = IB_ATOMIC_NONE;
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
- props->max_mcast_grp = 1 << dev->mdev->caps.log_max_mcg;
- props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
+ props->max_mcast_grp = 1 << gen->log_max_mcg;
+ props->max_mcast_qp_attach = gen->max_qp_mcg;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -254,10 +257,12 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
+ struct mlx5_general_caps *gen;
int ext_active_speed;
int err = -ENOMEM;
- if (port < 1 || port > dev->mdev->caps.num_ports) {
+ gen = &dev->mdev->caps.gen;
+ if (port < 1 || port > gen->num_ports) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
return -EINVAL;
}
@@ -288,8 +293,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
props->phys_state = out_mad->data[33] >> 4;
props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
props->gid_tbl_len = out_mad->data[50];
- props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
- props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
+ props->max_msg_sz = 1 << gen->log_max_msg;
+ props->pkey_tbl_len = gen->port[port - 1].pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
@@ -316,7 +321,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
- if (dev->mdev->caps.ext_port_cap[port - 1] &
+ if (gen->ext_port_cap[port - 1] &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -470,6 +475,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
+ struct mlx5_general_caps *gen;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
int gross_uuars;
@@ -480,6 +486,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int i;
size_t reqlen;
+ gen = &dev->mdev->caps.gen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
@@ -512,14 +519,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
- resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp;
- resp.bf_reg_size = dev->mdev->caps.bf_reg_size;
+ resp.qp_tab_size = 1 << gen->log_max_qp;
+ resp.bf_reg_size = gen->bf_reg_size;
resp.cache_line_size = L1_CACHE_BYTES;
- resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
- resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
- resp.max_send_wqebb = dev->mdev->caps.max_wqes;
- resp.max_recv_wr = dev->mdev->caps.max_wqes;
- resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
+ resp.max_sq_desc_sz = gen->max_sq_desc_sz;
+ resp.max_rq_desc_sz = gen->max_rq_desc_sz;
+ resp.max_send_wqebb = gen->max_wqes;
+ resp.max_recv_wr = gen->max_wqes;
+ resp.max_srq_recv_wr = gen->max_srq_wqes;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
@@ -565,7 +572,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->db_page_mutex);
resp.tot_uuars = req.total_num_uuars;
- resp.num_ports = dev->mdev->caps.num_ports;
+ resp.num_ports = gen->num_ports;
err = ib_copy_to_udata(udata, &resp,
sizeof(resp) - sizeof(resp.reserved));
if (err)
@@ -650,13 +657,13 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return -EINVAL;
idx = get_index(vma->vm_pgoff);
+ if (idx >= uuari->num_uars)
+ return -EINVAL;
+
pfn = uar_index2pfn(dev, uuari->uars[idx].index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
(unsigned long long)pfn);
- if (idx >= uuari->num_uars)
- return -EINVAL;
-
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, pfn,
PAGE_SIZE, vma->vm_page_prot))
@@ -967,9 +974,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
+ struct mlx5_general_caps *gen;
int port;
- for (port = 1; port <= dev->mdev->caps.num_ports; port++)
+ gen = &dev->mdev->caps.gen;
+ for (port = 1; port <= gen->num_ports; port++)
mlx5_query_ext_port_caps(dev, port);
}
@@ -977,9 +986,11 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
+ struct mlx5_general_caps *gen;
int err = 0;
int port;
+ gen = &dev->mdev->caps.gen;
pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
if (!pprops)
goto out;
@@ -994,14 +1005,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
goto out;
}
- for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
+ for (port = 1; port <= gen->num_ports; port++) {
err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
if (err) {
mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
break;
}
- dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
- dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+ gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
+ gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
dprops->max_pkeys, pprops->gid_tbl_len);
}
@@ -1279,8 +1290,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
- dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey;
- dev->num_ports = mdev->caps.num_ports;
+ dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey;
+ dev->num_ports = mdev->caps.gen.num_ports;
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = dev->num_comp_vectors;
dev->ib_dev.dma_device = &mdev->pdev->dev;
@@ -1355,7 +1366,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
- if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
+ if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
dev->ib_dev.uverbs_cmd_mask |=
@@ -1414,8 +1425,8 @@ err_dealloc:
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
struct mlx5_ib_dev *dev = context;
- destroy_umrc_res(dev);
ib_unregister_device(&dev->ib_dev);
+ destroy_umrc_res(dev);
destroy_dev_resources(&dev->devr);
free_comp_eqs(dev);
ib_dealloc_device(&dev->ib_dev);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index a3e81444c825..dae07eae9507 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -55,16 +55,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
u64 pfn;
struct scatterlist *sg;
int entry;
+ unsigned long page_shift = ilog2(umem->page_size);
- addr = addr >> PAGE_SHIFT;
+ addr = addr >> page_shift;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, sizeof(tmp));
skip = 1 << m;
mask = skip - 1;
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+ len = sg_dma_len(sg) >> page_shift;
+ pfn = sg_dma_address(sg) >> page_shift;
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
@@ -103,14 +104,15 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
*ncont = 0;
}
- *shift = PAGE_SHIFT + m;
+ *shift = page_shift + m;
*count = i;
}
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int umr)
{
- int shift = page_shift - PAGE_SHIFT;
+ unsigned long umem_page_shift = ilog2(umem->page_size);
+ int shift = page_shift - umem_page_shift;
int mask = (1 << shift) - 1;
int i, k;
u64 cur = 0;
@@ -121,11 +123,11 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
+ len = sg_dma_len(sg) >> umem_page_shift;
base = sg_dma_address(sg);
for (k = 0; k < len; k++) {
if (!(i & mask)) {
- cur = base + (k << PAGE_SHIFT);
+ cur = base + (k << umem_page_shift);
if (umr)
cur |= 3;
@@ -134,7 +136,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
i >> shift, be64_to_cpu(pas[i >> shift]));
} else
mlx5_ib_dbg(dev, "=====> 0x%llx\n",
- base + (k << PAGE_SHIFT));
+ base + (k << umem_page_shift));
i++;
}
}
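
mlx5_ib_cont_pages() and mlx5_ib_populate_pas() now derive the shift from the umem's own page size via ilog2() instead of assuming PAGE_SHIFT, so umems backed by larger pages are addressed in their native page units. A small sketch of that address-to-page-index math; the ilog2 helper and the sample values are illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* ilog2 for a power-of-two value. */
static unsigned int ilog2_u64(uint64_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint64_t addr = 0x12345678000ULL;
        uint64_t page_size = 64 * 1024;            /* 64K umem pages */
        unsigned int shift = ilog2_u64(page_size); /* 16, not PAGE_SHIFT */

        printf("page shift %u, page index 0x%" PRIx64 "\n",
               shift, addr >> shift);
        return 0;
}
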
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 80b3c63eab5d..8ee7cb46e059 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -881,12 +881,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int order;
int err;
- mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n",
- start, virt_addr, length);
+ mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, virt_addr, length, access_flags);
umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
0);
if (IS_ERR(umem)) {
- mlx5_ib_dbg(dev, "umem get failed\n");
+ mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
return (void *)umem;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7efe6e3f3542..e261a53f9a02 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -158,11 +158,13 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
+ struct mlx5_general_caps *gen;
int wqe_size;
int wq_size;
+ gen = &dev->mdev->caps.gen;
/* Sanity check RQ size before proceeding */
- if (cap->max_recv_wr > dev->mdev->caps.max_wqes)
+ if (cap->max_recv_wr > gen->max_wqes)
return -EINVAL;
if (!has_rq) {
@@ -182,10 +184,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
qp->rq.wqe_cnt = wq_size / wqe_size;
- if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
+ if (wqe_size > gen->max_rq_desc_sz) {
mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
wqe_size,
- dev->mdev->caps.max_rq_desc_sz);
+ gen->max_rq_desc_sz);
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
@@ -266,9 +268,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
struct mlx5_ib_qp *qp)
{
+ struct mlx5_general_caps *gen;
int wqe_size;
int wq_size;
+ gen = &dev->mdev->caps.gen;
if (!attr->cap.max_send_wr)
return 0;
@@ -277,9 +281,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
if (wqe_size < 0)
return wqe_size;
- if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
+ if (wqe_size > gen->max_sq_desc_sz) {
mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
- wqe_size, dev->mdev->caps.max_sq_desc_sz);
+ wqe_size, gen->max_sq_desc_sz);
return -EINVAL;
}
@@ -292,9 +296,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
- if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+ if (qp->sq.wqe_cnt > gen->max_wqes) {
mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
- qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+ qp->sq.wqe_cnt, gen->max_wqes);
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -309,11 +313,13 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
struct mlx5_ib_create_qp *ucmd)
{
+ struct mlx5_general_caps *gen;
int desc_sz = 1 << qp->sq.wqe_shift;
- if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
+ gen = &dev->mdev->caps.gen;
+ if (desc_sz > gen->max_sq_desc_sz) {
mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
- desc_sz, dev->mdev->caps.max_sq_desc_sz);
+ desc_sz, gen->max_sq_desc_sz);
return -EINVAL;
}
@@ -325,9 +331,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
qp->sq.wqe_cnt = ucmd->sq_wqe_count;
- if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+ if (qp->sq.wqe_cnt > gen->max_wqes) {
mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
- qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+ qp->sq.wqe_cnt, gen->max_wqes);
return -EINVAL;
}
@@ -803,16 +809,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_resources *devr = &dev->devr;
struct mlx5_ib_create_qp_resp resp;
struct mlx5_create_qp_mbox_in *in;
+ struct mlx5_general_caps *gen;
struct mlx5_ib_create_qp ucmd;
int inlen = sizeof(*in);
int err;
+ gen = &dev->mdev->caps.gen;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+ if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
return -EINVAL;
} else {
@@ -851,9 +859,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_ib_dbg(dev, "invalid rq params\n");
return -EINVAL;
}
- if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
+ if (ucmd.sq_wqe_count > gen->max_wqes) {
mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
+ ucmd.sq_wqe_count, gen->max_wqes);
return -EINVAL;
}
err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1144,6 +1152,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
+ struct mlx5_general_caps *gen;
struct mlx5_ib_dev *dev;
struct mlx5_ib_qp *qp;
u16 xrcdn = 0;
@@ -1161,11 +1170,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
}
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
+ gen = &dev->mdev->caps.gen;
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
- if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
+ if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
mlx5_ib_dbg(dev, "XRC not supported\n");
return ERR_PTR(-ENOSYS);
}
@@ -1272,6 +1282,9 @@ enum {
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
+ struct mlx5_general_caps *gen;
+
+ gen = &dev->mdev->caps.gen;
if (rate == IB_RATE_PORT_CURRENT) {
return 0;
} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1279,7 +1292,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
} else {
while (rate != IB_RATE_2_5_GBPS &&
!(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- dev->mdev->caps.stat_rate_support))
+ gen->stat_rate_support))
--rate;
}
@@ -1290,8 +1303,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
u32 path_flags, const struct ib_qp_attr *attr)
{
+ struct mlx5_general_caps *gen;
int err;
+ gen = &dev->mdev->caps.gen;
path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
@@ -1302,6 +1317,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
path->rlid = cpu_to_be16(ah->dlid);
if (ah->ah_flags & IB_AH_GRH) {
+ if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+ pr_err("sgid_index (%u) too large. max is %d\n",
+ ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+ return -EINVAL;
+ }
path->grh_mlid |= 1 << 7;
path->mgid_index = ah->grh.sgid_index;
path->hop_limit = ah->grh.hop_limit;
@@ -1317,22 +1337,6 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
path->static_rate = err;
path->port = port;
- if (ah->ah_flags & IB_AH_GRH) {
- if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
- pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
- return -EINVAL;
- }
-
- path->grh_mlid |= 1 << 7;
- path->mgid_index = ah->grh.sgid_index;
- path->hop_limit = ah->grh.hop_limit;
- path->tclass_flowlabel =
- cpu_to_be32((ah->grh.traffic_class << 20) |
- (ah->grh.flow_label));
- memcpy(path->rgid, ah->grh.dgid.raw, 16);
- }
-
if (attr_mask & IB_QP_TIMEOUT)
path->ackto_lt = attr->timeout << 3;
@@ -1492,6 +1496,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
+ struct mlx5_general_caps *gen;
struct mlx5_modify_qp_mbox_in *in;
struct mlx5_ib_pd *pd;
enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1500,6 +1505,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
int mlx5_st;
int err;
+ gen = &dev->mdev->caps.gen;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1539,7 +1545,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
goto out;
}
- context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
+ context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
}
if (attr_mask & IB_QP_DEST_QPN)
@@ -1685,9 +1691,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
+ struct mlx5_general_caps *gen;
int err = -EINVAL;
int port;
+ gen = &dev->mdev->caps.gen;
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1699,21 +1707,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
if ((attr_mask & IB_QP_PORT) &&
- (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
+ (attr->port_num == 0 || attr->port_num > gen->num_ports))
goto out;
if (attr_mask & IB_QP_PKEY_INDEX) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
+ if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
goto out;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
+ attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
goto out;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
- attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
+ attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2020,56 +2028,31 @@ static u8 bs_selector(int block_size)
}
}
-static int format_selector(struct ib_sig_attrs *attr,
- struct ib_sig_domain *domain,
- int *selector)
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+ struct mlx5_bsf_inl *inl)
{
+ /* Valid inline section and allow BSF refresh */
+ inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+ MLX5_BSF_REFRESH_DIF);
+ inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+ inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+ /* repeating block */
+ inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+ inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+ MLX5_DIF_CRC : MLX5_DIF_IPCS;
-#define FORMAT_DIF_NONE 0
-#define FORMAT_DIF_CRC_INC 8
-#define FORMAT_DIF_CRC_NO_INC 12
-#define FORMAT_DIF_CSUM_INC 13
-#define FORMAT_DIF_CSUM_NO_INC 14
+ if (domain->sig.dif.ref_remap)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
- switch (domain->sig.dif.type) {
- case IB_T10DIF_NONE:
- /* No DIF */
- *selector = FORMAT_DIF_NONE;
- break;
- case IB_T10DIF_TYPE1: /* Fall through */
- case IB_T10DIF_TYPE2:
- switch (domain->sig.dif.bg_type) {
- case IB_T10DIF_CRC:
- *selector = FORMAT_DIF_CRC_INC;
- break;
- case IB_T10DIF_CSUM:
- *selector = FORMAT_DIF_CSUM_INC;
- break;
- default:
- return 1;
- }
- break;
- case IB_T10DIF_TYPE3:
- switch (domain->sig.dif.bg_type) {
- case IB_T10DIF_CRC:
- *selector = domain->sig.dif.type3_inc_reftag ?
- FORMAT_DIF_CRC_INC :
- FORMAT_DIF_CRC_NO_INC;
- break;
- case IB_T10DIF_CSUM:
- *selector = domain->sig.dif.type3_inc_reftag ?
- FORMAT_DIF_CSUM_INC :
- FORMAT_DIF_CSUM_NO_INC;
- break;
- default:
- return 1;
- }
- break;
- default:
- return 1;
+ if (domain->sig.dif.app_escape) {
+ if (domain->sig.dif.ref_escape)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+ else
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
}
- return 0;
+ inl->dif_app_bitmask_check =
+ cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
@@ -2080,45 +2063,49 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
struct mlx5_bsf_basic *basic = &bsf->basic;
struct ib_sig_domain *mem = &sig_attrs->mem;
struct ib_sig_domain *wire = &sig_attrs->wire;
- int ret, selector;
memset(bsf, 0, sizeof(*bsf));
+
+ /* Basic + Extended + Inline */
+ basic->bsf_size_sbs = 1 << 7;
+ /* Input domain check byte mask */
+ basic->check_byte_mask = sig_attrs->check_mask;
+ basic->raw_data_size = cpu_to_be32(data_size);
+
+ /* Memory domain */
switch (sig_attrs->mem.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
case IB_SIG_TYPE_T10_DIF:
- if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
- return -EINVAL;
+ basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+ basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+ mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
- /* Input domain check byte mask */
- basic->check_byte_mask = sig_attrs->check_mask;
+ /* Wire domain */
+ switch (sig_attrs->wire.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
- mem->sig.dif.type == wire->sig.dif.type) {
+ mem->sig_type == wire->sig_type) {
/* Same block structure */
- basic->bsf_size_sbs = 1 << 4;
+ basic->bsf_size_sbs |= 1 << 4;
if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
- basic->wire.copy_byte_mask |= 0xc0;
+ basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
- basic->wire.copy_byte_mask |= 0x30;
+ basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
- basic->wire.copy_byte_mask |= 0x0f;
+ basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
} else
basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
- basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
- basic->raw_data_size = cpu_to_be32(data_size);
-
- ret = format_selector(sig_attrs, mem, &selector);
- if (ret)
- return -EINVAL;
- basic->m_bfs_psv = cpu_to_be32(selector << 24 |
- msig->psv_memory.psv_idx);
-
- ret = format_selector(sig_attrs, wire, &selector);
- if (ret)
- return -EINVAL;
- basic->w_bfs_psv = cpu_to_be32(selector << 24 |
- msig->psv_wire.psv_idx);
+ basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+ mlx5_fill_inl_bsf(wire, &bsf->w_inl);
break;
-
default:
return -EINVAL;
}
@@ -2317,20 +2304,21 @@ static int set_psv_wr(struct ib_sig_domain *domain,
memset(psv_seg, 0, sizeof(*psv_seg));
psv_seg->psv_num = cpu_to_be32(psv_idx);
switch (domain->sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
case IB_SIG_TYPE_T10_DIF:
psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
domain->sig.dif.app_tag);
psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
-
- *seg += sizeof(*psv_seg);
- *size += sizeof(*psv_seg) / 16;
break;
-
default:
pr_err("Bad signature type given.\n");
return 1;
}
+ *seg += sizeof(*psv_seg);
+ *size += sizeof(*psv_seg) / 16;
+
return 0;
}
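
Editor's note: the hunk above pulls the "*seg"/"*size" bookkeeping out of the switch so it also runs for the new IB_SIG_TYPE_NONE case; as elsewhere in mlx5 send-WQE construction, *size is counted in 16-byte units, which is why the increment divides the segment size by 16. A minimal, stand-alone C sketch of that unit accounting, using a hypothetical stand-in segment (the real mlx5 segment layout is not reproduced here):

#include <stdio.h>

/* Hypothetical stand-in for a WQE segment; the real mlx5 segment
 * definitions live in the driver headers and are not copied here. */
struct example_psv_seg {
	unsigned int psv_num;
	unsigned int transient_sig;
	unsigned int ref_tag;
	unsigned int rsvd;	/* pads the stand-in segment to 16 bytes */
};

int main(void)
{
	size_t bytes = sizeof(struct example_psv_seg);

	/* mlx5 WQE size fields are kept in 16-byte units, hence the "/ 16". */
	printf("segment is %zu bytes = %zu sixteen-byte unit(s)\n",
	       bytes, bytes / 16);
	return 0;
}
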
@@ -2501,7 +2489,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
err = -EINVAL;
*bad_wr = wr;
@@ -2893,7 +2881,8 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
ib_ah_attr->port_num = path->port;
- if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+ if (ib_ah_attr->port_num == 0 ||
+ ib_ah_attr->port_num > dev->caps.gen.num_ports)
return;
ib_ah_attr->sl = path->sl & 0xf;
@@ -3011,10 +3000,12 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_general_caps *gen;
struct mlx5_ib_xrcd *xrcd;
int err;
- if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
+ gen = &dev->mdev->caps.gen;
+ if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
return ERR_PTR(-ENOSYS);
xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
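
Editor's note: throughout this file the patch replaces direct dev->mdev->caps.* reads with a cached gen = &dev->mdev->caps.gen pointer, and a few limits switch from absolute values to log2-encoded ones (e.g. 1 << gen->log_max_ra_res_qp). A minimal user-space sketch of that validation pattern, with made-up capability values since the real ones come from firmware:

#include <stdio.h>

/* Hypothetical capability block mirroring the "caps.gen" idea. */
struct general_caps_example {
	int max_wqes;
	int log_max_ra_res_qp;	/* limit stored as log2 of the real maximum */
};

static int check_rd_atomic(const struct general_caps_example *gen, int requested)
{
	/* Decode the log2-encoded limit before comparing, as the patch does. */
	if (requested > (1 << gen->log_max_ra_res_qp))
		return -1;
	return 0;
}

int main(void)
{
	struct general_caps_example gen = { .max_wqes = 16384, .log_max_ra_res_qp = 4 };

	printf("request 8: %s\n", check_rd_atomic(&gen, 8) ? "rejected" : "ok");
	printf("request 32: %s\n", check_rd_atomic(&gen, 32) ? "rejected" : "ok");
	return 0;
}
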
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 70bd131ba646..97cc1baaa8e3 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -238,6 +238,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_general_caps *gen;
struct mlx5_ib_srq *srq;
int desc_size;
int buf_size;
@@ -247,11 +248,12 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
int is_xrc;
u32 flgs, xrcdn;
+ gen = &dev->mdev->caps.gen;
/* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
+ if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
init_attr->attr.max_wr,
- dev->mdev->caps.max_srq_wqes);
+ gen->max_srq_wqes);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index b6f7f457fc55..8881fa376e06 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -294,7 +294,7 @@ int mthca_create_agents(struct mthca_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler,
- NULL, NULL);
+ NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 90200245c5eb..02120d340d50 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1003,13 +1003,13 @@ int nes_init_cqp(struct nes_device *nesdev)
(sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) +
sizeof(struct nes_hw_cqp_qp_context);
- nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
- &nesdev->cqp_pbase);
+ nesdev->cqp_vbase = pci_zalloc_consistent(nesdev->pcidev,
+ nesdev->cqp_mem_size,
+ &nesdev->cqp_pbase);
if (!nesdev->cqp_vbase) {
nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n");
return -ENOMEM;
}
- memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size);
/* Allocate a twice the number of CQP requests as the SQ size */
nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
@@ -1691,13 +1691,13 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
(NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) +
sizeof(struct nes_hw_nic_qp_context);
- nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size,
- &nesvnic->nic_pbase);
+ nesvnic->nic_vbase = pci_zalloc_consistent(nesdev->pcidev,
+ nesvnic->nic_mem_size,
+ &nesvnic->nic_pbase);
if (!nesvnic->nic_vbase) {
nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n");
return -ENOMEM;
}
- memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size);
nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n",
nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 218dd3574285..fef067c959fc 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1616,8 +1616,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
entries, nescq->cq_mem_size, nescq->hw_cq.cq_number);
/* allocate the physical buffer space */
- mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
- &nescq->hw_cq.cq_pbase);
+ mem = pci_zalloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
+ &nescq->hw_cq.cq_pbase);
if (!mem) {
printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n");
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
@@ -1625,7 +1625,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(-ENOMEM);
}
- memset(mem, 0, nescq->cq_mem_size);
nescq->hw_cq.cq_vbase = mem;
nescq->hw_cq.cq_head = 0;
nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n",
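
Editor's note: the nes hunks (like the amso1100 ones) swap pci_alloc_consistent() + memset() for pci_zalloc_consistent(), which hands back already-zeroed DMA-coherent memory. Purely to illustrate why the explicit memset() becomes redundant, here is the same "allocate then zero" versus "zeroing allocator" pattern in plain C, with calloc() standing in for the DMA-coherent allocator:

#include <stdlib.h>
#include <string.h>

/* Open-coded "allocate then zero" (what the old code did). */
static void *alloc_then_zero(size_t size)
{
	void *buf = malloc(size);

	if (buf)
		memset(buf, 0, size);	/* separate zeroing step */
	return buf;
}

/* Zeroing allocator (the role pci_zalloc_consistent() plays for DMA memory). */
static void *zalloc_example(size_t size)
{
	return calloc(1, size);		/* allocation and zeroing in one call */
}

int main(void)
{
	void *a = alloc_then_zero(4096);
	void *b = zalloc_example(4096);

	free(a);
	free(b);
	return 0;
}
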
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 19011dbb930f..b43456ae124b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
#include <be_roce.h>
#include "ocrdma_sli.h"
-#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -137,6 +137,7 @@ struct mqe_ctx {
u16 cqe_status;
u16 ext_status;
bool cmd_done;
+ bool fw_error_state;
};
struct ocrdma_hw_mr {
@@ -235,7 +236,10 @@ struct ocrdma_dev {
struct list_head entry;
struct rcu_head rcu;
int id;
- u64 stag_arr[OCRDMA_MAX_STAG];
+ u64 *stag_arr;
+ u8 sl; /* service level */
+ bool pfc_state;
+ atomic_t update_sl;
u16 pvid;
u32 asic_id;
@@ -518,4 +522,22 @@ static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
}
+static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
+{
+ return *(pfc + prio);
+}
+
+static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
+{
+ return *(app_prio + prio);
+}
+
+static inline u8 ocrdma_is_enabled_and_synced(u32 state)
+{ /* May also be used to interpret TC-state, QCN-state,
+ * Appl-state and Logical-link-state in the future.
+ */
+ return (state & OCRDMA_STATE_FLAG_ENABLED) &&
+ (state & OCRDMA_STATE_FLAG_SYNC);
+}
+
#endif
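
Editor's note: the ocrdma.h hunks grow OCRDMA_MAX_STAG from 8192 to 16384 and turn the embedded u64 stag_arr[OCRDMA_MAX_STAG] into a pointer that ocrdma_main.c now allocates; at the new size the table is 16384 * 8 = 131072 bytes (128 KiB), too large to keep inside the device structure. A small sketch of the same "move a big table out of the struct" step, with stand-in names:

#include <stdint.h>
#include <stdlib.h>

#define EXAMPLE_MAX_STAG 16384	/* mirrors the new OCRDMA_MAX_STAG */

struct example_dev {
	uint64_t *stag_arr;	/* was: uint64_t stag_arr[EXAMPLE_MAX_STAG]; */
};

static int example_alloc_resources(struct example_dev *dev)
{
	/* 16384 entries * 8 bytes = 131072 bytes, allocated separately and zeroed. */
	dev->stag_arr = calloc(EXAMPLE_MAX_STAG, sizeof(uint64_t));
	return dev->stag_arr ? 0 : -1;
}

static void example_free_resources(struct example_dev *dev)
{
	free(dev->stag_arr);
	dev->stag_arr = NULL;
}

int main(void)
{
	struct example_dev dev = { 0 };

	if (example_alloc_resources(&dev) == 0)
		example_free_resources(&dev);
	return 0;
}
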
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index d4cc01f10c01..ac02ce4e8040 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -35,8 +35,10 @@
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
+#define OCRDMA_VID_PCP_SHIFT 0xD
+
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
- struct ib_ah_attr *attr, int pdid)
+ struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
{
int status = 0;
u16 vlan_tag; bool vlan_enabled = false;
@@ -47,15 +49,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
memset(&eth, 0, sizeof(eth));
memset(&grh, 0, sizeof(grh));
- ah->sgid_index = attr->grh.sgid_index;
-
+ /* VLAN */
vlan_tag = attr->vlan_id;
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
- vlan_tag |= (attr->sl & 7) << 13;
+ vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan);
vlan_enabled = true;
@@ -63,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
eth_sz = sizeof(struct ocrdma_eth_basic);
}
+ /* MAC */
memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
if (status)
return status;
- status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
- (union ib_gid *)&grh.sgid[0]);
- if (status)
- return status;
+ ah->sgid_index = attr->grh.sgid_index;
+ memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+ memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
grh.tclass_flow = cpu_to_be32((6 << 28) |
(attr->grh.traffic_class << 24) |
@@ -79,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
/* 0x1b is next header value in GRH */
grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
(0x1b << 8) | attr->grh.hop_limit);
-
- memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+ /* Eth HDR */
memcpy(&ah->av->eth_hdr, &eth, eth_sz);
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
if (vlan_enabled)
@@ -96,10 +95,14 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
struct ocrdma_ah *ah;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ union ib_gid sgid;
+ u8 zmac[ETH_ALEN];
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
+ if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+ ocrdma_init_service_level(dev);
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -107,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
status = ocrdma_alloc_av(dev, ah);
if (status)
goto av_err;
- status = set_av_attr(dev, ah, attr, pd->id);
+
+ status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+ if (status) {
+ pr_err("%s(): Failed to query sgid, status = %d\n",
+ __func__, status);
+ goto av_conf_err;
+ }
+
+ memset(&zmac, 0, ETH_ALEN);
+ if (pd->uctx &&
+ memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+ status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+ attr->dmac, &attr->vlan_id);
+ if (status) {
+ pr_err("%s(): Failed to resolve dmac from gid."
+ "status = %d\n", __func__, status);
+ goto av_conf_err;
+ }
+ }
+
+ status = set_av_attr(dev, ah, attr, &sgid, pd->id);
if (status)
goto av_conf_err;
@@ -141,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
struct ocrdma_av *av = ah->av;
struct ocrdma_grh *grh;
attr->ah_flags |= IB_AH_GRH;
- if (ah->av->valid & Bit(1)) {
+ if (ah->av->valid & OCRDMA_AV_VALID) {
grh = (struct ocrdma_grh *)((u8 *)ah->av +
sizeof(struct ocrdma_eth_vlan));
attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
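
Editor's note: set_av_attr() now derives the VLAN priority bits from the negotiated service level, (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT with the shift defined as 0xD (13), i.e. the PCP field of an 802.1Q tag; ocrdma_query_ah() recovers it with the matching ">> 13". A self-contained sketch of that packing and unpacking, with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_VID_PCP_SHIFT 13	/* mirrors OCRDMA_VID_PCP_SHIFT (0xD) */

/* Pack a 12-bit VLAN ID and a 3-bit priority (PCP) into an 802.1Q TCI. */
static uint16_t example_build_tci(uint16_t vlan_id, uint8_t sl)
{
	return (uint16_t)((vlan_id & 0x0FFF) |
			  ((sl & 0x07) << EXAMPLE_VID_PCP_SHIFT));
}

/* Recover the priority, matching the ">> 13" used by ocrdma_query_ah(). */
static uint8_t example_tci_to_sl(uint16_t tci)
{
	return (uint8_t)(tci >> EXAMPLE_VID_PCP_SHIFT);
}

int main(void)
{
	uint16_t tci = example_build_tci(100, 5);

	printf("tci=0x%04x vlan=%u sl=%u\n",
	       tci, tci & 0x0FFF, example_tci_to_sl(tci));
	return 0;
}
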
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 3bbf2010a821..638bff1ffc6c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -348,11 +348,6 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
return mqe;
}
-static void *ocrdma_alloc_mqe(void)
-{
- return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
-}
-
static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
@@ -525,7 +520,7 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
cmd->eqn = eq->id;
- cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
+ cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
cq->dma, PAGE_SIZE_4K);
@@ -566,8 +561,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
- cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE);
- cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE);
+ cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
+ cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
cmd->async_cqid_ringsize = cq->id;
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
@@ -661,7 +656,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
{
struct ocrdma_qp *qp = NULL;
struct ocrdma_cq *cq = NULL;
- struct ib_event ib_evt = { 0 };
+ struct ib_event ib_evt;
int cq_event = 0;
int qp_event = 1;
int srq_event = 0;
@@ -674,6 +669,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
+ memset(&ib_evt, 0, sizeof(ib_evt));
+
ib_evt.device = &dev->ibdev;
switch (type) {
@@ -771,6 +768,10 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
break;
+
+ case OCRDMA_ASYNC_EVENT_COS_VALUE:
+ atomic_set(&dev->update_sl, 1);
+ break;
default:
/* Not interested evts. */
break;
@@ -962,8 +963,12 @@ static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
msecs_to_jiffies(30000));
if (status)
return 0;
- else
+ else {
+ dev->mqe_ctx.fw_error_state = true;
+ pr_err("%s(%d) mailbox timeout: fw not responding\n",
+ __func__, dev->id);
return -1;
+ }
}
/* issue a mailbox command on the MQ */
@@ -975,6 +980,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
struct ocrdma_mbx_rsp *rsp = NULL;
mutex_lock(&dev->mqe_ctx.lock);
+ if (dev->mqe_ctx.fw_error_state)
+ goto mbx_err;
ocrdma_post_mqe(dev, mqe);
status = ocrdma_wait_mqe_cmpl(dev);
if (status)
@@ -1078,7 +1085,8 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
attr->max_mw = rsp->max_mw;
attr->max_mr = rsp->max_mr;
- attr->max_mr_size = ~0ull;
+ attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
+ rsp->max_mr_size_lo;
attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
@@ -1176,10 +1184,10 @@ int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
{
struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
- struct ocrdma_rdma_stats_resp *old_stats = NULL;
+ struct ocrdma_rdma_stats_resp *old_stats;
int status;
- old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
+ old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
if (old_stats == NULL)
return -ENOMEM;
@@ -1222,10 +1230,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
struct mgmt_hba_attribs *hba_attribs;
- mqe = ocrdma_alloc_mqe();
+ mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
if (!mqe)
return status;
- memset(mqe, 0, sizeof(*mqe));
dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
@@ -1252,7 +1259,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
- dev->hba_port_num = hba_attribs->phy_port;
+ dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
+ OCRDMA_HBA_ATTRB_PTNUM_MASK)
+ >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
strncpy(dev->model_number,
hba_attribs->controller_model_number, 31);
}
@@ -1302,7 +1311,8 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err;
rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
- *lnk_speed = rsp->phys_port_speed;
+ *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
+ >> OCRDMA_PHY_PS_SHIFT;
mbx_err:
kfree(cmd);
@@ -1328,11 +1338,16 @@ static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
goto mbx_err;
rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
- dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
+ dev->phy.phy_type =
+ (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
+ dev->phy.interface_type =
+ (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
+ >> OCRDMA_IF_TYPE_SHIFT;
dev->phy.auto_speeds_supported =
- le16_to_cpu(rsp->auto_speeds_supported);
+ (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
dev->phy.fixed_speeds_supported =
- le16_to_cpu(rsp->fixed_speeds_supported);
+ (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
+ >> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err:
kfree(cmd);
return status;
@@ -1457,8 +1472,8 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
- pbes[i].pa_lo = (u32) (pa & 0xffffffff);
- pbes[i].pa_hi = (u32) upper_32_bits(pa);
+ pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
+ pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
pa += PAGE_SIZE;
}
cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
@@ -1501,6 +1516,7 @@ static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
dev->av_tbl.pa);
+ dev->av_tbl.va = NULL;
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
dev->av_tbl.pbl.pa);
kfree(cmd);
@@ -1624,14 +1640,16 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
OCRDMA_CREATE_CQ_TYPE_SHIFT;
cq->phase_change = false;
- cmd->cmd.cqe_count = (cq->len / cqe_size);
+ cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
} else {
- cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
+ cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
cq->phase_change = true;
}
- cmd->cmd.pd_id = pd_id; /* valid only for v3 */
+ /* pd_id valid only for v3 */
+ cmd->cmd.pdid_cqecnt |= (pd_id <<
+ OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -2206,7 +2224,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
qp->rq_cq = cq;
- if (pd->dpp_enabled && pd->num_dpp_qp) {
+ if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
+ (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
}
@@ -2254,7 +2273,8 @@ mbx_err:
static int ocrdma_set_av_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
- struct ib_qp_attr *attrs)
+ struct ib_qp_attr *attrs,
+ int attr_mask)
{
int status;
struct ib_ah_attr *ah_attr = &attrs->ah_attr;
@@ -2264,6 +2284,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
return -EINVAL;
+ if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
+ ocrdma_init_service_level(qp->dev);
cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
@@ -2292,11 +2314,13 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
- vlan_id = ah_attr->vlan_id;
- if (vlan_id && (vlan_id < 0x1000)) {
+ if (attr_mask & IB_QP_VID) {
+ vlan_id = attrs->vlan_id;
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
+ cmd->params.rnt_rc_sl_fl |=
+ (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}
return 0;
}
@@ -2318,7 +2342,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
}
if (attr_mask & IB_QP_AV) {
- status = ocrdma_set_av_params(qp, cmd, attrs);
+ status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
if (status)
return status;
} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
@@ -2604,6 +2628,168 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
return status;
}
+static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
+ struct ocrdma_dcbx_cfg *dcbxcfg)
+{
+ int status = 0;
+ dma_addr_t pa;
+ struct ocrdma_mqe cmd;
+
+ struct ocrdma_get_dcbx_cfg_req *req = NULL;
+ struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
+
+ memset(&cmd, 0, sizeof(struct ocrdma_mqe));
+ cmd.hdr.pyld_len = max_t (u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
+ sizeof(struct ocrdma_get_dcbx_cfg_req));
+ req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
+ if (!req) {
+ status = -ENOMEM;
+ goto mem_err;
+ }
+
+ cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
+ mqe_sge->pa_hi = (u32) upper_32_bits(pa);
+ mqe_sge->len = cmd.hdr.pyld_len;
+
+ memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
+ ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
+ OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
+ req->param_type = ptype;
+
+ status = ocrdma_mbx_cmd(dev, &cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
+ ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
+ memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
+
+mbx_err:
+ dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
+mem_err:
+ return status;
+}
+
+#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08
+#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05
+
+static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
+ struct ocrdma_dcbx_cfg *dcbxcfg,
+ u8 *srvc_lvl)
+{
+ int status = -EINVAL, indx, slindx;
+ int ventry_cnt;
+ struct ocrdma_app_parameter *app_param;
+ u8 valid, proto_sel;
+ u8 app_prio, pfc_prio;
+ u16 proto;
+
+ if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
+ pr_info("%s ocrdma%d DCBX is disabled\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ goto out;
+ }
+
+ if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
+ pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id,
+ (ptype > 0 ? "operational" : "admin"),
+ (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
+ "enabled" : "disabled",
+ (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
+ "" : ", not sync'ed");
+ goto out;
+ } else {
+ pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ }
+
+ ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
+ OCRDMA_DCBX_APP_ENTRY_SHIFT)
+ & OCRDMA_DCBX_STATE_MASK;
+
+ for (indx = 0; indx < ventry_cnt; indx++) {
+ app_param = &dcbxcfg->app_param[indx];
+ valid = (app_param->valid_proto_app >>
+ OCRDMA_APP_PARAM_VALID_SHIFT)
+ & OCRDMA_APP_PARAM_VALID_MASK;
+ proto_sel = (app_param->valid_proto_app
+ >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
+ & OCRDMA_APP_PARAM_PROTO_SEL_MASK;
+ proto = app_param->valid_proto_app &
+ OCRDMA_APP_PARAM_APP_PROTO_MASK;
+
+ if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
+ proto_sel == OCRDMA_PROTO_SELECT_L2) {
+ for (slindx = 0; slindx <
+ OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
+ app_prio = ocrdma_get_app_prio(
+ (u8 *)app_param->app_prio,
+ slindx);
+ pfc_prio = ocrdma_get_pfc_prio(
+ (u8 *)dcbxcfg->pfc_prio,
+ slindx);
+
+ if (app_prio && pfc_prio) {
+ *srvc_lvl = slindx;
+ status = 0;
+ goto out;
+ }
+ }
+ if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
+ pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
+ dev_name(&dev->nic_info.pdev->dev),
+ dev->id, proto);
+ }
+ }
+ }
+
+out:
+ return status;
+}
+
+void ocrdma_init_service_level(struct ocrdma_dev *dev)
+{
+ int status = 0, indx;
+ struct ocrdma_dcbx_cfg dcbxcfg;
+ u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
+ int ptype = OCRDMA_PARAMETER_TYPE_OPER;
+
+ for (indx = 0; indx < 2; indx++) {
+ status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
+ if (status) {
+ pr_err("%s(): status=%d\n", __func__, status);
+ ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
+ continue;
+ }
+
+ status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
+ &dcbxcfg, &srvc_lvl);
+ if (status) {
+ ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
+ continue;
+ }
+
+ break;
+ }
+
+ if (status)
+ pr_info("%s ocrdma%d service level default\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ else
+ pr_info("%s ocrdma%d service level %d\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id,
+ srvc_lvl);
+
+ dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
+ dev->sl = srvc_lvl;
+}
+
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
int i;
@@ -2709,13 +2895,15 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
goto conf_err;
status = ocrdma_mbx_get_phy_info(dev);
if (status)
- goto conf_err;
+ goto info_attrb_err;
status = ocrdma_mbx_get_ctrl_attribs(dev);
if (status)
- goto conf_err;
+ goto info_attrb_err;
return 0;
+info_attrb_err:
+ ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
ocrdma_destroy_mq(dev);
mq_err:
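
Editor's note: several query responses in this file stop using dedicated u8/u16 fields and instead extract sub-fields from packed 32-bit words with MASK/SHIFT pairs (the link speed via OCRDMA_PHY_PS_MASK and OCRDMA_PHY_PS_SHIFT, for instance), and max_mr_size is now assembled from separate hi/lo words. A stand-alone C example of the same extract-and-combine pattern, reusing the mask values shown in the ocrdma_sli.h hunks below:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PHY_PS_MASK  0x00FF0000	/* mirrors OCRDMA_PHY_PS_MASK */
#define EXAMPLE_PHY_PS_SHIFT 0x10	/* mirrors OCRDMA_PHY_PS_SHIFT */

int main(void)
{
	/* A packed response word carrying fault/speed/duplex/port-number fields. */
	uint32_t pflt_pps_ld_pnum = 0x00200101;
	uint8_t lnk_speed = (pflt_pps_ld_pnum & EXAMPLE_PHY_PS_MASK)
			    >> EXAMPLE_PHY_PS_SHIFT;

	/* max_mr_size split across two 32-bit words, recombined into 64 bits. */
	uint32_t max_mr_size_hi = 0x00000001;
	uint32_t max_mr_size_lo = 0x00000000;
	uint64_t max_mr_size = ((uint64_t)max_mr_size_hi << 32) | max_mr_size_lo;

	printf("link speed field = 0x%02x, max_mr_size = 0x%llx\n",
	       lnk_speed, (unsigned long long)max_mr_size);
	return 0;
}
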
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e513f7293142..6eed8f191322 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -135,4 +135,6 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
char *port_speed_string(struct ocrdma_dev *dev);
+void ocrdma_init_service_level(struct ocrdma_dev *);
+
#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7c504e079744..b0b2257b8e04 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -324,6 +324,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
if (!dev->qp_tbl)
goto alloc_err;
}
+
+ dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL);
+ if (dev->stag_arr == NULL)
+ goto alloc_err;
+
spin_lock_init(&dev->av_tbl.lock);
spin_lock_init(&dev->flush_q_lock);
return 0;
@@ -334,6 +339,7 @@ alloc_err:
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
+ kfree(dev->stag_arr);
kfree(dev->qp_tbl);
kfree(dev->cq_tbl);
kfree(dev->sgid_tbl);
@@ -353,15 +359,25 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct ocrdma_dev *dev = dev_get_drvdata(device);
- return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
+}
+
+static ssize_t show_hca_type(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
static struct device_attribute *ocrdma_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type
};
static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
@@ -372,6 +388,68 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
}
+static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
+{
+ /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
+ union ib_gid *sgid = &dev->sgid_tbl[0];
+
+ sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ ocrdma_get_guid(dev, &sgid->raw[8]);
+}
+
+static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev,
+ struct net_device *net)
+{
+ struct in_device *in_dev;
+ union ib_gid gid;
+ in_dev = in_dev_get(net);
+ if (in_dev) {
+ for_ifa(in_dev) {
+ ipv6_addr_set_v4mapped(ifa->ifa_address,
+ (struct in6_addr *)&gid);
+ ocrdma_add_sgid(dev, &gid);
+ }
+ endfor_ifa(in_dev);
+ in_dev_put(in_dev);
+ }
+}
+
+static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev,
+ struct net_device *net)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_dev *in6_dev;
+ union ib_gid *pgid;
+ struct inet6_ifaddr *ifp;
+ in6_dev = in6_dev_get(net);
+ if (in6_dev) {
+ read_lock_bh(&in6_dev->lock);
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+ pgid = (union ib_gid *)&ifp->addr;
+ ocrdma_add_sgid(dev, pgid);
+ }
+ read_unlock_bh(&in6_dev->lock);
+ in6_dev_put(in6_dev);
+ }
+#endif
+}
+
+static void ocrdma_init_gid_table(struct ocrdma_dev *dev)
+{
+ struct net_device *net_dev;
+
+ for_each_netdev(&init_net, net_dev) {
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ?
+ rdma_vlan_dev_real_dev(net_dev) : net_dev;
+
+ if (real_dev == dev->nic_info.netdev) {
+ ocrdma_add_default_sgid(dev);
+ ocrdma_init_ipv4_gids(dev, net_dev);
+ ocrdma_init_ipv6_gids(dev, net_dev);
+ }
+ }
+}
+
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
int status = 0, i;
@@ -399,6 +477,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
+ ocrdma_init_service_level(dev);
+ ocrdma_init_gid_table(dev);
status = ocrdma_register_device(dev);
if (status)
goto alloc_err;
@@ -508,6 +588,12 @@ static int ocrdma_close(struct ocrdma_dev *dev)
return 0;
}
+static void ocrdma_shutdown(struct ocrdma_dev *dev)
+{
+ ocrdma_close(dev);
+ ocrdma_remove(dev);
+}
+
/* event handling via NIC driver ensures that all the NIC specific
* initialization done before RoCE driver notifies
* event to stack.
@@ -521,6 +607,9 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
+ case BE_DEV_SHUTDOWN:
+ ocrdma_shutdown(dev);
+ break;
}
}
@@ -567,8 +656,10 @@ static int __init ocrdma_init_module(void)
return 0;
err_be_reg:
+#if IS_ENABLED(CONFIG_IPV6)
ocrdma_unregister_inet6addr_notifier();
err_notifier6:
+#endif
ocrdma_unregister_inetaddr_notifier();
return status;
}
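
Editor's note: ocrdma_init_gid_table() seeds GID index 0 with the link-local default, a fe80::/64 prefix plus the device GUID in the low 8 bytes, then appends v4-mapped IPv4 and IPv6 addresses. A sketch of building that default GID in plain C; the 16-byte layout is the standard RoCE/IB GID format, and the GUID bytes here are made up (the driver derives them from the MAC address):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t gid[16];
	/* Hypothetical EUI-64 GUID; ocrdma_get_guid() fills this in the driver. */
	const uint8_t guid[8] = { 0x02, 0x90, 0xfa, 0xff, 0xfe, 0x12, 0x34, 0x56 };
	/* fe80::/64 link-local subnet prefix, big-endian, in bytes 0..7. */
	const uint8_t prefix[8] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0 };

	memcpy(&gid[0], prefix, 8);	/* subnet_prefix = cpu_to_be64(0xfe80000000000000LL) */
	memcpy(&gid[8], guid, 8);	/* ocrdma_get_guid(dev, &sgid->raw[8]) */

	for (int i = 0; i < 16; i++)
		printf("%02x%s", gid[i], (i & 1) ? " " : "");
	printf("\n");
	return 0;
}
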
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 96c9ee602ba4..4e036480c1a8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -28,8 +28,6 @@
#ifndef __OCRDMA_SLI_H__
#define __OCRDMA_SLI_H__
-#define Bit(_b) (1 << (_b))
-
enum {
OCRDMA_ASIC_GEN_SKH_R = 0x04,
OCRDMA_ASIC_GEN_LANCER = 0x0B
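
Editor's note: this hunk drops the driver-private Bit() macro, and the remaining ocrdma_sli.h and ocrdma_hw.c hunks convert every user to the kernel-wide BIT(). For reference, a user-space equivalent of the macro; the kernel's BIT() in <linux/bitops.h> expands to an unsigned long shift, which this sketch mirrors:

#include <stdio.h>

/* User-space stand-in for the kernel's BIT() from <linux/bitops.h>. */
#define BIT_EXAMPLE(nr) (1UL << (nr))

int main(void)
{
	/* e.g. OCRDMA_MCQE_VALID_MASK = BIT(31) */
	unsigned long valid_mask = BIT_EXAMPLE(31);
	unsigned long cqe_flags = 0x80000001UL;

	printf("valid bit set: %s\n", (cqe_flags & valid_mask) ? "yes" : "no");
	return 0;
}
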
@@ -44,35 +42,39 @@ enum {
#define OCRDMA_SUBSYS_ROCE 10
enum {
OCRDMA_CMD_QUERY_CONFIG = 1,
- OCRDMA_CMD_ALLOC_PD,
- OCRDMA_CMD_DEALLOC_PD,
-
- OCRDMA_CMD_CREATE_AH_TBL,
- OCRDMA_CMD_DELETE_AH_TBL,
-
- OCRDMA_CMD_CREATE_QP,
- OCRDMA_CMD_QUERY_QP,
- OCRDMA_CMD_MODIFY_QP,
- OCRDMA_CMD_DELETE_QP,
-
- OCRDMA_CMD_RSVD1,
- OCRDMA_CMD_ALLOC_LKEY,
- OCRDMA_CMD_DEALLOC_LKEY,
- OCRDMA_CMD_REGISTER_NSMR,
- OCRDMA_CMD_REREGISTER_NSMR,
- OCRDMA_CMD_REGISTER_NSMR_CONT,
- OCRDMA_CMD_QUERY_NSMR,
- OCRDMA_CMD_ALLOC_MW,
- OCRDMA_CMD_QUERY_MW,
-
- OCRDMA_CMD_CREATE_SRQ,
- OCRDMA_CMD_QUERY_SRQ,
- OCRDMA_CMD_MODIFY_SRQ,
- OCRDMA_CMD_DELETE_SRQ,
-
- OCRDMA_CMD_ATTACH_MCAST,
- OCRDMA_CMD_DETACH_MCAST,
- OCRDMA_CMD_GET_RDMA_STATS,
+ OCRDMA_CMD_ALLOC_PD = 2,
+ OCRDMA_CMD_DEALLOC_PD = 3,
+
+ OCRDMA_CMD_CREATE_AH_TBL = 4,
+ OCRDMA_CMD_DELETE_AH_TBL = 5,
+
+ OCRDMA_CMD_CREATE_QP = 6,
+ OCRDMA_CMD_QUERY_QP = 7,
+ OCRDMA_CMD_MODIFY_QP = 8,
+ OCRDMA_CMD_DELETE_QP = 9,
+
+ OCRDMA_CMD_RSVD1 = 10,
+ OCRDMA_CMD_ALLOC_LKEY = 11,
+ OCRDMA_CMD_DEALLOC_LKEY = 12,
+ OCRDMA_CMD_REGISTER_NSMR = 13,
+ OCRDMA_CMD_REREGISTER_NSMR = 14,
+ OCRDMA_CMD_REGISTER_NSMR_CONT = 15,
+ OCRDMA_CMD_QUERY_NSMR = 16,
+ OCRDMA_CMD_ALLOC_MW = 17,
+ OCRDMA_CMD_QUERY_MW = 18,
+
+ OCRDMA_CMD_CREATE_SRQ = 19,
+ OCRDMA_CMD_QUERY_SRQ = 20,
+ OCRDMA_CMD_MODIFY_SRQ = 21,
+ OCRDMA_CMD_DELETE_SRQ = 22,
+
+ OCRDMA_CMD_ATTACH_MCAST = 23,
+ OCRDMA_CMD_DETACH_MCAST = 24,
+
+ OCRDMA_CMD_CREATE_RBQ = 25,
+ OCRDMA_CMD_DESTROY_RBQ = 26,
+
+ OCRDMA_CMD_GET_RDMA_STATS = 27,
OCRDMA_CMD_MAX
};
@@ -99,11 +101,11 @@ enum {
QTYPE_MCCQ = 3
};
-#define OCRDMA_MAX_SGID (8)
+#define OCRDMA_MAX_SGID 8
#define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048
-#define OCRDMA_MAX_STAG 8192
+#define OCRDMA_MAX_STAG 16384
enum {
OCRDMA_DB_RQ_OFFSET = 0xE0,
@@ -124,33 +126,33 @@ enum {
#define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */
/* qid #2 msbits at 12-11 */
#define OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT 0x1
-#define OCRDMA_DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+#define OCRDMA_DB_CQ_NUM_POPPED_SHIFT 16 /* bits 16 - 28 */
/* Rearm bit */
-#define OCRDMA_DB_CQ_REARM_SHIFT (29) /* bit 29 */
+#define OCRDMA_DB_CQ_REARM_SHIFT 29 /* bit 29 */
/* solicited bit */
-#define OCRDMA_DB_CQ_SOLICIT_SHIFT (31) /* bit 31 */
+#define OCRDMA_DB_CQ_SOLICIT_SHIFT 31 /* bit 31 */
#define OCRDMA_EQ_ID_MASK 0x1FF /* bits 0 - 8 */
#define OCRDMA_EQ_ID_EXT_MASK 0x3e00 /* bits 9-13 */
-#define OCRDMA_EQ_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 at 11-15 */
+#define OCRDMA_EQ_ID_EXT_MASK_SHIFT 2 /* qid bits 9-13 at 11-15 */
/* Clear the interrupt for this eq */
-#define OCRDMA_EQ_CLR_SHIFT (9) /* bit 9 */
+#define OCRDMA_EQ_CLR_SHIFT 9 /* bit 9 */
/* Must be 1 */
-#define OCRDMA_EQ_TYPE_SHIFT (10) /* bit 10 */
+#define OCRDMA_EQ_TYPE_SHIFT 10 /* bit 10 */
/* Number of event entries processed */
-#define OCRDMA_NUM_EQE_SHIFT (16) /* bits 16 - 28 */
+#define OCRDMA_NUM_EQE_SHIFT 16 /* bits 16 - 28 */
/* Rearm bit */
-#define OCRDMA_REARM_SHIFT (29) /* bit 29 */
+#define OCRDMA_REARM_SHIFT 29 /* bit 29 */
#define OCRDMA_MQ_ID_MASK 0x7FF /* bits 0 - 10 */
/* Number of entries posted */
-#define OCRDMA_MQ_NUM_MQE_SHIFT (16) /* bits 16 - 29 */
+#define OCRDMA_MQ_NUM_MQE_SHIFT 16 /* bits 16 - 29 */
-#define OCRDMA_MIN_HPAGE_SIZE (4096)
+#define OCRDMA_MIN_HPAGE_SIZE 4096
-#define OCRDMA_MIN_Q_PAGE_SIZE (4096)
-#define OCRDMA_MAX_Q_PAGES (8)
+#define OCRDMA_MIN_Q_PAGE_SIZE 4096
+#define OCRDMA_MAX_Q_PAGES 8
#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C
#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF
@@ -166,14 +168,14 @@ enum {
# 6: 256K Bytes
# 7: 512K Bytes
*/
-#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (8)
+#define OCRDMA_MAX_Q_PAGE_SIZE_CNT 8
#define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES)
-#define MAX_OCRDMA_QP_PAGES (8)
+#define MAX_OCRDMA_QP_PAGES 8
#define OCRDMA_MAX_WQE_MEM_SIZE (MAX_OCRDMA_QP_PAGES * OCRDMA_MIN_HQ_PAGE_SIZE)
-#define OCRDMA_CREATE_CQ_MAX_PAGES (4)
-#define OCRDMA_DPP_CQE_SIZE (4)
+#define OCRDMA_CREATE_CQ_MAX_PAGES 4
+#define OCRDMA_DPP_CQE_SIZE 4
#define OCRDMA_GEN2_MAX_CQE 1024
#define OCRDMA_GEN2_CQ_PAGE_SIZE 4096
@@ -234,7 +236,7 @@ struct ocrdma_mqe_sge {
enum {
OCRDMA_MQE_HDR_EMB_SHIFT = 0,
- OCRDMA_MQE_HDR_EMB_MASK = Bit(0),
+ OCRDMA_MQE_HDR_EMB_MASK = BIT(0),
OCRDMA_MQE_HDR_SGE_CNT_SHIFT = 3,
OCRDMA_MQE_HDR_SGE_CNT_MASK = 0x1F << OCRDMA_MQE_HDR_SGE_CNT_SHIFT,
OCRDMA_MQE_HDR_SPECIAL_SHIFT = 24,
@@ -288,7 +290,7 @@ struct ocrdma_pa {
u32 hi;
};
-#define MAX_OCRDMA_EQ_PAGES (8)
+#define MAX_OCRDMA_EQ_PAGES 8
struct ocrdma_create_eq_req {
struct ocrdma_mbx_hdr req;
u32 num_pages;
@@ -300,7 +302,7 @@ struct ocrdma_create_eq_req {
};
enum {
- OCRDMA_CREATE_EQ_VALID = Bit(29),
+ OCRDMA_CREATE_EQ_VALID = BIT(29),
OCRDMA_CREATE_EQ_CNT_SHIFT = 26,
OCRDMA_CREATE_CQ_DELAY_SHIFT = 13,
};
@@ -310,7 +312,7 @@ struct ocrdma_create_eq_rsp {
u32 vector_eqid;
};
-#define OCRDMA_EQ_MINOR_OTHER (0x1)
+#define OCRDMA_EQ_MINOR_OTHER 0x1
enum {
OCRDMA_MCQE_STATUS_SHIFT = 0,
@@ -318,13 +320,13 @@ enum {
OCRDMA_MCQE_ESTATUS_SHIFT = 16,
OCRDMA_MCQE_ESTATUS_MASK = 0xFFFF << OCRDMA_MCQE_ESTATUS_SHIFT,
OCRDMA_MCQE_CONS_SHIFT = 27,
- OCRDMA_MCQE_CONS_MASK = Bit(27),
+ OCRDMA_MCQE_CONS_MASK = BIT(27),
OCRDMA_MCQE_CMPL_SHIFT = 28,
- OCRDMA_MCQE_CMPL_MASK = Bit(28),
+ OCRDMA_MCQE_CMPL_MASK = BIT(28),
OCRDMA_MCQE_AE_SHIFT = 30,
- OCRDMA_MCQE_AE_MASK = Bit(30),
+ OCRDMA_MCQE_AE_MASK = BIT(30),
OCRDMA_MCQE_VALID_SHIFT = 31,
- OCRDMA_MCQE_VALID_MASK = Bit(31)
+ OCRDMA_MCQE_VALID_MASK = BIT(31)
};
struct ocrdma_mcqe {
@@ -335,13 +337,13 @@ struct ocrdma_mcqe {
};
enum {
- OCRDMA_AE_MCQE_QPVALID = Bit(31),
+ OCRDMA_AE_MCQE_QPVALID = BIT(31),
OCRDMA_AE_MCQE_QPID_MASK = 0xFFFF,
- OCRDMA_AE_MCQE_CQVALID = Bit(31),
+ OCRDMA_AE_MCQE_CQVALID = BIT(31),
OCRDMA_AE_MCQE_CQID_MASK = 0xFFFF,
- OCRDMA_AE_MCQE_VALID = Bit(31),
- OCRDMA_AE_MCQE_AE = Bit(30),
+ OCRDMA_AE_MCQE_VALID = BIT(31),
+ OCRDMA_AE_MCQE_AE = BIT(30),
OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT = 16,
OCRDMA_AE_MCQE_EVENT_TYPE_MASK =
0xFF << OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT,
@@ -382,9 +384,9 @@ enum {
OCRDMA_AE_MPA_MCQE_EVENT_TYPE_MASK = 0xFF <<
OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT,
OCRDMA_AE_MPA_MCQE_EVENT_AE_SHIFT = 30,
- OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = Bit(30),
+ OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = BIT(30),
OCRDMA_AE_MPA_MCQE_EVENT_VALID_SHIFT = 31,
- OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = Bit(31)
+ OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = BIT(31)
};
struct ocrdma_ae_mpa_mcqe {
@@ -408,9 +410,9 @@ enum {
OCRDMA_AE_QP_MCQE_EVENT_TYPE_MASK = 0xFF <<
OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT,
OCRDMA_AE_QP_MCQE_EVENT_AE_SHIFT = 30,
- OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = Bit(30),
+ OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = BIT(30),
OCRDMA_AE_QP_MCQE_EVENT_VALID_SHIFT = 31,
- OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = Bit(31)
+ OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = BIT(31)
};
struct ocrdma_ae_qp_mcqe {
@@ -422,7 +424,12 @@ struct ocrdma_ae_qp_mcqe {
#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
-#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
+
+enum ocrdma_async_grp5_events {
+ OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
+ OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02,
+ OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03
+};
enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_CQ_ERROR = 0x00,
@@ -440,9 +447,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
/* mailbox command request and responses */
enum {
OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
- OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = Bit(2),
+ OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = BIT(2),
OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT = 3,
- OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = Bit(3),
+ OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = BIT(3),
OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT = 8,
OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK = 0xFFFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT,
@@ -525,8 +532,8 @@ struct ocrdma_mbx_query_config {
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
- u32 max_mr_size_lo;
u32 max_mr_size_hi;
+ u32 max_mr_size_lo;
u32 max_num_mr_pbl;
u32 max_mw;
u32 max_fmr;
@@ -580,17 +587,26 @@ enum {
OCRDMA_FN_MODE_RDMA = 0x4
};
+enum {
+ OCRDMA_IF_TYPE_MASK = 0xFFFF0000,
+ OCRDMA_IF_TYPE_SHIFT = 0x10,
+ OCRDMA_PHY_TYPE_MASK = 0x0000FFFF,
+ OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000,
+ OCRDMA_FUTURE_DETAILS_SHIFT = 0x10,
+ OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF,
+ OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000,
+ OCRDMA_FSPEED_SUPP_SHIFT = 0x10,
+ OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF
+};
+
struct ocrdma_get_phy_info_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
- u16 phy_type;
- u16 interface_type;
+ u32 ityp_ptyp;
u32 misc_params;
- u16 ext_phy_details;
- u16 rsvd;
- u16 auto_speeds_supported;
- u16 fixed_speeds_supported;
+ u32 ftrdtl_exphydtl;
+ u32 fspeed_aspeed;
u32 future_use[2];
};
@@ -603,19 +619,34 @@ enum {
OCRDMA_PHY_SPEED_40GBPS = 0x20
};
+enum {
+ OCRDMA_PORT_NUM_MASK = 0x3F,
+ OCRDMA_PT_MASK = 0xC0,
+ OCRDMA_PT_SHIFT = 0x6,
+ OCRDMA_LINK_DUP_MASK = 0x0000FF00,
+ OCRDMA_LINK_DUP_SHIFT = 0x8,
+ OCRDMA_PHY_PS_MASK = 0x00FF0000,
+ OCRDMA_PHY_PS_SHIFT = 0x10,
+ OCRDMA_PHY_PFLT_MASK = 0xFF000000,
+ OCRDMA_PHY_PFLT_SHIFT = 0x18,
+ OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
+ OCRDMA_QOS_LNKSP_SHIFT = 0x10,
+ OCRDMA_LLST_MASK = 0xFF,
+ OCRDMA_PLFC_MASK = 0x00000400,
+ OCRDMA_PLFC_SHIFT = 0x8,
+ OCRDMA_PLRFC_MASK = 0x00000200,
+ OCRDMA_PLRFC_SHIFT = 0x8,
+ OCRDMA_PLTFC_MASK = 0x00000100,
+ OCRDMA_PLTFC_SHIFT = 0x8
+};
struct ocrdma_get_link_speed_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
- u8 pt_port_num;
- u8 link_duplex;
- u8 phys_port_speed;
- u8 phys_port_fault;
- u16 rsvd1;
- u16 qos_lnk_speed;
- u8 logical_lnk_status;
- u8 rsvd2[3];
+ u32 pflt_pps_ld_pnum;
+ u32 qos_lsp;
+ u32 res_lls;
};
enum {
@@ -639,9 +670,9 @@ enum {
OCRDMA_CREATE_CQ_PAGE_SIZE_MASK = 0xFF,
OCRDMA_CREATE_CQ_COALESCWM_SHIFT = 12,
- OCRDMA_CREATE_CQ_COALESCWM_MASK = Bit(13) | Bit(12),
- OCRDMA_CREATE_CQ_FLAGS_NODELAY = Bit(14),
- OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = Bit(15),
+ OCRDMA_CREATE_CQ_COALESCWM_MASK = BIT(13) | BIT(12),
+ OCRDMA_CREATE_CQ_FLAGS_NODELAY = BIT(14),
+ OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = BIT(15),
OCRDMA_CREATE_CQ_EQ_ID_MASK = 0xFFFF,
OCRDMA_CREATE_CQ_CQE_COUNT_MASK = 0xFFFF
@@ -654,8 +685,8 @@ enum {
OCRDMA_CREATE_CQ_EQID_SHIFT = 22,
OCRDMA_CREATE_CQ_CNT_SHIFT = 27,
- OCRDMA_CREATE_CQ_FLAGS_VALID = Bit(29),
- OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = Bit(31),
+ OCRDMA_CREATE_CQ_FLAGS_VALID = BIT(29),
+ OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = BIT(31),
OCRDMA_CREATE_CQ_DEF_FLAGS = OCRDMA_CREATE_CQ_FLAGS_VALID |
OCRDMA_CREATE_CQ_FLAGS_EVENTABLE |
OCRDMA_CREATE_CQ_FLAGS_NODELAY
@@ -666,8 +697,7 @@ struct ocrdma_create_cq_cmd {
u32 pgsz_pgcnt;
u32 ev_cnt_flags;
u32 eqn;
- u16 cqe_count;
- u16 pd_id;
+ u32 pdid_cqecnt;
u32 rsvd6;
struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
};
@@ -678,6 +708,10 @@ struct ocrdma_create_cq {
};
enum {
+ OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10
+};
+
+enum {
OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
};
@@ -695,8 +729,8 @@ enum {
OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22,
OCRDMA_CREATE_MQ_CQ_ID_SHIFT = 16,
OCRDMA_CREATE_MQ_RING_SIZE_SHIFT = 16,
- OCRDMA_CREATE_MQ_VALID = Bit(31),
- OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = Bit(0)
+ OCRDMA_CREATE_MQ_VALID = BIT(31),
+ OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = BIT(0)
};
struct ocrdma_create_mq_req {
@@ -747,7 +781,7 @@ enum {
OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT = 16,
OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT = 19,
OCRDMA_CREATE_QP_REQ_QPT_SHIFT = 29,
- OCRDMA_CREATE_QP_REQ_QPT_MASK = Bit(31) | Bit(30) | Bit(29),
+ OCRDMA_CREATE_QP_REQ_QPT_MASK = BIT(31) | BIT(30) | BIT(29),
OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT = 0,
OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK = 0xFFFF,
@@ -762,23 +796,23 @@ enum {
OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT,
OCRDMA_CREATE_QP_REQ_FMR_EN_SHIFT = 0,
- OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = Bit(0),
+ OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = BIT(0),
OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_SHIFT = 1,
- OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = Bit(1),
+ OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = BIT(1),
OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_SHIFT = 2,
- OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = Bit(2),
+ OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = BIT(2),
OCRDMA_CREATE_QP_REQ_INB_WREN_SHIFT = 3,
- OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = Bit(3),
+ OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = BIT(3),
OCRDMA_CREATE_QP_REQ_INB_RDEN_SHIFT = 4,
- OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = Bit(4),
+ OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = BIT(4),
OCRDMA_CREATE_QP_REQ_USE_SRQ_SHIFT = 5,
- OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = Bit(5),
+ OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = BIT(5),
OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_SHIFT = 6,
- OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = Bit(6),
+ OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = BIT(6),
OCRDMA_CREATE_QP_REQ_ENABLE_DPP_SHIFT = 7,
- OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = Bit(7),
+ OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = BIT(7),
OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_SHIFT = 8,
- OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = Bit(8),
+ OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = BIT(8),
OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT = 16,
OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK = 0xFFFF <<
OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT,
@@ -891,7 +925,7 @@ enum {
OCRDMA_CREATE_QP_RSP_SQ_ID_MASK = 0xFFFF <<
OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT,
- OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = Bit(0),
+ OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = BIT(0),
OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT = 1,
OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK = 0x7FFF <<
OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT,
@@ -928,38 +962,38 @@ enum {
OCRDMA_MODIFY_QP_ID_SHIFT = 0,
OCRDMA_MODIFY_QP_ID_MASK = 0xFFFF,
- OCRDMA_QP_PARA_QPS_VALID = Bit(0),
- OCRDMA_QP_PARA_SQD_ASYNC_VALID = Bit(1),
- OCRDMA_QP_PARA_PKEY_VALID = Bit(2),
- OCRDMA_QP_PARA_QKEY_VALID = Bit(3),
- OCRDMA_QP_PARA_PMTU_VALID = Bit(4),
- OCRDMA_QP_PARA_ACK_TO_VALID = Bit(5),
- OCRDMA_QP_PARA_RETRY_CNT_VALID = Bit(6),
- OCRDMA_QP_PARA_RRC_VALID = Bit(7),
- OCRDMA_QP_PARA_RQPSN_VALID = Bit(8),
- OCRDMA_QP_PARA_MAX_IRD_VALID = Bit(9),
- OCRDMA_QP_PARA_MAX_ORD_VALID = Bit(10),
- OCRDMA_QP_PARA_RNT_VALID = Bit(11),
- OCRDMA_QP_PARA_SQPSN_VALID = Bit(12),
- OCRDMA_QP_PARA_DST_QPN_VALID = Bit(13),
- OCRDMA_QP_PARA_MAX_WQE_VALID = Bit(14),
- OCRDMA_QP_PARA_MAX_RQE_VALID = Bit(15),
- OCRDMA_QP_PARA_SGE_SEND_VALID = Bit(16),
- OCRDMA_QP_PARA_SGE_RECV_VALID = Bit(17),
- OCRDMA_QP_PARA_SGE_WR_VALID = Bit(18),
- OCRDMA_QP_PARA_INB_RDEN_VALID = Bit(19),
- OCRDMA_QP_PARA_INB_WREN_VALID = Bit(20),
- OCRDMA_QP_PARA_FLOW_LBL_VALID = Bit(21),
- OCRDMA_QP_PARA_BIND_EN_VALID = Bit(22),
- OCRDMA_QP_PARA_ZLKEY_EN_VALID = Bit(23),
- OCRDMA_QP_PARA_FMR_EN_VALID = Bit(24),
- OCRDMA_QP_PARA_INBAT_EN_VALID = Bit(25),
- OCRDMA_QP_PARA_VLAN_EN_VALID = Bit(26),
-
- OCRDMA_MODIFY_QP_FLAGS_RD = Bit(0),
- OCRDMA_MODIFY_QP_FLAGS_WR = Bit(1),
- OCRDMA_MODIFY_QP_FLAGS_SEND = Bit(2),
- OCRDMA_MODIFY_QP_FLAGS_ATOMIC = Bit(3)
+ OCRDMA_QP_PARA_QPS_VALID = BIT(0),
+ OCRDMA_QP_PARA_SQD_ASYNC_VALID = BIT(1),
+ OCRDMA_QP_PARA_PKEY_VALID = BIT(2),
+ OCRDMA_QP_PARA_QKEY_VALID = BIT(3),
+ OCRDMA_QP_PARA_PMTU_VALID = BIT(4),
+ OCRDMA_QP_PARA_ACK_TO_VALID = BIT(5),
+ OCRDMA_QP_PARA_RETRY_CNT_VALID = BIT(6),
+ OCRDMA_QP_PARA_RRC_VALID = BIT(7),
+ OCRDMA_QP_PARA_RQPSN_VALID = BIT(8),
+ OCRDMA_QP_PARA_MAX_IRD_VALID = BIT(9),
+ OCRDMA_QP_PARA_MAX_ORD_VALID = BIT(10),
+ OCRDMA_QP_PARA_RNT_VALID = BIT(11),
+ OCRDMA_QP_PARA_SQPSN_VALID = BIT(12),
+ OCRDMA_QP_PARA_DST_QPN_VALID = BIT(13),
+ OCRDMA_QP_PARA_MAX_WQE_VALID = BIT(14),
+ OCRDMA_QP_PARA_MAX_RQE_VALID = BIT(15),
+ OCRDMA_QP_PARA_SGE_SEND_VALID = BIT(16),
+ OCRDMA_QP_PARA_SGE_RECV_VALID = BIT(17),
+ OCRDMA_QP_PARA_SGE_WR_VALID = BIT(18),
+ OCRDMA_QP_PARA_INB_RDEN_VALID = BIT(19),
+ OCRDMA_QP_PARA_INB_WREN_VALID = BIT(20),
+ OCRDMA_QP_PARA_FLOW_LBL_VALID = BIT(21),
+ OCRDMA_QP_PARA_BIND_EN_VALID = BIT(22),
+ OCRDMA_QP_PARA_ZLKEY_EN_VALID = BIT(23),
+ OCRDMA_QP_PARA_FMR_EN_VALID = BIT(24),
+ OCRDMA_QP_PARA_INBAT_EN_VALID = BIT(25),
+ OCRDMA_QP_PARA_VLAN_EN_VALID = BIT(26),
+
+ OCRDMA_MODIFY_QP_FLAGS_RD = BIT(0),
+ OCRDMA_MODIFY_QP_FLAGS_WR = BIT(1),
+ OCRDMA_MODIFY_QP_FLAGS_SEND = BIT(2),
+ OCRDMA_MODIFY_QP_FLAGS_ATOMIC = BIT(3)
};
enum {
@@ -978,15 +1012,15 @@ enum {
OCRDMA_QP_PARAMS_MAX_SGE_SEND_MASK = 0xFFFF <<
OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT,
- OCRDMA_QP_PARAMS_FLAGS_FMR_EN = Bit(0),
- OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = Bit(1),
- OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = Bit(2),
- OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = Bit(3),
- OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = Bit(4),
+ OCRDMA_QP_PARAMS_FLAGS_FMR_EN = BIT(0),
+ OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = BIT(1),
+ OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = BIT(2),
+ OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = BIT(3),
+ OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = BIT(4),
OCRDMA_QP_PARAMS_STATE_SHIFT = 5,
- OCRDMA_QP_PARAMS_STATE_MASK = Bit(5) | Bit(6) | Bit(7),
- OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = Bit(8),
- OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = Bit(9),
+ OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7),
+ OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8),
+ OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9),
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16,
OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF <<
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT,
@@ -1231,7 +1265,6 @@ struct ocrdma_destroy_srq {
enum {
OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
- OCRDMA_PD_MAX_DPP_ENABLED_QP = 8,
OCRDMA_DPP_PAGE_SIZE = 4096
};
@@ -1242,7 +1275,7 @@ struct ocrdma_alloc_pd {
};
enum {
- OCRDMA_ALLOC_PD_RSP_DPP = Bit(16),
+ OCRDMA_ALLOC_PD_RSP_DPP = BIT(16),
OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT = 20,
OCRDMA_ALLOC_PD_RSP_PDID_MASK = 0xFFFF,
};
@@ -1274,18 +1307,18 @@ enum {
OCRDMA_ALLOC_LKEY_PD_ID_MASK = 0xFFFF,
OCRDMA_ALLOC_LKEY_ADDR_CHECK_SHIFT = 0,
- OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = Bit(0),
+ OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = BIT(0),
OCRDMA_ALLOC_LKEY_FMR_SHIFT = 1,
- OCRDMA_ALLOC_LKEY_FMR_MASK = Bit(1),
+ OCRDMA_ALLOC_LKEY_FMR_MASK = BIT(1),
OCRDMA_ALLOC_LKEY_REMOTE_INV_SHIFT = 2,
- OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = Bit(2),
+ OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = BIT(2),
OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT = 3,
- OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = Bit(3),
+ OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = BIT(3),
OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT = 4,
- OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = Bit(4),
+ OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = BIT(4),
OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT = 5,
- OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = Bit(5),
- OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = Bit(6),
+ OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = BIT(5),
+ OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = BIT(6),
OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT = 6,
OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT = 16,
OCRDMA_ALLOC_LKEY_PBL_SIZE_MASK = 0xFFFF <<
@@ -1344,21 +1377,21 @@ enum {
OCRDMA_REG_NSMR_HPAGE_SIZE_MASK = 0xFF <<
OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT,
OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT = 24,
- OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = Bit(24),
+ OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = BIT(24),
OCRDMA_REG_NSMR_ZB_SHIFT = 25,
- OCRDMA_REG_NSMR_ZB_SHIFT_MASK = Bit(25),
+ OCRDMA_REG_NSMR_ZB_SHIFT_MASK = BIT(25),
OCRDMA_REG_NSMR_REMOTE_INV_SHIFT = 26,
- OCRDMA_REG_NSMR_REMOTE_INV_MASK = Bit(26),
+ OCRDMA_REG_NSMR_REMOTE_INV_MASK = BIT(26),
OCRDMA_REG_NSMR_REMOTE_WR_SHIFT = 27,
- OCRDMA_REG_NSMR_REMOTE_WR_MASK = Bit(27),
+ OCRDMA_REG_NSMR_REMOTE_WR_MASK = BIT(27),
OCRDMA_REG_NSMR_REMOTE_RD_SHIFT = 28,
- OCRDMA_REG_NSMR_REMOTE_RD_MASK = Bit(28),
+ OCRDMA_REG_NSMR_REMOTE_RD_MASK = BIT(28),
OCRDMA_REG_NSMR_LOCAL_WR_SHIFT = 29,
- OCRDMA_REG_NSMR_LOCAL_WR_MASK = Bit(29),
+ OCRDMA_REG_NSMR_LOCAL_WR_MASK = BIT(29),
OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT = 30,
- OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = Bit(30),
+ OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = BIT(30),
OCRDMA_REG_NSMR_LAST_SHIFT = 31,
- OCRDMA_REG_NSMR_LAST_MASK = Bit(31)
+ OCRDMA_REG_NSMR_LAST_MASK = BIT(31)
};
struct ocrdma_reg_nsmr {
@@ -1385,7 +1418,7 @@ enum {
OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT,
OCRDMA_REG_NSMR_CONT_LAST_SHIFT = 31,
- OCRDMA_REG_NSMR_CONT_LAST_MASK = Bit(31)
+ OCRDMA_REG_NSMR_CONT_LAST_MASK = BIT(31)
};
struct ocrdma_reg_nsmr_cont {
@@ -1531,7 +1564,7 @@ struct ocrdma_delete_ah_tbl_rsp {
enum {
OCRDMA_EQE_VALID_SHIFT = 0,
- OCRDMA_EQE_VALID_MASK = Bit(0),
+ OCRDMA_EQE_VALID_MASK = BIT(0),
OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE,
OCRDMA_EQE_RESOURCE_ID_SHIFT = 16,
OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF <<
@@ -1589,11 +1622,11 @@ enum {
OCRDMA_CQE_UD_STATUS_MASK = 0x7 << OCRDMA_CQE_UD_STATUS_SHIFT,
OCRDMA_CQE_STATUS_SHIFT = 16,
OCRDMA_CQE_STATUS_MASK = 0xFF << OCRDMA_CQE_STATUS_SHIFT,
- OCRDMA_CQE_VALID = Bit(31),
- OCRDMA_CQE_INVALIDATE = Bit(30),
- OCRDMA_CQE_QTYPE = Bit(29),
- OCRDMA_CQE_IMM = Bit(28),
- OCRDMA_CQE_WRITE_IMM = Bit(27),
+ OCRDMA_CQE_VALID = BIT(31),
+ OCRDMA_CQE_INVALIDATE = BIT(30),
+ OCRDMA_CQE_QTYPE = BIT(29),
+ OCRDMA_CQE_IMM = BIT(28),
+ OCRDMA_CQE_WRITE_IMM = BIT(27),
OCRDMA_CQE_QTYPE_SQ = 0,
OCRDMA_CQE_QTYPE_RQ = 1,
OCRDMA_CQE_SRCQP_MASK = 0xFFFFFF
@@ -1737,8 +1770,8 @@ struct ocrdma_grh {
u16 rsvd;
} __packed;
-#define OCRDMA_AV_VALID Bit(7)
-#define OCRDMA_AV_VLAN_VALID Bit(1)
+#define OCRDMA_AV_VALID BIT(7)
+#define OCRDMA_AV_VLAN_VALID BIT(1)
struct ocrdma_av {
struct ocrdma_eth_vlan eth_hdr;
@@ -1896,12 +1929,62 @@ struct ocrdma_rdma_stats_resp {
struct ocrdma_rx_dbg_stats rx_dbg_stats;
} __packed;
+enum {
+ OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_CV_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000,
+ OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000,
+ OCRDMA_HBA_ATTRB_PT_SHIFT = 0x1E,
+ OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000,
+ OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000,
+ OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_NETFIL_MASK = 0xFF
+};
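For orientation (an illustration, not part of the patch): the packed words that replace the old discrete fields below are decoded with the usual mask-and-shift idiom. A hypothetical decode of the PCI IDs carried in the pci_did_vid word added further down, where attribs is an assumed pointer to struct mgmt_controller_attrib:

	u16 vendor = attribs->pci_did_vid & OCRDMA_HBA_ATTRB_PCI_VID_MASK;
	u16 device = (attribs->pci_did_vid & OCRDMA_HBA_ATTRB_PCI_DID_MASK) >>
			OCRDMA_HBA_ATTRB_PCI_DID_SHIFT;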
struct mgmt_hba_attribs {
u8 flashrom_version_string[32];
u8 manufacturer_name[32];
u32 supported_modes;
- u32 rsvd0[3];
+ u32 rsvd_eprom_verhi_verlo;
+ u32 mbx_ds_ver;
+ u32 epfw_ds_ver;
u8 ncsi_ver_string[12];
u32 default_extended_timeout;
u8 controller_model_number[32];
@@ -1914,34 +1997,26 @@ struct mgmt_hba_attribs {
u8 driver_version_string[32];
u8 fw_on_flash_version_string[32];
u32 functionalities_supported;
- u16 max_cdblength;
- u8 asic_revision;
- u8 generational_guid[16];
- u8 hba_port_count;
- u16 default_link_down_timeout;
- u8 iscsi_ver_min_max;
- u8 multifunction_device;
- u8 cache_valid;
- u8 hba_status;
- u8 max_domains_supported;
- u8 phy_port;
+ u32 guid0_asicrev_cdblen;
+ u8 generational_guid[12];
+ u32 portcnt_guid15;
+ u32 mfuncdev_iscsi_ldtout;
+ u32 ptpnum_maxdoms_hbast_cv;
u32 firmware_post_status;
u32 hba_mtu[8];
- u32 rsvd1[4];
+ u32 res_asicgen_iscsi_feaures;
+ u32 rsvd1[3];
};
struct mgmt_controller_attrib {
struct mgmt_hba_attribs hba_attribs;
- u16 pci_vendor_id;
- u16 pci_device_id;
- u16 pci_sub_vendor_id;
- u16 pci_sub_system_id;
- u8 pci_bus_number;
- u8 pci_device_number;
- u8 pci_function_number;
- u8 interface_type;
- u64 unique_identifier;
- u32 rsvd0[5];
+ u32 pci_did_vid;
+ u32 pci_ssid_svid;
+ u32 ityp_fnum_devnum_bnum;
+ u32 uid_hi;
+ u32 uid_lo;
+ u32 res_nnetfil;
+ u32 rsvd0[4];
};
struct ocrdma_get_ctrl_attribs_rsp {
@@ -1949,5 +2024,79 @@ struct ocrdma_get_ctrl_attribs_rsp {
struct mgmt_controller_attrib ctrl_attribs;
};
+#define OCRDMA_SUBSYS_DCBX 0x10
+
+enum OCRDMA_DCBX_OPCODE {
+ OCRDMA_CMD_GET_DCBX_CONFIG = 0x01
+};
+
+enum OCRDMA_DCBX_PARAM_TYPE {
+ OCRDMA_PARAMETER_TYPE_ADMIN = 0x00,
+ OCRDMA_PARAMETER_TYPE_OPER = 0x01,
+ OCRDMA_PARAMETER_TYPE_PEER = 0x02
+};
+
+enum OCRDMA_DCBX_APP_PROTO {
+ OCRDMA_APP_PROTO_ROCE = 0x8915
+};
+
+enum OCRDMA_DCBX_PROTO {
+ OCRDMA_PROTO_SELECT_L2 = 0x00,
+ OCRDMA_PROTO_SELECT_L4 = 0x01
+};
+
+enum OCRDMA_DCBX_APP_PARAM {
+ OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF,
+ OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF,
+ OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10,
+ OCRDMA_APP_PARAM_VALID_MASK = 0xFF,
+ OCRDMA_APP_PARAM_VALID_SHIFT = 0x18
+};
+
+enum OCRDMA_DCBX_STATE_FLAGS {
+ OCRDMA_STATE_FLAG_ENABLED = 0x01,
+ OCRDMA_STATE_FLAG_ADDVERTISED = 0x02,
+ OCRDMA_STATE_FLAG_WILLING = 0x04,
+ OCRDMA_STATE_FLAG_SYNC = 0x08,
+ OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000,
+ OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000
+};
+
+enum OCRDMA_TCV_AEV_OPV_ST {
+ OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF,
+ OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18,
+ OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10,
+ OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08,
+ OCRDMA_DCBX_STATE_MASK = 0xFF
+};
+
+struct ocrdma_app_parameter {
+ u32 valid_proto_app;
+ u32 oui;
+ u32 app_prio[2];
+};
+
+struct ocrdma_dcbx_cfg {
+ u32 tcv_aev_opv_st;
+ u32 tc_state;
+ u32 pfc_state;
+ u32 qcn_state;
+ u32 appl_state;
+ u32 ll_state;
+ u32 tc_bw[2];
+ u32 tc_prio[8];
+ u32 pfc_prio[2];
+ struct ocrdma_app_parameter app_param[15];
+};
+
+struct ocrdma_get_dcbx_cfg_req {
+ struct ocrdma_mbx_hdr hdr;
+ u32 param_type;
+} __packed;
+
+struct ocrdma_get_dcbx_cfg_rsp {
+ struct ocrdma_mbx_rsp hdr;
+ struct ocrdma_dcbx_cfg cfg;
+} __packed;
#endif /* __OCRDMA_SLI_H__ */
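The Bit() to BIT() conversion throughout ocrdma_sli.h drops a driver-local helper in favour of the generic kernel macro, which in this era is provided by <linux/bitops.h> as roughly:

#define BIT(nr)			(1UL << (nr))

	/* so, for example, the QP state field above combines as */
	u32 state_mask = BIT(5) | BIT(6) | BIT(7);	/* 0xE0 */

Because the macro is UL-based, single-bit masks such as BIT(31) (used for OCRDMA_REG_NSMR_LAST_MASK) stay well defined, which a plain signed (1 << 31) would not be.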
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 8bae8718fa53..4c68305ee781 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -69,11 +69,11 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
- attr->max_mr_size = ~0ull;
+ attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor;
attr->vendor_part_id = dev->nic_info.pdev->device;
- attr->hw_ver = 0;
+ attr->hw_ver = dev->asic_id;
attr->max_qp = dev->attr.max_qp;
attr->max_ah = OCRDMA_MAX_AH;
attr->max_qp_wr = dev->attr.max_wqe;
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
- attr->max_fast_reg_page_list_len = 0;
+ attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
attr->max_pkeys = 1;
return 0;
}
@@ -268,7 +268,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
pd->dpp_enabled =
ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
pd->num_dpp_qp =
- pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ pd->dpp_enabled ? (dev->nic_info.db_page_size /
+ dev->attr.wqe_size) : 0;
}
retry:
@@ -328,7 +329,10 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
struct ocrdma_pd *pd = uctx->cntxt_pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
- BUG_ON(uctx->pd_in_use);
+ if (uctx->pd_in_use) {
+ pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
+ __func__, dev->id, pd->id);
+ }
uctx->cntxt_pd = NULL;
status = _ocrdma_dealloc_pd(dev, pd);
return status;
@@ -384,7 +388,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
memset(&resp, 0, sizeof(resp));
resp.ah_tbl_len = ctx->ah_tbl.len;
- resp.ah_tbl_page = ctx->ah_tbl.pa;
+ resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
if (status)
@@ -843,6 +847,13 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
if (mr->umem)
ib_umem_release(mr->umem);
kfree(mr);
+
+ /* Don't stop cleanup, in case FW is unresponsive */
+ if (dev->mqe_ctx.fw_error_state) {
+ status = 0;
+ pr_err("%s(%d) fw not responding.\n",
+ __func__, dev->id);
+ }
return status;
}
@@ -859,7 +870,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
uresp.page_size = PAGE_ALIGN(cq->len);
uresp.num_pages = 1;
uresp.max_hw_cqe = cq->max_hw_cqe;
- uresp.page_addr[0] = cq->pa;
+ uresp.page_addr[0] = virt_to_phys(cq->va);
uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
uresp.db_page_size = dev->nic_info.db_page_size;
uresp.phase_change = cq->phase_change ? 1 : 0;
@@ -1112,13 +1123,13 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.sq_dbid = qp->sq.dbid;
uresp.num_sq_pages = 1;
uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
- uresp.sq_page_addr[0] = qp->sq.pa;
+ uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
uresp.num_wqe_allocated = qp->sq.max_cnt;
if (!srq) {
uresp.rq_dbid = qp->rq.dbid;
uresp.num_rq_pages = 1;
uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
- uresp.rq_page_addr[0] = qp->rq.pa;
+ uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
uresp.num_rqe_allocated = qp->rq.max_cnt;
}
uresp.db_page_addr = usr_db;
@@ -1669,7 +1680,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
memset(&uresp, 0, sizeof(uresp));
uresp.rq_dbid = srq->rq.dbid;
uresp.num_rq_pages = 1;
- uresp.rq_page_addr[0] = srq->rq.pa;
+ uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
uresp.rq_page_size = srq->rq.len;
uresp.db_page_addr = dev->nic_info.unmapped_db +
(srq->pd->id * dev->nic_info.db_page_size);
@@ -2054,6 +2065,13 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
while (wr) {
+ if (qp->qp_type == IB_QPT_UD &&
+ (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)) {
+ *bad_wr = wr;
+ status = -EINVAL;
+ break;
+ }
if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
wr->num_sge > qp->sq.max_sges) {
*bad_wr = wr;
@@ -2488,6 +2506,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
*stop = true;
expand = false;
}
+ } else if (is_hw_sq_empty(qp)) {
+ /* Do nothing */
+ expand = false;
+ *polled = false;
+ *stop = false;
} else {
*polled = true;
expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
@@ -2593,6 +2616,11 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
*stop = true;
expand = false;
}
+ } else if (is_hw_rq_empty(qp)) {
+ /* Do nothing */
+ expand = false;
+ *polled = false;
+ *stop = false;
} else {
*polled = true;
expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
@@ -2818,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
if (cq->first_arm) {
ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
cq->first_arm = false;
- goto skip_defer;
}
- cq->deferred_arm = true;
-skip_defer:
+ cq->deferred_arm = true;
cq->deferred_sol = sol_needed;
spin_unlock_irqrestore(&cq->cq_lock, flags);
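Several of the ocrdma_verbs.c hunks above change the mmap cookies reported to user space from the cached DMA handle (->pa) to virt_to_phys() of the kernel virtual address, presumably because the value later used to remap the pages into the process must be a CPU physical address, which need not equal the bus address returned by the coherent DMA allocator. A minimal sketch of the pattern, assuming the queue buffer comes from dma_alloc_coherent() (pdev is an assumed struct pci_dev pointer):

	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
	if (!cq->va)
		return -ENOMEM;
	/* the device keeps using the DMA handle in cq->pa */
	uresp.page_addr[0] = virt_to_phys(cq->va);	/* user mmap gets the CPU physical address */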
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 799a0c3bffc4..6abd3ed3cd51 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
struct qib_qp_iter *iter;
loff_t n = *pos;
+ rcu_read_lock();
iter = qib_qp_iter_init(s->private);
if (!iter)
return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
{
- /* nothing for now */
+ rcu_read_unlock();
}
static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
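The qib_debugfs.c change above, together with the qib_qp.c hunks further down that drop the per-step rcu_read_lock()/rcu_read_unlock() and reference counting, leans on the seq_file contract that ->stop() is always called after ->start(); a read-side lock taken in ->start() therefore covers every ->next()/->show() of the walk. A minimal sketch of that shape, with hypothetical foo_* lookup helpers:

#include <linux/seq_file.h>
#include <linux/rcupdate.h>

static void *foo_seq_start(struct seq_file *s, loff_t *pos)
{
	rcu_read_lock();			/* held for the whole walk */
	return foo_first_entry(s->private, *pos);
}

static void *foo_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return foo_next_entry(v);
}

static void foo_seq_stop(struct seq_file *s, void *v)
{
	rcu_read_unlock();			/* pairs with ->start() */
}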
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index cab610ccd50e..81854586c081 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -89,7 +89,6 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- *dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 8d3c78ddc906..729da39c49ed 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1222,7 +1222,7 @@ static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
-static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
+static const struct pci_device_id qib_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
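DEFINE_PCI_DEVICE_TABLE() was a deprecated wrapper that expanded to little more than a const struct pci_device_id array declaration; here (and in the usnic hunk below) it is simply open coded. The resulting shape, using an illustrative table name:

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }					/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);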
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 22c720e5740d..636be117b578 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2476,7 +2476,7 @@ int qib_create_agents(struct qib_ibdev *dev)
ibp = &dd->pport[p].ibport_data;
agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
NULL, 0, send_handler,
- NULL, NULL);
+ NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
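The extra trailing 0 in the ib_register_mad_agent() call above appears to match a new final flags argument added to that API in the same cycle (0 keeps the previous behaviour); that reading is an inference from the hunk, not something it states. With comments on what each argument supplies:

	agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
				      NULL, 0,		/* no mad_reg_req, rmpp_version 0 */
				      send_handler,
				      NULL, NULL,	/* no recv handler, no context */
				      0);		/* new flags word, assumed meaning */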
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 7fcc150d603c..6ddc0264aad2 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
struct qib_qp *pqp = iter->qp;
struct qib_qp *qp;
- rcu_read_lock();
for (; n < dev->qp_table_size; n++) {
if (pqp)
qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
qp = rcu_dereference(dev->qp_table[n]);
pqp = qp;
if (qp) {
- if (iter->qp)
- atomic_dec(&iter->qp->refcount);
- atomic_inc(&qp->refcount);
- rcu_read_unlock();
iter->qp = qp;
iter->n = n;
return 0;
}
}
- rcu_read_unlock();
- if (iter->qp)
- atomic_dec(&iter->qp->refcount);
return ret;
}
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2bc1d2b96298..74f90b2619f6 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
* Call with current->mm->mmap_sem held.
*/
static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+ struct page **p)
{
unsigned long lock_limit;
size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
num_pages - got, 1, 1,
- p + got, vma);
+ p + got, NULL);
if (ret < 0)
goto bail_release;
}
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+ ret = __qib_get_user_pages(start_page, num_pages, p);
up_write(&current->mm->mmap_sem);
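With the VMA output array gone, the qib_user_pages.c path simply passes NULL for the last argument of get_user_pages(), since the caller never used the returned VMAs. A minimal sketch of the resulting pinning pattern, mirroring the driver's locking (mmap_sem taken for write around the call):

	down_write(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, start_page, num_pages,
			     1, 1,		/* write, force */
			     pages, NULL);	/* no VMA array needed */
	up_write(&current->mm->mmap_sem);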
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index fb6d026f92cd..0d0f98695d53 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -490,7 +490,7 @@ out:
/* Start of PCI section */
-static DEFINE_PCI_DEVICE_TABLE(usnic_ib_pci_ids) = {
+static const struct pci_device_id usnic_ib_pci_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
{0,}
};
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 801a1d6937e4..417de1f32960 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -507,7 +507,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
if (err)
goto out_free_dev;
- if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev));
err = -EINVAL;