Diffstat (limited to 'include/rdma/ib_verbs.h')
-rw-r--r--  include/rdma/ib_verbs.h  | 149
1 file changed, 141 insertions(+), 8 deletions(-)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 31d30b1852e8..90b529f7a154 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -34,8 +34,6 @@
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
- *
- * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
*/
#if !defined(IB_VERBS_H)
@@ -93,7 +91,7 @@ enum ib_device_cap_flags {
IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
IB_DEVICE_SRQ_RESIZE = (1<<13),
IB_DEVICE_N_NOTIFY_CQ = (1<<14),
- IB_DEVICE_ZERO_STAG = (1<<15),
+ IB_DEVICE_LOCAL_DMA_LKEY = (1<<15),
IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */
IB_DEVICE_MEM_WINDOW = (1<<17),
/*
@@ -105,6 +103,8 @@ enum ib_device_cap_flags {
*/
IB_DEVICE_UD_IP_CSUM = (1<<18),
IB_DEVICE_UD_TSO = (1<<19),
+ IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
+ IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};
enum ib_atomic_cap {
@@ -150,6 +150,7 @@ struct ib_device_attr {
int max_srq;
int max_srq_wr;
int max_srq_sge;
+ unsigned int max_fast_reg_page_list_len;
u16 max_pkeys;
u8 local_ca_ack_delay;
};
@@ -226,6 +227,57 @@ static inline int ib_width_enum_to_int(enum ib_port_width width)
}
}
+struct ib_protocol_stats {
+ /* TBD... */
+};
+
+struct iw_protocol_stats {
+ u64 ipInReceives;
+ u64 ipInHdrErrors;
+ u64 ipInTooBigErrors;
+ u64 ipInNoRoutes;
+ u64 ipInAddrErrors;
+ u64 ipInUnknownProtos;
+ u64 ipInTruncatedPkts;
+ u64 ipInDiscards;
+ u64 ipInDelivers;
+ u64 ipOutForwDatagrams;
+ u64 ipOutRequests;
+ u64 ipOutDiscards;
+ u64 ipOutNoRoutes;
+ u64 ipReasmTimeout;
+ u64 ipReasmReqds;
+ u64 ipReasmOKs;
+ u64 ipReasmFails;
+ u64 ipFragOKs;
+ u64 ipFragFails;
+ u64 ipFragCreates;
+ u64 ipInMcastPkts;
+ u64 ipOutMcastPkts;
+ u64 ipInBcastPkts;
+ u64 ipOutBcastPkts;
+
+ u64 tcpRtoAlgorithm;
+ u64 tcpRtoMin;
+ u64 tcpRtoMax;
+ u64 tcpMaxConn;
+ u64 tcpActiveOpens;
+ u64 tcpPassiveOpens;
+ u64 tcpAttemptFails;
+ u64 tcpEstabResets;
+ u64 tcpCurrEstab;
+ u64 tcpInSegs;
+ u64 tcpOutSegs;
+ u64 tcpRetransSegs;
+ u64 tcpInErrs;
+ u64 tcpOutRsts;
+};
+
+union rdma_protocol_stats {
+ struct ib_protocol_stats ib;
+ struct iw_protocol_stats iw;
+};
+
struct ib_port_attr {
enum ib_port_state state;
enum ib_mtu max_mtu;
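
These per-transport counters are consumed through the new get_protocol_stats
device method added further down. As a hedged sketch of how an iWARP driver
might wire this up (everything with a my_/MY_ prefix is hypothetical, not part
of this patch):

static int my_iwarp_get_protocol_stats(struct ib_device *ibdev,
				       union rdma_protocol_stats *stats)
{
	struct my_iwarp_dev *dev = container_of(ibdev, struct my_iwarp_dev,
						ibdev);

	memset(stats, 0, sizeof *stats);
	/* Copy hardware MIB counters into the iWARP member of the union. */
	stats->iw.tcpInSegs      = my_hw_read_counter(dev, MY_TCP_IN_SEGS);
	stats->iw.tcpOutSegs     = my_hw_read_counter(dev, MY_TCP_OUT_SEGS);
	stats->iw.tcpRetransSegs = my_hw_read_counter(dev, MY_TCP_RETRANS);
	return 0;
}

The driver would then set ibdev->get_protocol_stats before calling
ib_register_device(). IB devices would fill in the ib member once struct
ib_protocol_stats grows real fields.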
@@ -413,6 +465,8 @@ enum ib_wc_opcode {
IB_WC_FETCH_ADD,
IB_WC_BIND_MW,
IB_WC_LSO,
+ IB_WC_LOCAL_INV,
+ IB_WC_FAST_REG_MR,
/*
* Set value of IB_WC_RECV so consumers can test if a completion is a
* receive by testing (opcode & IB_WC_RECV).
@@ -423,7 +477,8 @@ enum ib_wc_opcode {
enum ib_wc_flags {
IB_WC_GRH = 1,
- IB_WC_WITH_IMM = (1<<1)
+ IB_WC_WITH_IMM = (1<<1),
+ IB_WC_WITH_INVALIDATE = (1<<2),
};
struct ib_wc {
@@ -433,7 +488,10 @@ struct ib_wc {
u32 vendor_err;
u32 byte_len;
struct ib_qp *qp;
- __be32 imm_data;
+ union {
+ __be32 imm_data;
+ u32 invalidate_rkey;
+ } ex;
u32 src_qp;
int wc_flags;
u16 pkey_index;
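
Since imm_data and invalidate_rkey now overlay each other in the ex union,
consumers must test wc_flags before reading either member. A minimal polling
sketch, assuming hypothetical handle_imm() and note_invalidated_rkey()
helpers:

struct ib_wc wc;

while (ib_poll_cq(cq, 1, &wc) > 0) {
	if (wc.status != IB_WC_SUCCESS)
		continue;		/* real code would handle the error */

	if (wc.wc_flags & IB_WC_WITH_IMM)
		handle_imm(be32_to_cpu(wc.ex.imm_data));
	else if (wc.wc_flags & IB_WC_WITH_INVALIDATE)
		/* Send-with-invalidate arrived: the HCA has already
		 * invalidated this rkey on our behalf. */
		note_invalidated_rkey(wc.ex.invalidate_rkey);
}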
@@ -498,7 +556,8 @@ enum ib_qp_type {
};
enum ib_qp_create_flags {
- IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
+ IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
};
struct ib_qp_init_attr {
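
The new flag is requested through the create_flags field of ib_qp_init_attr
when the QP is created; a device that does not advertise
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK will fail the call. A brief sketch, with
CQ and capacity choices picked arbitrarily:

struct ib_qp_init_attr init_attr = {
	.send_cq	= send_cq,
	.recv_cq	= recv_cq,
	.sq_sig_type	= IB_SIGNAL_ALL_WR,
	.qp_type	= IB_QPT_UD,
	.create_flags	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	.cap		= { .max_send_wr = 64, .max_recv_wr = 64,
			    .max_send_sge = 1, .max_recv_sge = 1 },
};
struct ib_qp *qp = ib_create_qp(pd, &init_attr);

if (IS_ERR(qp))
	/* fall back to a QP that sees its own multicasts looped back */ ;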
@@ -627,6 +686,9 @@ enum ib_wr_opcode {
IB_WR_ATOMIC_FETCH_AND_ADD,
IB_WR_LSO,
IB_WR_SEND_WITH_INV,
+ IB_WR_RDMA_READ_WITH_INV,
+ IB_WR_LOCAL_INV,
+ IB_WR_FAST_REG_MR,
};
enum ib_send_flags {
@@ -643,6 +705,12 @@ struct ib_sge {
u32 lkey;
};
+struct ib_fast_reg_page_list {
+ struct ib_device *device;
+ u64 *page_list;
+ unsigned int max_page_list_len;
+};
+
struct ib_send_wr {
struct ib_send_wr *next;
u64 wr_id;
@@ -675,6 +743,15 @@ struct ib_send_wr {
u16 pkey_index; /* valid for GSI only */
u8 port_num; /* valid for DR SMPs on switch only */
} ud;
+ struct {
+ u64 iova_start;
+ struct ib_fast_reg_page_list *page_list;
+ unsigned int page_shift;
+ unsigned int page_list_len;
+ u32 length;
+ int access_flags;
+ u32 rkey;
+ } fast_reg;
} wr;
};
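
Posting a fast register work request amounts to filling the new wr.fast_reg
fields. A hedged sketch, assuming mr and page_list were set up via the
allocation calls documented below, and iova_start/npages supplied by the
caller:

struct ib_send_wr wr, *bad_wr;

memset(&wr, 0, sizeof wr);
wr.opcode			= IB_WR_FAST_REG_MR;
wr.wr_id			= (u64) (unsigned long) mr;
wr.send_flags			= IB_SEND_SIGNALED;
wr.wr.fast_reg.iova_start	= iova_start;
wr.wr.fast_reg.page_list	= page_list;
wr.wr.fast_reg.page_shift	= PAGE_SHIFT;
wr.wr.fast_reg.page_list_len	= npages;
wr.wr.fast_reg.length		= npages * PAGE_SIZE;
wr.wr.fast_reg.access_flags	= IB_ACCESS_LOCAL_WRITE |
				  IB_ACCESS_REMOTE_READ |
				  IB_ACCESS_REMOTE_WRITE;
wr.wr.fast_reg.rkey		= mr->rkey;

if (ib_post_send(qp, &wr, &bad_wr))
	/* registration could not be posted */ ;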
@@ -777,7 +854,7 @@ struct ib_cq {
struct ib_uobject *uobject;
ib_comp_handler comp_handler;
void (*event_handler)(struct ib_event *, void *);
- void * cq_context;
+ void *cq_context;
int cqe;
atomic_t usecnt; /* count number of work queues */
};
@@ -883,7 +960,7 @@ struct ib_dma_mapping_ops {
void (*sync_single_for_cpu)(struct ib_device *dev,
u64 dma_handle,
size_t size,
- enum dma_data_direction dir);
+ enum dma_data_direction dir);
void (*sync_single_for_device)(struct ib_device *dev,
u64 dma_handle,
size_t size,
@@ -919,6 +996,8 @@ struct ib_device {
struct iw_cm_verbs *iwcm;
+ int (*get_protocol_stats)(struct ib_device *device,
+ union rdma_protocol_stats *stats);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr);
int (*query_port)(struct ib_device *device,
@@ -1013,6 +1092,11 @@ struct ib_device {
int (*query_mr)(struct ib_mr *mr,
struct ib_mr_attr *mr_attr);
int (*dereg_mr)(struct ib_mr *mr);
+ struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
+ int max_page_list_len);
+ struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
+ int page_list_len);
+ void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
int (*rereg_phys_mr)(struct ib_mr *mr,
int mr_rereg_mask,
struct ib_pd *pd,
@@ -1065,6 +1149,7 @@ struct ib_device {
char node_desc[64];
__be64 node_guid;
+ u32 local_dma_lkey;
u8 node_type;
u8 phys_port_cnt;
};
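
A device that sets IB_DEVICE_LOCAL_DMA_LKEY exposes this device-wide lkey so
consumers no longer need a per-PD DMA memory region for local access. For
example, with dma_addr obtained from one of the ib_dma_map_*() helpers:

struct ib_sge sge = {
	.addr	= dma_addr,
	.length	= len,
	.lkey	= device->local_dma_lkey,	/* covers all local memory */
};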
@@ -1807,6 +1892,54 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
int ib_dereg_mr(struct ib_mr *mr);
/**
+ * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
+ * IB_WR_FAST_REG_MR send work request.
+ * @pd: The protection domain associated with the region.
+ * @max_page_list_len: requested max physical buffer list length to be
+ * used with fast register work requests for this MR.
+ */
+struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+
+/**
+ * ib_alloc_fast_reg_page_list - Allocates a page list array
+ * @device: ib device pointer.
+ * @page_list_len: size of the page list array to be allocated.
+ *
+ * This allocates and returns a struct ib_fast_reg_page_list * and a
+ * page_list array that is at least page_list_len in size. The actual
+ * size is returned in max_page_list_len. The caller is responsible
+ * for initializing the contents of the page_list array before posting
+ * a send work request with the IB_WR_FAST_REG_MR opcode.
+ *
+ * The page_list array entries must be translated using one of the
+ * ib_dma_*() functions just like the addresses passed to
+ * ib_map_phys_fmr(). Once ib_post_send() has been issued, the struct
+ * ib_fast_reg_page_list must not be modified by the caller until the
+ * IB_WR_FAST_REG_MR work request completes.
+ */
+struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
+ struct ib_device *device, int page_list_len);
+
+/**
+ * ib_free_fast_reg_page_list - Deallocates a previously allocated
+ * page list array.
+ * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
+ */
+void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
+
+/**
+ * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
+ * R_Key and L_Key.
+ * @mr: struct ib_mr pointer to be updated.
+ * @newkey: new key to be used.
+ */
+static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
+{
+ mr->lkey = (mr->lkey & 0xffffff00) | newkey;
+ mr->rkey = (mr->rkey & 0xffffff00) | newkey;
+}
+
+/**
* ib_alloc_mw - Allocates a memory window.
* @pd: The protection domain associated with the memory window.
*/
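
Putting the pieces above together, a hedged end-to-end sketch of the fast
registration flow; MY_MAX_PAGES and the my_ names are illustrative:

static int my_fast_reg_setup(struct ib_pd *pd, struct ib_device *device,
			     struct page **pages, int npages)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	static u8 my_key;
	int i;

	mr = ib_alloc_fast_reg_mr(pd, MY_MAX_PAGES);
	if (IS_ERR(mr))
		return PTR_ERR(mr);
	frpl = ib_alloc_fast_reg_page_list(device, MY_MAX_PAGES);
	if (IS_ERR(frpl)) {
		ib_dereg_mr(mr);
		return PTR_ERR(frpl);
	}

	/* page_list entries hold DMA addresses, as with ib_map_phys_fmr(). */
	for (i = 0; i < npages; ++i)
		frpl->page_list[i] = ib_dma_map_page(device, pages[i], 0,
						     PAGE_SIZE, DMA_TO_DEVICE);

	/* Bump the key before each reuse so stale remote references fail. */
	ib_update_fast_reg_key(mr, ++my_key);

	/* ...fill and post an IB_WR_FAST_REG_MR work request as sketched
	 * earlier; frpl must stay untouched until that request completes.
	 * On teardown: ib_free_fast_reg_page_list(frpl); ib_dereg_mr(mr). */
	return 0;
}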