author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-05 02:26:31 +0200
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-05 02:26:31 +0200
commit | 84e39eeb08c0ea7e9ec43ac820bf76a6fe8ecbad (patch) |
tree | 680f704b29ec68cee50a6456088ffac1902bbf95 /include/rdma |
parent | Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledfo... (diff) |
parent | Merge branches 'hfi1' and 'sge-limit' into k.o/for-4.8-2 (diff) |
Merge tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull second round of rdma updates from Doug Ledford:
"This can be split out into just two categories:
- fixes to the RDMA R/W API with regard to SG list length limits
(about 5 patches; see the sketch after this message)
- fixes/features for the Intel hfi1 driver (everything else)
The hfi1 driver is still being brought to full feature support by
Intel, and they have a lot of people working on it, so that amounts to
almost the entirety of this pull request"
* tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (84 commits)
IB/hfi1: Add cache evict LRU list
IB/hfi1: Fix memory leak during unexpected shutdown
IB/hfi1: Remove unneeded mm argument in remove function
IB/hfi1: Consistently call ops->remove outside spinlock
IB/hfi1: Use evict mmu rb operation
IB/hfi1: Add evict operation to the mmu rb handler
IB/hfi1: Fix TID caching actions
IB/hfi1: Make the cache handler own its rb tree root
IB/hfi1: Make use of mm consistent
IB/hfi1: Fix user SDMA racy user request claim
IB/hfi1: Fix error condition that needs to clean up
IB/hfi1: Release node on insert failure
IB/hfi1: Validate SDMA user iovector count
IB/hfi1: Validate SDMA user request index
IB/hfi1: Use the same capability state for all shared contexts
IB/hfi1: Prevent null pointer dereference
IB/hfi1: Rename TID mmu_rb_* functions
IB/hfi1: Remove unneeded empty check in hfi1_mmu_rb_unregister()
IB/hfi1: Restructure hfi1_file_open
IB/hfi1: Make iovec loop index easy to understand
...
Diffstat (limited to 'include/rdma')
-rw-r--r-- | include/rdma/ib_verbs.h | 6 |
-rw-r--r-- | include/rdma/opa_port_info.h | 16 |
-rw-r--r-- | include/rdma/rdma_vt.h | 7 |
-rw-r--r-- | include/rdma/rdmavt_mr.h | 1 |
-rw-r--r-- | include/rdma/rdmavt_qp.h | 92 |
5 files changed, 96 insertions, 26 deletions
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 94a0bc5b5bdd..8e90dd28bb75 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1490,6 +1490,10 @@ struct ib_rwq_ind_table_init_attr {
         struct ib_wq    **ind_tbl;
 };
 
+/*
+ * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
+ * @max_read_sge:  Maximum SGE elements per RDMA READ request.
+ */
 struct ib_qp {
         struct ib_device        *device;
         struct ib_pd            *pd;
@@ -1511,6 +1515,8 @@ struct ib_qp {
         void                    (*event_handler)(struct ib_event *, void *);
         void                    *qp_context;
         u32                     qp_num;
+        u32                     max_write_sge;
+        u32                     max_read_sge;
         enum ib_qp_type         qp_type;
         struct ib_rwq_ind_table *rwq_ind_tbl;
 };
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 2b95c2c336eb..9303e0e4f508 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -33,11 +33,6 @@
 #if !defined(OPA_PORT_INFO_H)
 #define OPA_PORT_INFO_H
 
-/* Temporary until HFI driver is updated */
-#ifndef USE_PI_LED_ENABLE
-#define USE_PI_LED_ENABLE 0
-#endif
-
 #define OPA_PORT_LINK_MODE_NOP  0 /* No change */
 #define OPA_PORT_LINK_MODE_OPA  4 /* Port mode is OPA */
 
@@ -274,23 +269,12 @@ enum port_info_field_masks {
         OPA_PI_MASK_MTU_CAP                       = 0x0F,
 };
 
-#if USE_PI_LED_ENABLE
 struct opa_port_states {
         u8     reserved;
         u8     ledenable_offlinereason;   /* 1 res, 1 bit, 6 bits */
         u8     reserved2;
         u8     portphysstate_portstate;   /* 4 bits, 4 bits */
 };
-#define PI_LED_ENABLE_SUP 1
-#else
-struct opa_port_states {
-        u8     reserved;
-        u8     offline_reason;            /* 2 res, 6 bits */
-        u8     reserved2;
-        u8     portphysstate_portstate;   /* 4 bits, 4 bits */
-};
-#define PI_LED_ENABLE_SUP 0
-#endif
 
 struct opa_port_state_info {
         struct opa_port_states port_states;
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 9c9a27d42aaa..e31502107a58 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -158,6 +158,7 @@ struct rvt_driver_params {
         u32 max_mad_size;
         u8 qos_shift;
         u8 max_rdma_atomic;
+        u8 reserved_operations;
 };
 
 /* Protection domain */
@@ -351,6 +352,9 @@ struct rvt_dev_info {
         /* Driver specific properties */
         struct rvt_driver_params dparms;
 
+        /* post send table */
+        const struct rvt_operation_params *post_parms;
+
         struct rvt_mregion __rcu *dma_mr;
         struct rvt_lkey_table lkey_table;
 
@@ -484,6 +488,9 @@ void rvt_unregister_device(struct rvt_dev_info *rvd);
 int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
 int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
                   int port_index, u16 *pkey_table);
+int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
+                    int access);
+int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
 int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
                 u32 len, u64 vaddr, u32 rkey, int acc);
 int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 5edffdca8c53..6b3c6c8b6b77 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -81,6 +81,7 @@ struct rvt_mregion {
         u32 mapsz;              /* size of the map array */
         u8  page_shift;         /* 0 - non unform/non powerof2 sizes */
         u8  lkey_published;     /* in global table */
+        atomic_t lkey_invalid;  /* true if current lkey is invalid */
         struct completion comp; /* complete when refcount goes to zero */
         atomic_t refcount;
         struct rvt_segarray *map[0];    /* the segments */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 6d23b879416a..bd34d0b56bf7 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -145,6 +145,12 @@
         (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
 
 /*
+ * Internal send flags
+ */
+#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
+#define RVT_SEND_COMPLETION_ONLY        (IB_SEND_RESERVED_START << 1)
+
+/*
  * Send work request queue entry.
  * The size of the sg_list is determined when the QP is created and stored
  * in qp->s_max_sge.
@@ -216,23 +222,43 @@ struct rvt_mmap_info {
  * to send a RDMA read response or atomic operation.
  */
 struct rvt_ack_entry {
-        u8 opcode;
-        u8 sent;
+        struct rvt_sge rdma_sge;
+        u64 atomic_data;
         u32 psn;
         u32 lpsn;
-        union {
-                struct rvt_sge rdma_sge;
-                u64 atomic_data;
-        };
+        u8 opcode;
+        u8 sent;
 };
 
 #define RC_QP_SCALING_INTERVAL  5
 
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
+#define RVT_OPERATION_PRIV        0x00000001
+#define RVT_OPERATION_ATOMIC      0x00000002
+#define RVT_OPERATION_ATOMIC_SGE  0x00000004
+#define RVT_OPERATION_LOCAL       0x00000008
+#define RVT_OPERATION_USE_RESERVE 0x00000010
+
+#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
+
+/**
+ * rvt_operation_params - op table entry
+ * @length - the length to copy into the swqe entry
+ * @qpt_support - a bit mask indicating QP type support
+ * @flags - RVT_OPERATION flags (see above)
+ *
+ * This supports table driven post send so that
+ * drivers can support differing sets of
+ * operations.
+ */
+
+struct rvt_operation_params {
+        size_t length;
+        u32 qpt_support;
+        u32 flags;
+};
+
+/*
  * Common variables are protected by both r_rq.lock and s_lock in that order
  * which only happens in modify_qp() or changing the QP 'state'.
  */
@@ -307,6 +333,7 @@ struct rvt_qp {
         u32 s_next_psn;         /* PSN for next request */
         u32 s_avail;            /* number of entries avail */
         u32 s_ssn;              /* SSN of tail entry */
+        atomic_t s_reserved_used; /* reserved entries in use */
 
         spinlock_t s_lock ____cacheline_aligned_in_smp;
         u32 s_flags;
@@ -343,6 +370,8 @@ struct rvt_qp {
         struct rvt_sge_state s_ack_rdma_sge;
         struct timer_list s_timer;
 
+        atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */
+
         /*
          * This sge list MUST be last. Do not add anything below here.
          */
@@ -436,6 +465,49 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
                   rq->max_sge * sizeof(struct ib_sge)) * n);
 }
 
+/**
+ * rvt_qp_wqe_reserve - reserve operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This routine is used in post send to record
+ * a wqe relative reserved operation use.
+ */
+static inline void rvt_qp_wqe_reserve(
+        struct rvt_qp *qp,
+        struct rvt_swqe *wqe)
+{
+        wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
+        atomic_inc(&qp->s_reserved_used);
+}
+
+/**
+ * rvt_qp_wqe_unreserve - clean reserved operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This decrements the reserve use count.
+ *
+ * This call MUST precede the change to
+ * s_last to ensure that post send sees a stable
+ * s_avail.
+ *
+ * An smp_mb__after_atomic() is used to ensure
+ * the compiler does not reorder the s_last
+ * ring index update and the decrement of s_reserved_used.
+ */
+static inline void rvt_qp_wqe_unreserve(
+        struct rvt_qp *qp,
+        struct rvt_swqe *wqe)
+{
+        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+                wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
+                atomic_dec(&qp->s_reserved_used);
+                /* ensure no compiler re-order up to s_last change */
+                smp_mb__after_atomic();
+        }
+}
+
 extern const int ib_rvt_state_ops[];
 
 struct rvt_dev_info;
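Two of the rdmavt additions above are easiest to read together. First, the table driven post send: a driver points rdi->post_parms (added to struct rvt_dev_info above) at a per-opcode array of rvt_operation_params. A sketch with illustrative opcodes, lengths, and QP-type masks — not the actual hfi1 table:

#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

/* Illustrative op table: entries and values are examples only. */
static const struct rvt_operation_params sketch_post_parms[RVT_OPERATION_MAX] = {
        [IB_WR_RDMA_WRITE] = {
                .length = sizeof(struct ib_rdma_wr),
                .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
        },
        [IB_WR_REG_MR] = {
                .length = sizeof(struct ib_reg_wr),
                .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
                .flags = RVT_OPERATION_LOCAL,   /* completed without hardware */
        },
};

Second, the reserved-operation accounting: rvt_qp_wqe_reserve() and rvt_qp_wqe_unreserve() bracket a work request that uses a reserved send-queue slot, so ordinary posts still see a stable s_avail. A sketch of the pairing, assuming a driver-private post/complete path — only the two helpers and the flag come from the diff above:

static void sketch_post_one(struct rvt_qp *qp, struct rvt_swqe *wqe,
                            const struct rvt_operation_params *op)
{
        if (op->flags & RVT_OPERATION_USE_RESERVE)
                rvt_qp_wqe_reserve(qp, wqe);    /* charge a reserved slot */
        /* ... queue the wqe for processing ... */
}

static void sketch_complete_one(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        /* must run before s_last advances, per the comment in the diff */
        rvt_qp_wqe_unreserve(qp, wqe);
        /* ... advance s_last and generate the completion ... */
}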