Diffstat (limited to 'drivers/infiniband/sw')
21 files changed, 278 insertions, 317 deletions
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
index fa5be13a4394..207bc0ed96ff 100644
--- a/drivers/infiniband/sw/rdmavt/mad.c
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -70,7 +70,7 @@
  *
  * Return: IB_MAD_RESULT_SUCCESS or error
  */
-int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
 		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 		    const struct ib_mad_hdr *in, size_t in_mad_size,
 		    struct ib_mad_hdr *out, size_t *out_mad_size,
@@ -82,9 +82,6 @@ int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	 * future may choose to implement this but it should not be made into a
 	 * requirement.
 	 */
-	if (ibport_num_to_idx(ibdev, port_num) < 0)
-		return -EINVAL;
-
 	return IB_MAD_RESULT_FAILURE;
 }
diff --git a/drivers/infiniband/sw/rdmavt/mad.h b/drivers/infiniband/sw/rdmavt/mad.h
index a9d6eecc3723..1eae5efea4be 100644
--- a/drivers/infiniband/sw/rdmavt/mad.h
+++ b/drivers/infiniband/sw/rdmavt/mad.h
@@ -50,7 +50,7 @@
 
 #include <rdma/rdma_vt.h>
 
-int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
 		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 		    const struct ib_mad_hdr *in, size_t in_mad_size,
 		    struct ib_mad_hdr *out, size_t *out_mad_size,
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 8fd0128a9336..12ebe041a5da 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -151,15 +151,12 @@ static int rvt_modify_device(struct ib_device *device,
  *
  * Return: 0 on success
  */
-static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
+static int rvt_query_port(struct ib_device *ibdev, u32 port_num,
 			  struct ib_port_attr *props)
 {
 	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
 	struct rvt_ibport *rvp;
-	int port_index = ibport_num_to_idx(ibdev, port_num);
-
-	if (port_index < 0)
-		return -EINVAL;
+	u32 port_index = ibport_num_to_idx(ibdev, port_num);
 
 	rvp = rdi->ports[port_index];
 	/* props being zeroed by the caller, avoid zeroing it here */
@@ -186,16 +183,13 @@ static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
  *
  * Return: 0 on success
  */
-static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
+static int rvt_modify_port(struct ib_device *ibdev, u32 port_num,
 			   int port_modify_mask, struct ib_port_modify *props)
 {
 	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
 	struct rvt_ibport *rvp;
 	int ret = 0;
-	int port_index = ibport_num_to_idx(ibdev, port_num);
-
-	if (port_index < 0)
-		return -EINVAL;
+	u32 port_index = ibport_num_to_idx(ibdev, port_num);
 
 	rvp = rdi->ports[port_index];
 	if (port_modify_mask & IB_PORT_OPA_MASK_CHG) {
@@ -225,7 +219,7 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
  *
  * Return: 0 on failure pkey otherwise
  */
-static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
+static int rvt_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index,
 			  u16 *pkey)
 {
 	/*
@@ -235,11 +229,9 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
 	 * no way to protect against that anyway.
 	 */
 	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
-	int port_index;
+	u32 port_index;
 
 	port_index = ibport_num_to_idx(ibdev, port_num);
-	if (port_index < 0)
-		return -EINVAL;
 
 	if (index >= rvt_get_npkeys(rdi))
 		return -EINVAL;
@@ -257,12 +249,12 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
  *
  * Return: 0 on success
  */
-static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
+static int rvt_query_gid(struct ib_device *ibdev, u32 port_num,
 			 int guid_index, union ib_gid *gid)
 {
 	struct rvt_dev_info *rdi;
 	struct rvt_ibport *rvp;
-	int port_index;
+	u32 port_index;
 
 	/*
 	 * Driver is responsible for updating the guid table. Which will be used
@@ -270,8 +262,6 @@ static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
 	 * is being done.
 	 */
 	port_index = ibport_num_to_idx(ibdev, port_num);
-	if (port_index < 0)
-		return -EINVAL;
 
 	rdi = ib_to_rvt(ibdev);
 	rvp = rdi->ports[port_index];
@@ -301,16 +291,12 @@ static void rvt_dealloc_ucontext(struct ib_ucontext *context)
 	return;
 }
 
-static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int rvt_get_port_immutable(struct ib_device *ibdev, u32 port_num,
 				  struct ib_port_immutable *immutable)
 {
 	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
 	struct ib_port_attr attr;
-	int err, port_index;
-
-	port_index = ibport_num_to_idx(ibdev, port_num);
-	if (port_index < 0)
-		return -EINVAL;
+	int err;
 
 	immutable->core_cap_flags = rdi->dparms.core_cap_flags;
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index d19ff817c2c7..c0fed6510f0b 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -96,16 +96,9 @@
 #define __rvt_pr_err_ratelimited(pdev, name, fmt, ...) \
 	dev_err_ratelimited(&(pdev)->dev, "%s: " fmt, name, ##__VA_ARGS__)
 
-static inline int ibport_num_to_idx(struct ib_device *ibdev, u8 port_num)
+static inline u32 ibport_num_to_idx(struct ib_device *ibdev, u32 port_num)
 {
-	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
-	int port_index;
-
-	port_index = port_num - 1; /* IB ports start at 1 our arrays at 0 */
-	if ((port_index < 0) || (port_index >= rdi->dparms.nports))
-		return -EINVAL;
-
-	return port_index;
+	return port_num - 1; /* IB ports start at 1 our arrays at 0 */
 }
 
 #endif /* DEF_RDMAVT_H */
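Note on the ibport_num_to_idx() simplification above: the IB core validates port numbers with rdma_is_port_valid() before dispatching to driver callbacks such as query_port, so the helper no longer needs a failure path and can return u32 directly. A minimal sketch of the same convention, with the range check hoisted to the boundary as the core does (these names are illustrative, not from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* stand-in for the core's rdma_is_port_valid() bounds check */
    static bool port_is_valid(uint32_t port_num, uint32_t nports)
    {
            return port_num >= 1 && port_num <= nports; /* IB ports are 1-based */
    }

    /* mirrors the simplified ibport_num_to_idx(): no check left in the helper */
    static uint32_t port_num_to_idx(uint32_t port_num)
    {
            return port_num - 1; /* arrays are 0-based */
    }

With validation done once at the boundary, each per-port callback can index rdi->ports[] directly, which is why the port_index < 0 branches above became dead code.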
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index df0d173d6acb..da2e867a1ed9 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
 		type = RXE_NETWORK_TYPE_IPV4;
 		break;
 	case RDMA_NETWORK_IPV6:
-		type = RXE_NETWORK_TYPE_IPV4;
+		type = RXE_NETWORK_TYPE_IPV6;
 		break;
 	default:
 		/* not reached - checked in rxe_av_chk_attr */
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 17a361b8dbb1..2af26737d32d 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -345,7 +345,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
 
 	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
 			&wqe->dma, payload_addr(pkt),
-			payload_size(pkt), to_mem_obj, NULL);
+			payload_size(pkt), to_mr_obj, NULL);
 	if (ret)
 		return COMPST_ERROR;
 
@@ -365,7 +365,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
 
 	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
 			&wqe->dma, &atomic_orig,
-			sizeof(u64), to_mem_obj, NULL);
+			sizeof(u64), to_mr_obj, NULL);
 	if (ret)
 		return COMPST_ERROR;
 	else
@@ -676,7 +676,6 @@ int rxe_completer(void *arg)
 
 			/* there is nothing to retry in this case */
 			if (!wqe || (wqe->state == wqe_state_posted)) {
-				pr_warn("Retry attempted without a valid wqe\n");
 				ret = -EAGAIN;
 				goto done;
 			}
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index ac9154f0593d..f469fd1c753d 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -26,7 +26,7 @@ static const char * const rxe_counter_name[] = {
 
 int rxe_ib_get_hw_stats(struct ib_device *ibdev,
 			struct rdma_hw_stats *stats,
-			u8 port, int index)
+			u32 port, int index)
 {
 	struct rxe_dev *dev = to_rdev(ibdev);
 	unsigned int cnt;
@@ -41,7 +41,7 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev,
 }
 
 struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
-					    u8 port_num)
+					    u32 port_num)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS);
 	/* We support only per port stats */
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
index 49ee6f96656d..2f369acb46d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.h
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -30,8 +30,8 @@ enum rxe_counters {
 };
 
 struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
-					    u8 port_num);
+					    u32 port_num);
 int rxe_ib_get_hw_stats(struct ib_device *ibdev,
 			struct rdma_hw_stats *stats,
-			u8 port, int index);
+			u32 port, int index);
 
 #endif /* RXE_HW_COUNTERS_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0d758760b9ae..ef8061d2fbe0 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 /* rxe_mr.c */
 enum copy_direction {
-	to_mem_obj,
-	from_mem_obj,
+	to_mr_obj,
+	from_mr_obj,
 };
 
-void rxe_mem_init_dma(struct rxe_pd *pd,
-		      int access, struct rxe_mem *mem);
+void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
 
-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
-		      u64 length, u64 iova, int access, struct ib_udata *udata,
-		      struct rxe_mem *mr);
+int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+		     int access, struct ib_udata *udata, struct rxe_mr *mr);
 
-int rxe_mem_init_fast(struct rxe_pd *pd,
-		      int max_pages, struct rxe_mem *mem);
+int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
 
-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
-		 int length, enum copy_direction dir, u32 *crcp);
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+		enum copy_direction dir, u32 *crcp);
 
 int copy_data(struct rxe_pd *pd, int access,
 	      struct rxe_dma_info *dma, void *addr, int length,
 	      enum copy_direction dir, u32 *crcp);
 
-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
+void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
 
 enum lookup_type {
 	lookup_local,
 	lookup_remote,
 };
 
-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
-			   enum lookup_type type);
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+			 enum lookup_type type);
 
-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
 
-void rxe_mem_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_entry *arg);
 
 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
 
@@ -116,7 +113,6 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 				int paylen, struct rxe_pkt_info *pkt);
 int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
 const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
-struct device *rxe_dma_device(struct rxe_dev *rxe);
 
 int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
 int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6e8c41567ba0..9f63947bab12 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
 	return key;
 }
 
-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 {
-	switch (mem->type) {
-	case RXE_MEM_TYPE_DMA:
+	switch (mr->type) {
+	case RXE_MR_TYPE_DMA:
 		return 0;
 
-	case RXE_MEM_TYPE_MR:
-		if (iova < mem->iova ||
-		    length > mem->length ||
-		    iova > mem->iova + mem->length - length)
+	case RXE_MR_TYPE_MR:
+		if (iova < mr->iova || length > mr->length ||
+		    iova > mr->iova + mr->length - length)
 			return -EFAULT;
 		return 0;
 
@@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 					| IB_ACCESS_REMOTE_WRITE	\
 					| IB_ACCESS_REMOTE_ATOMIC)
 
-static void rxe_mem_init(int access, struct rxe_mem *mem)
+static void rxe_mr_init(int access, struct rxe_mr *mr)
 {
-	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
+	u32 lkey = mr->pelem.index << 8 | rxe_get_key();
 	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
 
-	mem->ibmr.lkey = lkey;
-	mem->ibmr.rkey = rkey;
-	mem->state = RXE_MEM_STATE_INVALID;
-	mem->type = RXE_MEM_TYPE_NONE;
-	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+	mr->ibmr.lkey = lkey;
+	mr->ibmr.rkey = rkey;
+	mr->state = RXE_MR_STATE_INVALID;
+	mr->type = RXE_MR_TYPE_NONE;
+	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
 }
 
-void rxe_mem_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_entry *arg)
 {
-	struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
+	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
 	int i;
 
-	ib_umem_release(mem->umem);
+	ib_umem_release(mr->umem);
 
-	if (mem->map) {
-		for (i = 0; i < mem->num_map; i++)
-			kfree(mem->map[i]);
+	if (mr->map) {
+		for (i = 0; i < mr->num_map; i++)
+			kfree(mr->map[i]);
 
-		kfree(mem->map);
+		kfree(mr->map);
 	}
 }
 
-static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
 {
 	int i;
 	int num_map;
-	struct rxe_map **map = mem->map;
+	struct rxe_map **map = mr->map;
 
 	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
 
-	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
-	if (!mem->map)
+	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+	if (!mr->map)
 		goto err1;
 
 	for (i = 0; i < num_map; i++) {
-		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
-		if (!mem->map[i])
+		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+		if (!mr->map[i])
 			goto err2;
 	}
 
 	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
 
-	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
-	mem->map_mask = RXE_BUF_PER_MAP - 1;
+	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
+	mr->map_mask = RXE_BUF_PER_MAP - 1;
 
-	mem->num_buf = num_buf;
-	mem->num_map = num_map;
-	mem->max_buf = num_map * RXE_BUF_PER_MAP;
+	mr->num_buf = num_buf;
+	mr->num_map = num_map;
+	mr->max_buf = num_map * RXE_BUF_PER_MAP;
 
 	return 0;
 
 err2:
 	for (i--; i >= 0; i--)
-		kfree(mem->map[i]);
+		kfree(mr->map[i]);
 
-	kfree(mem->map);
+	kfree(mr->map);
 
 err1:
 	return -ENOMEM;
 }
 
-void rxe_mem_init_dma(struct rxe_pd *pd,
-		      int access, struct rxe_mem *mem)
+void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
 {
-	rxe_mem_init(access, mem);
+	rxe_mr_init(access, mr);
 
-	mem->ibmr.pd = &pd->ibpd;
-	mem->access = access;
-	mem->state = RXE_MEM_STATE_VALID;
-	mem->type = RXE_MEM_TYPE_DMA;
+	mr->ibmr.pd = &pd->ibpd;
+	mr->access = access;
+	mr->state = RXE_MR_STATE_VALID;
+	mr->type = RXE_MR_TYPE_DMA;
 }
 
-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
-		      u64 length, u64 iova, int access, struct ib_udata *udata,
-		      struct rxe_mem *mem)
+int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+		     int access, struct ib_udata *udata, struct rxe_mr *mr)
 {
 	struct rxe_map		**map;
 	struct rxe_phys_buf	*buf = NULL;
@@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 		goto err1;
 	}
 
-	mem->umem = umem;
+	mr->umem = umem;
 	num_buf = ib_umem_num_pages(umem);
 
-	rxe_mem_init(access, mem);
+	rxe_mr_init(access, mr);
 
-	err = rxe_mem_alloc(mem, num_buf);
+	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mem_alloc\n", err);
+		pr_warn("err %d from rxe_mr_alloc\n", err);
 		ib_umem_release(umem);
 		goto err1;
 	}
 
-	mem->page_shift = PAGE_SHIFT;
-	mem->page_mask = PAGE_SIZE - 1;
+	mr->page_shift = PAGE_SHIFT;
+	mr->page_mask = PAGE_SIZE - 1;
 
 	num_buf = 0;
-	map = mem->map;
+	map = mr->map;
 	if (length > 0) {
 		buf = map[0]->buf;
 
@@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 		}
 	}
 
-	mem->ibmr.pd = &pd->ibpd;
-	mem->umem = umem;
-	mem->access = access;
-	mem->length = length;
-	mem->iova = iova;
-	mem->va = start;
-	mem->offset = ib_umem_offset(umem);
-	mem->state = RXE_MEM_STATE_VALID;
-	mem->type = RXE_MEM_TYPE_MR;
+	mr->ibmr.pd = &pd->ibpd;
+	mr->umem = umem;
+	mr->access = access;
+	mr->length = length;
+	mr->iova = iova;
+	mr->va = start;
+	mr->offset = ib_umem_offset(umem);
+	mr->state = RXE_MR_STATE_VALID;
+	mr->type = RXE_MR_TYPE_MR;
 
 	return 0;
 
@@ -201,24 +198,23 @@ err1:
 	return err;
 }
 
-int rxe_mem_init_fast(struct rxe_pd *pd,
-		      int max_pages, struct rxe_mem *mem)
+int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 {
 	int err;
 
-	rxe_mem_init(0, mem);
+	rxe_mr_init(0, mr);
 
 	/* In fastreg, we also set the rkey */
-	mem->ibmr.rkey = mem->ibmr.lkey;
+	mr->ibmr.rkey = mr->ibmr.lkey;
 
-	err = rxe_mem_alloc(mem, max_pages);
+	err = rxe_mr_alloc(mr, max_pages);
 	if (err)
 		goto err1;
 
-	mem->ibmr.pd = &pd->ibpd;
-	mem->max_buf = max_pages;
-	mem->state = RXE_MEM_STATE_FREE;
-	mem->type = RXE_MEM_TYPE_MR;
+	mr->ibmr.pd = &pd->ibpd;
+	mr->max_buf = max_pages;
+	mr->state = RXE_MR_STATE_FREE;
+	mr->type = RXE_MR_TYPE_MR;
 
 	return 0;
 
@@ -226,28 +222,24 @@ err1:
 	return err;
 }
 
-static void lookup_iova(
-	struct rxe_mem	*mem,
-	u64			iova,
-	int			*m_out,
-	int			*n_out,
-	size_t			*offset_out)
+static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
+			size_t *offset_out)
 {
-	size_t			offset = iova - mem->iova + mem->offset;
+	size_t offset = iova - mr->iova + mr->offset;
 	int			map_index;
 	int			buf_index;
 	u64			length;
 
-	if (likely(mem->page_shift)) {
-		*offset_out = offset & mem->page_mask;
-		offset >>= mem->page_shift;
-		*n_out = offset & mem->map_mask;
-		*m_out = offset >> mem->map_shift;
+	if (likely(mr->page_shift)) {
+		*offset_out = offset & mr->page_mask;
+		offset >>= mr->page_shift;
+		*n_out = offset & mr->map_mask;
+		*m_out = offset >> mr->map_shift;
 	} else {
 		map_index = 0;
 		buf_index = 0;
 
-		length = mem->map[map_index]->buf[buf_index].size;
+		length = mr->map[map_index]->buf[buf_index].size;
 
 		while (offset >= length) {
 			offset -= length;
@@ -257,7 +249,7 @@ static void lookup_iova(
 				map_index++;
 				buf_index = 0;
 			}
-			length = mem->map[map_index]->buf[buf_index].size;
+			length = mr->map[map_index]->buf[buf_index].size;
 		}
 
 		*m_out = map_index;
@@ -266,49 +258,49 @@
 	}
 }
 
-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
+void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 {
 	size_t offset;
 	int m, n;
 	void *addr;
 
-	if (mem->state != RXE_MEM_STATE_VALID) {
-		pr_warn("mem not in valid state\n");
+	if (mr->state != RXE_MR_STATE_VALID) {
+		pr_warn("mr not in valid state\n");
 		addr = NULL;
 		goto out;
 	}
 
-	if (!mem->map) {
+	if (!mr->map) {
 		addr = (void *)(uintptr_t)iova;
 		goto out;
 	}
 
-	if (mem_check_range(mem, iova, length)) {
+	if (mr_check_range(mr, iova, length)) {
 		pr_warn("range violation\n");
 		addr = NULL;
 		goto out;
 	}
 
-	lookup_iova(mem, iova, &m, &n, &offset);
+	lookup_iova(mr, iova, &m, &n, &offset);
 
-	if (offset + length > mem->map[m]->buf[n].size) {
+	if (offset + length > mr->map[m]->buf[n].size) {
 		pr_warn("crosses page boundary\n");
 		addr = NULL;
 		goto out;
 	}
 
-	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
+	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
 
 out:
 	return addr;
 }
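The page_shift fast path in lookup_iova() above splits a byte offset into the MR into a map index m, a buffer index n, and an in-page offset using only shifts and masks. A self-contained sketch of that arithmetic, assuming 4 KiB pages and RXE_BUF_PER_MAP = 256 (PAGE_SIZE divided by the 16-byte struct rxe_phys_buf); the constants here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12           /* 4 KiB pages (assumed) */
    #define PAGE_MASK_LO 0xfffu     /* mr->page_mask = PAGE_SIZE - 1 */
    #define MAP_SHIFT 8             /* ilog2(256) */
    #define MAP_MASK 0xffu          /* RXE_BUF_PER_MAP - 1 */

    int main(void)
    {
            uint64_t offset = (5ULL << 20) + 123;   /* iova - mr->iova + mr->offset */
            size_t in_page = offset & PAGE_MASK_LO; /* byte within the page */
            offset >>= PAGE_SHIFT;                  /* now a page number */
            unsigned int n = offset & MAP_MASK;     /* page slot within one map */
            unsigned int m = offset >> MAP_SHIFT;   /* which map holds that slot */
            printf("m=%u n=%u offset=%zu\n", m, n, in_page); /* m=5 n=0 offset=123 */
            return 0;
    }

The else branch walks buffers one by one instead, for MRs whose buffers are not uniformly page-sized.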
/* copy data from a range (vaddr, vaddr+length-1) to or from
- * a mem object starting at iova. Compute incremental value of
- * crc32 if crcp is not zero. caller must hold a reference to mem
+ * a mr object starting at iova. Compute incremental value of
+ * crc32 if crcp is not zero. caller must hold a reference to mr
  */
-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
-		 enum copy_direction dir, u32 *crcp)
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+		enum copy_direction dir, u32 *crcp)
 {
 	int			err;
 	int			bytes;
@@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
 	if (length == 0)
 		return 0;
 
-	if (mem->type == RXE_MEM_TYPE_DMA) {
+	if (mr->type == RXE_MR_TYPE_DMA) {
 		u8 *src, *dest;
 
-		src = (dir == to_mem_obj) ?
-			addr : ((void *)(uintptr_t)iova);
+		src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
 
-		dest = (dir == to_mem_obj) ?
-			((void *)(uintptr_t)iova) : addr;
+		dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
 
 		memcpy(dest, src, length);
 
 		if (crcp)
-			*crcp = rxe_crc32(to_rdev(mem->ibmr.device),
-					*crcp, dest, length);
+			*crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
+					  length);
 
 		return 0;
 	}
 
-	WARN_ON_ONCE(!mem->map);
+	WARN_ON_ONCE(!mr->map);
 
-	err = mem_check_range(mem, iova, length);
+	err = mr_check_range(mr, iova, length);
 	if (err) {
 		err = -EFAULT;
 		goto err1;
 	}
 
-	lookup_iova(mem, iova, &m, &i, &offset);
+	lookup_iova(mr, iova, &m, &i, &offset);
 
-	map = mem->map + m;
+	map = mr->map + m;
 	buf = map[0]->buf + i;
 
 	while (length > 0) {
 		u8 *src, *dest;
 
 		va = (u8 *)(uintptr_t)buf->addr + offset;
-		src = (dir == to_mem_obj) ? addr : va;
-		dest = (dir == to_mem_obj) ? va : addr;
+		src = (dir == to_mr_obj) ? addr : va;
+		dest = (dir == to_mr_obj) ? va : addr;
 
 		bytes = buf->size - offset;
 
@@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
 		memcpy(dest, src, bytes);
 
 		if (crcp)
-			crc = rxe_crc32(to_rdev(mem->ibmr.device),
-					crc, dest, bytes);
+			crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
+					bytes);
 
 		length -= bytes;
 		addr += bytes;
@@ -411,7 +401,7 @@ int copy_data(
 	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
 	int			offset	= dma->sge_offset;
 	int			resid	= dma->resid;
-	struct rxe_mem		*mem	= NULL;
+	struct rxe_mr		*mr	= NULL;
 	u64			iova;
 	int			err;
 
@@ -424,8 +414,8 @@ int copy_data(
 	}
 
 	if (sge->length && (offset < sge->length)) {
-		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
-		if (!mem) {
+		mr = lookup_mr(pd, access, sge->lkey, lookup_local);
+		if (!mr) {
 			err = -EINVAL;
 			goto err1;
 		}
@@ -435,9 +425,9 @@ int copy_data(
 		bytes = length;
 
 		if (offset >= sge->length) {
-			if (mem) {
-				rxe_drop_ref(mem);
-				mem = NULL;
+			if (mr) {
+				rxe_drop_ref(mr);
+				mr = NULL;
 			}
 			sge++;
 			dma->cur_sge++;
@@ -449,9 +439,9 @@ int copy_data(
 			}
 
 			if (sge->length) {
-				mem = lookup_mem(pd, access, sge->lkey,
-						 lookup_local);
-				if (!mem) {
+				mr = lookup_mr(pd, access, sge->lkey,
+					       lookup_local);
+				if (!mr) {
 					err = -EINVAL;
 					goto err1;
 				}
@@ -466,7 +456,7 @@ int copy_data(
 		if (bytes > 0) {
 			iova = sge->addr + offset;
 
-			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
+			err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
 			if (err)
 				goto err2;
 
@@ -480,14 +470,14 @@ int copy_data(
 	dma->sge_offset = offset;
 	dma->resid = resid;
 
-	if (mem)
-		rxe_drop_ref(mem);
+	if (mr)
+		rxe_drop_ref(mr);
 
 	return 0;
 
err2:
-	if (mem)
-		rxe_drop_ref(mem);
+	if (mr)
+		rxe_drop_ref(mr);
err1:
 	return err;
 }
@@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
 	return 0;
 }
 
-/* (1) find the mem (mr or mw) corresponding to lkey/rkey
+/* (1) find the mr corresponding to lkey/rkey
  *     depending on lookup_type
- * (2) verify that the (qp) pd matches the mem pd
- * (3) verify that the mem can support the requested access
- * (4) verify that mem state is valid
+ * (2) verify that the (qp) pd matches the mr pd
+ * (3) verify that the mr can support the requested access
+ * (4) verify that mr state is valid
  */
-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
-			   enum lookup_type type)
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+			 enum lookup_type type)
 {
-	struct rxe_mem *mem;
+	struct rxe_mr *mr;
 	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
 	int index = key >> 8;
 
-	mem = rxe_pool_get_index(&rxe->mr_pool, index);
-	if (!mem)
+	mr = rxe_pool_get_index(&rxe->mr_pool, index);
+	if (!mr)
 		return NULL;
 
-	if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
-		     (type == lookup_remote && mr_rkey(mem) != key) ||
-		     mr_pd(mem) != pd ||
-		     (access && !(access & mem->access)) ||
-		     mem->state != RXE_MEM_STATE_VALID)) {
-		rxe_drop_ref(mem);
-		mem = NULL;
+	if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
+		     (type == lookup_remote && mr_rkey(mr) != key) ||
+		     mr_pd(mr) != pd || (access && !(access & mr->access)) ||
+		     mr->state != RXE_MR_STATE_VALID)) {
+		rxe_drop_ref(mr);
+		mr = NULL;
 	}
 
-	return mem;
+	return mr;
 }
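As rxe_mr_init() and lookup_mr() above show, an lkey/rkey packs the pool index into bits 31..8 and an 8-bit key byte from rxe_get_key() into bits 7..0; lookup recovers the index with key >> 8 and then checks that the full key still matches. A small standalone sketch of that layout:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t make_lkey(uint32_t pool_index, uint8_t key_byte)
    {
            return pool_index << 8 | key_byte;   /* as in rxe_mr_init() */
    }

    int main(void)
    {
            uint32_t lkey = make_lkey(0x1234, 0xab);
            assert((lkey >> 8) == 0x1234);  /* lookup_mr(): pool index */
            assert((lkey & 0xff) == 0xab);  /* low byte guards stale keys */
            return 0;
    }

Because the key byte changes on each registration, a stale key that still carries a valid index fails the mr_lkey()/mr_rkey() comparison in lookup_mr() instead of silently hitting a reused slot.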
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 307d8986e7c9..d24901f2af3f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -8,8 +8,6 @@
 #include "rxe_loc.h"
 
 /* info about object pools
- * note that mr and mw share a single index space
- * so that one can map an lkey to the correct type of object
  */
 struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_UC] = {
@@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	},
 	[RXE_TYPE_MR] = {
 		.name		= "rxe-mr",
-		.size		= sizeof(struct rxe_mem),
-		.elem_offset	= offsetof(struct rxe_mem, pelem),
-		.cleanup	= rxe_mem_cleanup,
+		.size		= sizeof(struct rxe_mr),
+		.elem_offset	= offsetof(struct rxe_mr, pelem),
+		.cleanup	= rxe_mr_cleanup,
 		.flags		= RXE_POOL_INDEX,
 		.max_index	= RXE_MAX_MR_INDEX,
 		.min_index	= RXE_MIN_MR_INDEX,
 	},
 	[RXE_TYPE_MW] = {
 		.name		= "rxe-mw",
-		.size		= sizeof(struct rxe_mem),
-		.elem_offset	= offsetof(struct rxe_mem, pelem),
-		.flags		= RXE_POOL_INDEX,
+		.size		= sizeof(struct rxe_mw),
+		.elem_offset	= offsetof(struct rxe_mw, pelem),
+		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.max_index	= RXE_MAX_MW_INDEX,
 		.min_index	= RXE_MIN_MW_INDEX,
 	},
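The RXE_POOL_NO_ALLOC flag added to the MW entry means, as I read the pool code, that rxe_pool no longer allocates the object itself: the memory comes from the ib core (see the INIT_RDMA_OBJ_SIZE(ib_mw, ...) hunk in rxe_verbs.c below), and the pool only tracks the rxe_pool_entry embedded at .elem_offset. A toy illustration of the offset-based translation the pool relies on:

    #include <stddef.h>
    #include <stdio.h>

    struct pool_entry { unsigned int index; };

    struct mw_obj {                  /* stand-in for struct rxe_mw */
            char ibmw[64];           /* core-owned part; size illustrative */
            struct pool_entry pelem; /* bookkeeping embedded in the object */
    };

    int main(void)
    {
            size_t elem_offset = offsetof(struct mw_obj, pelem);
            struct mw_obj mw;        /* with NO_ALLOC, allocated by the caller */

            struct pool_entry *pelem =
                    (struct pool_entry *)((char *)&mw + elem_offset);
            struct mw_obj *back = (struct mw_obj *)((char *)pelem - elem_offset);
            printf("round trip ok: %d\n", back == &mw); /* prints 1 */
            return 0;
    }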
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 889290793d75..3664cdae7e1f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 	} else {
 		err = copy_data(qp->pd, 0, &wqe->dma,
 				payload_addr(pkt), paylen,
-				from_mem_obj,
+				from_mr_obj,
 				&crc);
 		if (err)
 			return err;
@@ -596,7 +596,7 @@ next_wqe:
 	if (wqe->mask & WR_REG_MASK) {
 		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
 			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-			struct rxe_mem *rmr;
+			struct rxe_mr *rmr;
 
 			rmr = rxe_pool_get_index(&rxe->mr_pool,
 						 wqe->wr.ex.invalidate_rkey >> 8);
@@ -607,14 +607,14 @@ next_wqe:
 				wqe->status = IB_WC_MW_BIND_ERR;
 				goto exit;
 			}
-			rmr->state = RXE_MEM_STATE_FREE;
+			rmr->state = RXE_MR_STATE_FREE;
 			rxe_drop_ref(rmr);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
 		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
-			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
+			struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
 
-			rmr->state = RXE_MEM_STATE_VALID;
+			rmr->state = RXE_MR_STATE_VALID;
 			rmr->access = wqe->wr.wr.reg.access;
 			rmr->ibmr.lkey = wqe->wr.wr.reg.key;
 			rmr->ibmr.rkey = wqe->wr.wr.reg.key;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 142f3d8014d8..2b220659bddb 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
 static enum resp_states check_rkey(struct rxe_qp *qp,
 				   struct rxe_pkt_info *pkt)
 {
-	struct rxe_mem *mem = NULL;
+	struct rxe_mr *mr = NULL;
 	u64 va;
 	u32 rkey;
 	u32 resid;
@@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 	resid = qp->resp.resid;
 	pktlen = payload_size(pkt);
 
-	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
-	if (!mem) {
+	mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
+	if (!mr) {
 		state = RESPST_ERR_RKEY_VIOLATION;
 		goto err;
 	}
 
-	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
+	if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
 		state = RESPST_ERR_RKEY_VIOLATION;
 		goto err;
 	}
 
-	if (mem_check_range(mem, va, resid)) {
+	if (mr_check_range(mr, va, resid)) {
 		state = RESPST_ERR_RKEY_VIOLATION;
 		goto err;
 	}
@@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 
 	WARN_ON_ONCE(qp->resp.mr);
 
-	qp->resp.mr = mem;
+	qp->resp.mr = mr;
 
 	return RESPST_EXECUTE;
 
err:
-	if (mem)
-		rxe_drop_ref(mem);
+	if (mr)
+		rxe_drop_ref(mr);
 	return state;
 }
 
@@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
 	int err;
 
 	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
-			data_addr, data_len, to_mem_obj, NULL);
+			data_addr, data_len, to_mr_obj, NULL);
 	if (unlikely(err))
 		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
 					: RESPST_ERR_MALFORMED_WQE;
@@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
 	int err;
 	int data_len = payload_size(pkt);
 
-	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
-			   data_len, to_mem_obj, NULL);
+	err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
+			  to_mr_obj, NULL);
 	if (err) {
 		rc = RESPST_ERR_RKEY_VIOLATION;
 		goto out;
@@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
 	u64 iova = atmeth_va(pkt);
 	u64 *vaddr;
 	enum resp_states ret;
-	struct rxe_mem *mr = qp->resp.mr;
+	struct rxe_mr *mr = qp->resp.mr;
 
-	if (mr->state != RXE_MEM_STATE_VALID) {
+	if (mr->state != RXE_MR_STATE_VALID) {
 		ret = RESPST_ERR_RKEY_VIOLATION;
 		goto out;
 	}
@@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	if (!skb)
 		return RESPST_ERR_RNR;
 
-	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
-			   payload, from_mem_obj, &icrc);
+	err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+			  payload, from_mr_obj, &icrc);
 	if (err)
 		pr_err("Failed copying memory\n");
@@ -816,8 +816,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 	struct rxe_recv_wqe *wqe = qp->resp.wqe;
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 
-	if (unlikely(!wqe))
-		return RESPST_CLEANUP;
+	if (!wqe)
+		goto finish;
 
 	memset(&cqe, 0, sizeof(cqe));
@@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 		}
 
 		if (pkt->mask & RXE_IETH_MASK) {
-			struct rxe_mem *rmr;
+			struct rxe_mr *rmr;
 
 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
 			wc->ex.invalidate_rkey = ieth_rkey(pkt);
@@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 					wc->ex.invalidate_rkey);
 				return RESPST_ERROR;
 			}
-			rmr->state = RXE_MEM_STATE_FREE;
+			rmr->state = RXE_MR_STATE_FREE;
 			rxe_drop_ref(rmr);
 		}
@@ -917,12 +917,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
 		return RESPST_ERR_CQ_OVERFLOW;
 
-	if (qp->resp.state == QP_STATE_ERROR)
+finish:
+	if (unlikely(qp->resp.state == QP_STATE_ERROR))
 		return RESPST_CHK_RESOURCE;
-
-	if (!pkt)
+	if (unlikely(!pkt))
 		return RESPST_DONE;
-	else if (qp_type(qp) == IB_QPT_RC)
+	if (qp_type(qp) == IB_QPT_RC)
 		return RESPST_ACKNOWLEDGE;
 	else
 		return RESPST_CLEANUP;
@@ -1056,10 +1056,8 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 	if (pkt->mask & RXE_SEND_MASK ||
 	    pkt->mask & RXE_WRITE_MASK) {
 		/* SEND. Ack again and cleanup. C9-105. */
-		if (bth_ack(pkt))
-			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
-		rc = RESPST_CLEANUP;
-		goto out;
+		send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+		return RESPST_CLEANUP;
 	} else if (pkt->mask & RXE_READ_MASK) {
 		struct resp_res *res;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index dee5e0e919d2..aeb5e232c195 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -26,7 +26,7 @@ static int rxe_query_device(struct ib_device *dev,
 }
 
 static int rxe_query_port(struct ib_device *dev,
-			  u8 port_num, struct ib_port_attr *attr)
+			  u32 port_num, struct ib_port_attr *attr)
 {
 	struct rxe_dev *rxe = to_rdev(dev);
 	struct rxe_port *port;
@@ -54,7 +54,7 @@ static int rxe_query_port(struct ib_device *dev,
 }
 
 static int rxe_query_pkey(struct ib_device *device,
-			  u8 port_num, u16 index, u16 *pkey)
+			  u32 port_num, u16 index, u16 *pkey)
 {
 	if (index > 0)
 		return -EINVAL;
@@ -84,7 +84,7 @@ static int rxe_modify_device(struct ib_device *dev,
 }
 
 static int rxe_modify_port(struct ib_device *dev,
-			   u8 port_num, int mask, struct ib_port_modify *attr)
+			   u32 port_num, int mask, struct ib_port_modify *attr)
 {
 	struct rxe_dev *rxe = to_rdev(dev);
 	struct rxe_port *port;
@@ -101,7 +101,7 @@ static int rxe_modify_port(struct ib_device *dev,
 }
 
 static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
-					       u8 port_num)
+					       u32 port_num)
 {
 	return IB_LINK_LAYER_ETHERNET;
 }
@@ -121,7 +121,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
 	rxe_drop_ref(uc);
 }
 
-static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
+static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
 			      struct ib_port_immutable *immutable)
 {
 	int err;
@@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_mem *mr;
+	struct rxe_mr *mr;
 
 	mr = rxe_alloc(&rxe->mr_pool);
 	if (!mr)
@@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 
 	rxe_add_index(mr);
 	rxe_add_ref(pd);
-	rxe_mem_init_dma(pd, access, mr);
+	rxe_mr_init_dma(pd, access, mr);
 
 	return &mr->ibmr;
 }
@@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	int err;
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_mem *mr;
+	struct rxe_mr *mr;
 
 	mr = rxe_alloc(&rxe->mr_pool);
 	if (!mr) {
@@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 
 	rxe_add_ref(pd);
 
-	err = rxe_mem_init_user(pd, start, length, iova,
-				access, udata, mr);
+	err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
 	if (err)
 		goto err3;
 
@@ -916,9 +915,9 @@ err2:
 
 static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
-	struct rxe_mem *mr = to_rmr(ibmr);
+	struct rxe_mr *mr = to_rmr(ibmr);
 
-	mr->state = RXE_MEM_STATE_ZOMBIE;
+	mr->state = RXE_MR_STATE_ZOMBIE;
 	rxe_drop_ref(mr_pd(mr));
 	rxe_drop_index(mr);
 	rxe_drop_ref(mr);
@@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_mem *mr;
+	struct rxe_mr *mr;
 	int err;
 
 	if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 
 	rxe_add_ref(pd);
 
-	err = rxe_mem_init_fast(pd, max_num_sg, mr);
+	err = rxe_mr_init_fast(pd, max_num_sg, mr);
 	if (err)
 		goto err2;
 
@@ -962,7 +961,7 @@ err1:
 
 static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
 {
-	struct rxe_mem *mr = to_rmr(ibmr);
+	struct rxe_mr *mr = to_rmr(ibmr);
 	struct rxe_map *map;
 	struct rxe_phys_buf *buf;
@@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
 static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 			 int sg_nents, unsigned int *sg_offset)
 {
-	struct rxe_mem *mr = to_rmr(ibmr);
+	struct rxe_mr *mr = to_rmr(ibmr);
 	int n;
 
 	mr->nbuf = 0;
@@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
 	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
+	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
 };
 
 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
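The new INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw) entry tells the ib core the size of the driver's MW structure and which member is the ib_mw, so the core can allocate the whole object and the driver can get back to it with container_of() (the to_rmw() helper in rxe_verbs.h below). If I read the macro in include/rdma/ib_verbs.h correctly, it also asserts at build time that the member sits at offset zero, which would explain why ibmw is the first field of the new struct rxe_mw. The recovery pattern, sketched with stand-in types:

    #include <stddef.h>

    struct ib_mw_stub { int dummy; };       /* stand-in for struct ib_mw */

    struct mw_drv {                         /* stand-in for struct rxe_mw */
            struct ib_mw_stub ibmw;         /* first member, core-visible */
            int driver_private_state;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* the to_rmw() pattern: core pointer -> driver object */
    static struct mw_drv *to_mw_drv(struct ib_mw_stub *mw)
    {
            return mw ? container_of(mw, struct mw_drv, ibmw) : NULL;
    }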
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 79e0a5a878da..11eba7a3ba8f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -156,7 +156,7 @@ struct resp_res {
 			struct sk_buff	*skb;
 		} atomic;
 		struct {
-			struct rxe_mem	*mr;
+			struct rxe_mr	*mr;
 			u64		va_org;
 			u32		rkey;
 			u32		length;
@@ -183,7 +183,7 @@ struct rxe_resp_info {
 
 	/* RDMA read / atomic only */
 	u64			va;
-	struct rxe_mem		*mr;
+	struct rxe_mr		*mr;
 	u32			resid;
 	u32			rkey;
 	u32			length;
@@ -262,18 +262,18 @@ struct rxe_qp {
 	struct execute_work	cleanup_work;
 };
 
-enum rxe_mem_state {
-	RXE_MEM_STATE_ZOMBIE,
-	RXE_MEM_STATE_INVALID,
-	RXE_MEM_STATE_FREE,
-	RXE_MEM_STATE_VALID,
+enum rxe_mr_state {
+	RXE_MR_STATE_ZOMBIE,
+	RXE_MR_STATE_INVALID,
+	RXE_MR_STATE_FREE,
+	RXE_MR_STATE_VALID,
 };
 
-enum rxe_mem_type {
-	RXE_MEM_TYPE_NONE,
-	RXE_MEM_TYPE_DMA,
-	RXE_MEM_TYPE_MR,
-	RXE_MEM_TYPE_MW,
+enum rxe_mr_type {
+	RXE_MR_TYPE_NONE,
+	RXE_MR_TYPE_DMA,
+	RXE_MR_TYPE_MR,
+	RXE_MR_TYPE_MW,
 };
 
 #define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))
@@ -287,17 +287,14 @@ struct rxe_map {
 	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
 };
 
-struct rxe_mem {
+struct rxe_mr {
 	struct rxe_pool_entry	pelem;
-	union {
-		struct ib_mr		ibmr;
-		struct ib_mw		ibmw;
-	};
+	struct ib_mr		ibmr;
 
 	struct ib_umem		*umem;
 
-	enum rxe_mem_state	state;
-	enum rxe_mem_type	type;
+	enum rxe_mr_state	state;
+	enum rxe_mr_type	type;
 	u64			va;
 	u64			iova;
 	size_t			length;
@@ -318,6 +315,17 @@ struct rxe_mem {
 	struct rxe_map		**map;
 };
 
+enum rxe_mw_state {
+	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
+	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
+	RXE_MW_STATE_VALID	= RXE_MR_STATE_VALID,
+};
+
+struct rxe_mw {
+	struct ib_mw ibmw;
+	struct rxe_pool_entry pelem;
+};
+
 struct rxe_mc_grp {
 	struct rxe_pool_entry	pelem;
 	spinlock_t		mcg_lock; /* guard group */
@@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
 	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
 }
 
-static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
+static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
 {
-	return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
+	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
 }
 
-static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
+static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
 {
-	return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
+	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
 }
 
-static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
+static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
 {
 	return to_rpd(mr->ibmr.pd);
 }
 
-static inline u32 mr_lkey(struct rxe_mem *mr)
+static inline u32 mr_lkey(struct rxe_mr *mr)
 {
 	return mr->ibmr.lkey;
 }
 
-static inline u32 mr_rkey(struct rxe_mem *mr)
+static inline u32 mr_rkey(struct rxe_mr *mr)
 {
 	return mr->ibmr.rkey;
 }
diff --git a/drivers/infiniband/sw/siw/iwarp.h b/drivers/infiniband/sw/siw/iwarp.h
index e8a04d9c89cb..3f1dedb50a0d 100644
--- a/drivers/infiniband/sw/siw/iwarp.h
+++ b/drivers/infiniband/sw/siw/iwarp.h
@@ -114,13 +114,6 @@ static inline u8 __ddp_get_version(struct iwarp_ctrl *ctrl)
 	return be16_to_cpu(ctrl->ddp_rdmap_ctrl & DDP_MASK_VERSION) >> 8;
 }
 
-static inline void __ddp_set_version(struct iwarp_ctrl *ctrl, u8 version)
-{
-	ctrl->ddp_rdmap_ctrl =
-		(ctrl->ddp_rdmap_ctrl & ~DDP_MASK_VERSION) |
-		(cpu_to_be16((u16)version << 8) & DDP_MASK_VERSION);
-}
-
 static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
 {
 	__be16 ver = ctrl->ddp_rdmap_ctrl & RDMAP_MASK_VERSION;
@@ -128,12 +121,6 @@ static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
 	return be16_to_cpu(ver) >> 6;
 }
 
-static inline void __rdmap_set_version(struct iwarp_ctrl *ctrl, u8 version)
-{
-	ctrl->ddp_rdmap_ctrl = (ctrl->ddp_rdmap_ctrl & ~RDMAP_MASK_VERSION) |
-			       (cpu_to_be16(version << 6) & RDMAP_MASK_VERSION);
-}
-
 static inline u8 __rdmap_get_opcode(struct iwarp_ctrl *ctrl)
 {
 	return be16_to_cpu(ctrl->ddp_rdmap_ctrl & RDMAP_MASK_OPCODE);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 1f9e15b71504..7a5ed86ffc9f 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1300,7 +1300,7 @@ static void siw_cm_llp_state_change(struct sock *sk)
 }
 
 static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
-			      struct sockaddr *raddr)
+			      struct sockaddr *raddr, bool afonly)
 {
 	int rv, flags = 0;
 	size_t size = laddr->sa_family == AF_INET ?
@@ -1311,6 +1311,12 @@ static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
 	 */
 	sock_set_reuseaddr(s->sk);
 
+	if (afonly) {
+		rv = ip6_sock_set_v6only(s->sk);
+		if (rv)
+			return rv;
+	}
+
 	rv = s->ops->bind(s, laddr, size);
 	if (rv < 0)
 		return rv;
@@ -1371,7 +1377,7 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
 	 * mode. Might be reconsidered for async connection setup at
 	 * TCP level.
 	 */
-	rv = kernel_bindconnect(s, laddr, raddr);
+	rv = kernel_bindconnect(s, laddr, raddr, id->afonly);
 	if (rv != 0) {
 		siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
 		goto error;
@@ -1786,6 +1792,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 	} else {
 		struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
 
+		if (id->afonly) {
+			rv = ip6_sock_set_v6only(s->sk);
+			if (rv) {
+				siw_dbg(id->device,
+					"ip6_sock_set_v6only error: %d\n", rv);
+				goto error;
+			}
+		}
+
 		/* For wildcard addr, limit binding to current device only */
 		if (ipv6_addr_any(&laddr->sin6_addr))
 			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
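The id->afonly handling above maps to the IPV6_V6ONLY socket option: ip6_sock_set_v6only() is the in-kernel setter, and with it set an AF_INET6 socket stops accepting IPv4-mapped peers. For reference, the userspace equivalent:

    #include <netinet/in.h>
    #include <sys/socket.h>

    /* what ip6_sock_set_v6only() arranges kernel-side: restrict an
     * AF_INET6 socket to IPv6 traffic only (no v4-mapped addresses) */
    int make_v6only_socket(void)
    {
            int one = 1;
            int fd = socket(AF_INET6, SOCK_STREAM, 0);

            if (fd < 0)
                    return -1;
            if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)) < 0)
                    return -1;
            return fd;
    }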
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 34a910cf0edb..61c17db70d65 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
 	mem->perms = rights & IWARP_ACCESS_MASK;
 	kref_init(&mem->ref);
 
-	mr->mem = mem;
-
 	get_random_bytes(&next, 4);
 	next &= 0x00ffffff;
 
@@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
 		kfree(mem);
 		return -ENOMEM;
 	}
+
+	mr->mem = mem;
 	/* Set the STag index part */
 	mem->stag = id << 8;
 	mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index db138c8423da..f911287576d1 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -29,11 +29,6 @@ static inline void siw_mem_put(struct siw_mem *mem)
 	kref_put(&mem->ref, siw_free_mem);
 }
 
-static inline struct siw_mr *siw_mem2mr(struct siw_mem *m)
-{
-	return container_of(m, struct siw_mr, mem);
-}
-
 static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
 {
 	while (num_sge) {
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index e389d44e5591..d2313efb26db 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -160,7 +160,7 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
 	return 0;
 }
 
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
 		   struct ib_port_attr *attr)
 {
 	struct siw_device *sdev = to_siw_dev(base_dev);
@@ -194,7 +194,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
 	return rv;
 }
 
-int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
 			   struct ib_port_immutable *port_immutable)
 {
 	struct ib_port_attr attr;
@@ -209,7 +209,7 @@ int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
 	return 0;
 }
 
-int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
 		  union ib_gid *gid)
 {
 	struct siw_device *sdev = to_siw_dev(base_dev);
@@ -1848,7 +1848,7 @@ void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
 	}
 }
 
-void siw_port_event(struct siw_device *sdev, u8 port, enum ib_event_type etype)
+void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
 {
 	struct ib_event event;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 637454529357..67ac08886a70 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -36,17 +36,17 @@ static inline void siw_copy_sgl(struct ib_sge *sge, struct siw_sge *siw_sge,
 int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata);
 void siw_dealloc_ucontext(struct ib_ucontext *base_ctx);
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
 		   struct ib_port_attr *attr);
-int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
 			   struct ib_port_immutable *port_immutable);
 int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
 		     struct ib_udata *udata);
 int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
 		  struct ib_udata *udata);
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
 		   struct ib_port_attr *attr);
-int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
 		  union ib_gid *gid);
 int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
 int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
@@ -86,6 +86,6 @@ void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
 void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
 void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
 void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
-void siw_port_event(struct siw_device *dev, u8 port, enum ib_event_type type);
+void siw_port_event(struct siw_device *dev, u32 port, enum ib_event_type type);
 
 #endif