Diffstat (limited to 'drivers/infiniband/core')
32 files changed, 638 insertions, 379 deletions
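Every hunk below follows the same pattern: port numbers widen from u8 (or a mix of int/unsigned int) to u32 throughout the core, so nothing caps a device at 255 ports. A minimal sketch of the consumer pattern these signatures serve, assuming an ib_device *ibdev; the loop mirrors the new cma_supported() helper further down:

        u32 port;

        /* rdma_for_each_port() visits ports 1..phys_port_cnt; the
         * iterator and every per-port capability helper now take u32. */
        rdma_for_each_port(ibdev, port) {
                if (!rdma_cap_ib_cm(ibdev, port) &&
                    !rdma_cap_iw_cm(ibdev, port))
                        continue;   /* port speaks neither IB nor iWarp CM */
                /* ... per-port setup ... */
        }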
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 5c9fac7cf420..3b0991fedd81 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -121,7 +121,7 @@ struct ib_gid_table { u32 default_gid_indices; }; -static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port) +static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port) { struct ib_event event; @@ -197,7 +197,7 @@ int ib_cache_gid_parse_type_str(const char *buf) } EXPORT_SYMBOL(ib_cache_gid_parse_type_str); -static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port) +static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port) { return device->port_data[port].cache.gid; } @@ -237,10 +237,10 @@ static void put_gid_ndev(struct rcu_head *head) static void free_gid_entry_locked(struct ib_gid_table_entry *entry) { struct ib_device *device = entry->attr.device; - u8 port_num = entry->attr.port_num; + u32 port_num = entry->attr.port_num; struct ib_gid_table *table = rdma_gid_table(device, port_num); - dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__, + dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__, port_num, entry->attr.index, entry->attr.gid.raw); write_lock_irq(&table->rwlock); @@ -282,7 +282,7 @@ static void free_gid_work(struct work_struct *work) struct ib_gid_table_entry *entry = container_of(work, struct ib_gid_table_entry, del_work); struct ib_device *device = entry->attr.device; - u8 port_num = entry->attr.port_num; + u32 port_num = entry->attr.port_num; struct ib_gid_table *table = rdma_gid_table(device, port_num); mutex_lock(&table->lock); @@ -379,7 +379,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry) * @ix: GID entry index to delete * */ -static void del_gid(struct ib_device *ib_dev, u8 port, +static void del_gid(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table, int ix) { struct roce_gid_ndev_storage *ndev_storage; @@ -387,7 +387,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port, lockdep_assert_held(&table->lock); - dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port, + dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port, ix, table->data_vec[ix]->attr.gid.raw); write_lock_irq(&table->rwlock); @@ -543,7 +543,7 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid) addrconf_ifid_eui48(&gid->raw[8], dev); } -static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port, +static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr, unsigned long mask, bool default_gid) { @@ -587,7 +587,7 @@ out_unlock: return ret; } -int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr) { unsigned long mask = GID_ATTR_FIND_MASK_GID | @@ -598,7 +598,7 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, } static int -_ib_cache_gid_del(struct ib_device *ib_dev, u8 port, +_ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr, unsigned long mask, bool default_gid) { @@ -627,7 +627,7 @@ out_unlock: return ret; } -int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr) { unsigned long mask = GID_ATTR_FIND_MASK_GID | @@ -638,7 +638,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, return 
_ib_cache_gid_del(ib_dev, port, gid, attr, mask, false); } -int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { struct ib_gid_table *table; @@ -683,7 +683,7 @@ const struct ib_gid_attr * rdma_find_gid_by_port(struct ib_device *ib_dev, const union ib_gid *gid, enum ib_gid_type gid_type, - u8 port, struct net_device *ndev) + u32 port, struct net_device *ndev) { int local_index; struct ib_gid_table *table; @@ -734,7 +734,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port); * */ const struct ib_gid_attr *rdma_find_gid_by_filter( - struct ib_device *ib_dev, const union ib_gid *gid, u8 port, + struct ib_device *ib_dev, const union ib_gid *gid, u32 port, bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *, void *), void *context) @@ -818,7 +818,7 @@ static void release_gid_table(struct ib_device *device, kfree(table); } -static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port, +static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table) { int i; @@ -834,7 +834,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port, mutex_unlock(&table->lock); } -void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, +void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port, struct net_device *ndev, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode) @@ -867,7 +867,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, } } -static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port, +static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port, struct ib_gid_table *table) { unsigned int i; @@ -884,7 +884,7 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port, static void gid_table_release_one(struct ib_device *ib_dev) { - unsigned int p; + u32 p; rdma_for_each_port (ib_dev, p) { release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid); @@ -895,7 +895,7 @@ static void gid_table_release_one(struct ib_device *ib_dev) static int _gid_table_setup_one(struct ib_device *ib_dev) { struct ib_gid_table *table; - unsigned int rdma_port; + u32 rdma_port; rdma_for_each_port (ib_dev, rdma_port) { table = alloc_gid_table( @@ -915,7 +915,7 @@ rollback_table_setup: static void gid_table_cleanup_one(struct ib_device *ib_dev) { - unsigned int p; + u32 p; rdma_for_each_port (ib_dev, p) cleanup_gid_table_port(ib_dev, p, @@ -950,7 +950,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev) * Returns 0 on success or appropriate error code. 
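A hedged caller sketch for rdma_query_gid(), whose widened signature follows; the port and index values are illustrative:

        union ib_gid gid;
        int ret;

        /* Read the cached GID at index 0 of port 1. */
        ret = rdma_query_gid(ibdev, 1, 0, &gid);
        if (ret)
                return ret;     /* -EINVAL for an invalid port or index */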
* */ -int rdma_query_gid(struct ib_device *device, u8 port_num, +int rdma_query_gid(struct ib_device *device, u32 port_num, int index, union ib_gid *gid) { struct ib_gid_table *table; @@ -1014,7 +1014,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, unsigned long mask = GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE; struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type}; - unsigned int p; + u32 p; if (ndev) mask |= GID_ATTR_FIND_MASK_NETDEV; @@ -1043,7 +1043,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, EXPORT_SYMBOL(rdma_find_gid); int ib_get_cached_pkey(struct ib_device *device, - u8 port_num, + u32 port_num, int index, u16 *pkey) { @@ -1069,9 +1069,8 @@ int ib_get_cached_pkey(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_pkey); -int ib_get_cached_subnet_prefix(struct ib_device *device, - u8 port_num, - u64 *sn_pfx) +int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num, + u64 *sn_pfx) { unsigned long flags; @@ -1086,10 +1085,8 @@ int ib_get_cached_subnet_prefix(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_subnet_prefix); -int ib_find_cached_pkey(struct ib_device *device, - u8 port_num, - u16 pkey, - u16 *index) +int ib_find_cached_pkey(struct ib_device *device, u32 port_num, + u16 pkey, u16 *index) { struct ib_pkey_cache *cache; unsigned long flags; @@ -1116,8 +1113,9 @@ int ib_find_cached_pkey(struct ib_device *device, *index = i; ret = 0; break; - } else + } else { partial_ix = i; + } } if (ret && partial_ix >= 0) { @@ -1132,10 +1130,8 @@ err: } EXPORT_SYMBOL(ib_find_cached_pkey); -int ib_find_exact_cached_pkey(struct ib_device *device, - u8 port_num, - u16 pkey, - u16 *index) +int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num, + u16 pkey, u16 *index) { struct ib_pkey_cache *cache; unsigned long flags; @@ -1169,9 +1165,7 @@ err: } EXPORT_SYMBOL(ib_find_exact_cached_pkey); -int ib_get_cached_lmc(struct ib_device *device, - u8 port_num, - u8 *lmc) +int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc) { unsigned long flags; int ret = 0; @@ -1187,8 +1181,7 @@ int ib_get_cached_lmc(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_lmc); -int ib_get_cached_port_state(struct ib_device *device, - u8 port_num, +int ib_get_cached_port_state(struct ib_device *device, u32 port_num, enum ib_port_state *port_state) { unsigned long flags; @@ -1222,7 +1215,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state); * code. 
*/ const struct ib_gid_attr * -rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index) +rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index) { const struct ib_gid_attr *attr = ERR_PTR(-ENODATA); struct ib_gid_table *table; @@ -1263,7 +1256,7 @@ ssize_t rdma_query_gid_table(struct ib_device *device, const struct ib_gid_attr *gid_attr; ssize_t num_entries = 0, ret; struct ib_gid_table *table; - unsigned int port_num, i; + u32 port_num, i; struct net_device *ndev; unsigned long flags; @@ -1361,7 +1354,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr) container_of(attr, struct ib_gid_table_entry, attr); struct ib_device *device = entry->attr.device; struct net_device *ndev = ERR_PTR(-EINVAL); - u8 port_num = entry->attr.port_num; + u32 port_num = entry->attr.port_num; struct ib_gid_table *table; unsigned long flags; bool valid; @@ -1441,7 +1434,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr, EXPORT_SYMBOL(rdma_read_gid_l2_fields); static int config_non_roce_gid_cache(struct ib_device *device, - u8 port, int gid_tbl_len) + u32 port, int gid_tbl_len) { struct ib_gid_attr gid_attr = {}; struct ib_gid_table *table; @@ -1472,7 +1465,7 @@ err: } static int -ib_cache_update(struct ib_device *device, u8 port, bool enforce_security) +ib_cache_update(struct ib_device *device, u32 port, bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; @@ -1621,7 +1614,7 @@ EXPORT_SYMBOL(ib_dispatch_event); int ib_cache_setup_one(struct ib_device *device) { - unsigned int p; + u32 p; int err; rwlock_init(&device->cache_lock); @@ -1641,7 +1634,7 @@ int ib_cache_setup_one(struct ib_device *device) void ib_cache_release_one(struct ib_device *device) { - unsigned int p; + u32 p; /* * The release function frees all the cache elements. diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 3d194bb60840..0ead0d223154 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -202,7 +202,7 @@ static struct attribute *cm_counter_default_attrs[] = { struct cm_port { struct cm_device *cm_dev; struct ib_mad_agent *mad_agent; - u8 port_num; + u32 port_num; struct list_head cm_priv_prim_list; struct list_head cm_priv_altr_list; struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; @@ -255,7 +255,8 @@ struct cm_id_private { struct completion comp; refcount_t refcount; /* Number of clients sharing this ib_cm_id. Only valid for listeners. - * Protected by the cm.lock spinlock. */ + * Protected by the cm.lock spinlock. 
+ */ int listen_sharecount; struct rcu_head rcu; @@ -420,8 +421,7 @@ static int cm_alloc_response_msg(struct cm_port *port, return 0; } -static void * cm_copy_private_data(const void *private_data, - u8 private_data_len) +static void *cm_copy_private_data(const void *private_data, u8 private_data_len) { void *data; @@ -680,8 +680,8 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv, return cm_id_priv; } -static struct cm_id_private * cm_find_listen(struct ib_device *device, - __be64 service_id) +static struct cm_id_private *cm_find_listen(struct ib_device *device, + __be64 service_id) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; @@ -708,8 +708,8 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device, return NULL; } -static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info - *timewait_info) +static struct cm_timewait_info * +cm_insert_remote_id(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_id_table.rb_node; struct rb_node *parent = NULL; @@ -767,8 +767,8 @@ static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid, return res; } -static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info - *timewait_info) +static struct cm_timewait_info * +cm_insert_remote_qpn(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_qp_table.rb_node; struct rb_node *parent = NULL; @@ -797,8 +797,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info return NULL; } -static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private - *cm_id_priv) +static struct cm_id_private * +cm_insert_remote_sidr(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.remote_sidr_table.rb_node; struct rb_node *parent = NULL; @@ -897,7 +897,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, } EXPORT_SYMBOL(ib_create_cm_id); -static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) +static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv) { struct cm_work *work; @@ -986,7 +986,7 @@ static void cm_remove_remote(struct cm_id_private *cm_id_priv) } } -static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) +static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id) { struct cm_timewait_info *timewait_info; @@ -1631,7 +1631,7 @@ static bool cm_req_has_alt_path(struct cm_req_msg *req_msg) req_msg)))); } -static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num, +static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num, struct sa_path_rec *path, union ib_gid *gid) { if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num)) @@ -1750,7 +1750,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, static u16 cm_get_bth_pkey(struct cm_work *work) { struct ib_device *ib_dev = work->port->cm_dev->ib_device; - u8 port_num = work->port->port_num; + u32 port_num = work->port->port_num; u16 pkey_index = work->mad_recv_wc->wc->pkey_index; u16 pkey; int ret; @@ -1778,7 +1778,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work, struct sa_path_rec *path) { struct ib_device *dev = work->port->cm_dev->ib_device; - u8 port_num = work->port->port_num; + u32 port_num = work->port->port_num; if (rdma_cap_opa_ah(dev, port_num) && (ib_is_opa_gid(&path->sgid))) { @@ -1977,8 +1977,8 @@ unlock: spin_unlock_irq(&cm_id_priv->lock); free: cm_free_msg(msg); } -static struct cm_id_private * 
cm_match_req(struct cm_work *work, - struct cm_id_private *cm_id_priv) +static struct cm_id_private *cm_match_req(struct cm_work *work, + struct cm_id_private *cm_id_priv) { struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; struct cm_timewait_info *timewait_info; @@ -2138,20 +2138,17 @@ static int cm_req_handler(struct cm_work *work) goto destroy; } - cm_process_routed_req(req_msg, work->mad_recv_wc->wc); - memset(&work->path[0], 0, sizeof(work->path[0])); if (cm_req_has_alt_path(req_msg)) memset(&work->path[1], 0, sizeof(work->path[1])); grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); gid_attr = grh->sgid_attr; - if (gid_attr && - rdma_protocol_roce(work->port->cm_dev->ib_device, - work->port->port_num)) { + if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) { work->path[0].rec_type = sa_conv_gid_to_pathrec_type(gid_attr->gid_type); } else { + cm_process_routed_req(req_msg, work->mad_recv_wc->wc); cm_path_set_rec_type( work->port->cm_dev->ib_device, work->port->port_num, &work->path[0], @@ -2993,7 +2990,7 @@ static void cm_format_rej_event(struct cm_work *work) IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg); } -static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) +static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) { struct cm_id_private *cm_id_priv; __be32 remote_id; @@ -3098,7 +3095,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); - switch(cm_id_priv->id.state) { + switch (cm_id_priv->id.state) { case IB_CM_REQ_RCVD: cm_state = IB_CM_MRA_REQ_SENT; lap_state = cm_id->lap_state; @@ -3155,7 +3152,7 @@ error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); } EXPORT_SYMBOL(ib_send_cm_mra); -static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) +static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) { switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) { case CM_MSG_RESPONSE_REQ: @@ -3917,8 +3914,7 @@ static int cm_establish(struct ib_cm_id *cm_id) cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); - switch (cm_id->state) - { + switch (cm_id->state) { case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_id->state = IB_CM_ESTABLISHED; @@ -4334,7 +4330,7 @@ static int cm_add_one(struct ib_device *ib_device) unsigned long flags; int ret; int count = 0; - unsigned int i; + u32 i; cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), GFP_KERNEL); @@ -4432,7 +4428,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; - unsigned int i; + u32 i; write_lock_irqsave(&cm.device_lock, flags); list_del(&cm_dev->list); diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h index 0cc40656b5c5..8462de7ca26e 100644 --- a/drivers/infiniband/core/cm_msgs.h +++ b/drivers/infiniband/core/cm_msgs.h @@ -22,7 +22,7 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg) { u8 transport_type = IBA_GET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg); - switch(transport_type) { + switch (transport_type) { case 0: return IB_QPT_RC; case 1: return IB_QPT_UC; case 3: @@ -37,7 +37,7 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg) static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg, enum ib_qp_type qp_type) { - switch(qp_type) { + switch (qp_type) { case IB_QPT_UC: 
IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 1); break; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 94096511599f..2b9ffc21cbc4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -43,7 +43,6 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent"); MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_RESPONSE_TIMEOUT 20 -#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000 #define CMA_MAX_CM_RETRIES 15 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 18 @@ -219,14 +218,6 @@ struct rdma_bind_list { unsigned short port; }; -struct class_port_info_context { - struct ib_class_port_info *class_port_info; - struct ib_device *device; - struct completion done; - struct ib_sa_query *sa_query; - u8 port_num; -}; - static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps, struct rdma_bind_list *bind_list, int snum) { @@ -287,7 +278,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, } int cma_get_default_gid_type(struct cma_device *cma_dev, - unsigned int port) + u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; @@ -296,7 +287,7 @@ int cma_get_default_gid_type(struct cma_device *cma_dev, } int cma_set_default_gid_type(struct cma_device *cma_dev, - unsigned int port, + u32 port, enum ib_gid_type default_gid_type) { unsigned long supported_gids; @@ -319,7 +310,7 @@ int cma_set_default_gid_type(struct cma_device *cma_dev, return 0; } -int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port) +int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; @@ -327,7 +318,7 @@ int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port) return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; } -int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port, +int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, u8 default_roce_tos) { if (!rdma_is_port_valid(cma_dev->device, port)) @@ -463,7 +454,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); list_add_tail(&id_priv->list, &cma_dev->id_list); - rdma_restrack_add(&id_priv->res); trace_cm_id_attach(id_priv, cma_dev->device); } @@ -562,7 +552,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a } static const struct ib_gid_attr * -cma_validate_port(struct ib_device *device, u8 port, +cma_validate_port(struct ib_device *device, u32 port, enum ib_gid_type gid_type, union ib_gid *gid, struct rdma_id_private *id_priv) @@ -620,7 +610,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; - unsigned int port; + u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) @@ -700,6 +690,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv, mutex_lock(&lock); cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); mutex_unlock(&lock); + rdma_restrack_add(&id_priv->res); return 0; } @@ -711,8 +702,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; - unsigned int port; union ib_gid gid; + u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) @@ -754,8 +745,10 @@ static int 
cma_iw_acquire_dev(struct rdma_id_private *id_priv, } out: - if (!ret) + if (!ret) { cma_attach_to_dev(id_priv, cma_dev); + rdma_restrack_add(&id_priv->res); + } mutex_unlock(&lock); return ret; @@ -816,6 +809,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) found: cma_attach_to_dev(id_priv, cma_dev); + rdma_restrack_add(&id_priv->res); mutex_unlock(&lock); addr = (struct sockaddr_ib *)cma_src_addr(id_priv); memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); @@ -852,6 +846,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, id_priv->id.qp_type = qp_type; id_priv->tos_set = false; id_priv->timeout_set = false; + id_priv->min_rnr_timer_set = false; id_priv->gid_type = IB_GID_TYPE_IB; spin_lock_init(&id_priv->lock); mutex_init(&id_priv->qp_mutex); @@ -1135,12 +1130,16 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, qp_attr_mask); qp_attr->port_num = id_priv->id.port_num; *qp_attr_mask |= IB_QP_PORT; - } else + } else { ret = -ENOSYS; + } if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) qp_attr->timeout = id_priv->timeout; + if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) + qp_attr->min_rnr_timer = id_priv->min_rnr_timer; + return ret; } EXPORT_SYMBOL(rdma_init_qp_attr); @@ -1581,7 +1580,7 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv, static bool cma_protocol_roce(const struct rdma_cm_id *id) { struct ib_device *device = id->device; - const int port_num = id->port_num ?: rdma_start_port(device); + const u32 port_num = id->port_num ?: rdma_start_port(device); return rdma_protocol_roce(device, port_num); } @@ -2474,6 +2473,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) id->tos = id_priv->tos; id->tos_set = id_priv->tos_set; + id->afonly = id_priv->afonly; id_priv->cm_id.iw = id; memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), @@ -2529,6 +2529,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv, rdma_addr_size(cma_src_addr(id_priv))); _cma_attach_to_dev(dev_id_priv, cma_dev); + rdma_restrack_add(&dev_id_priv->res); cma_id_get(id_priv); dev_id_priv->internal_id = 1; dev_id_priv->afonly = id_priv->afonly; @@ -2615,6 +2616,43 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) } EXPORT_SYMBOL(rdma_set_ack_timeout); +/** + * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the + * QP associated with a connection identifier. + * @id: Communication identifier to associated with service type. + * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK + * Timer Field" in the IBTA specification. + * + * This function should be called before rdma_connect() on active + * side, and on passive side before rdma_accept(). The timer value + * will be associated with the local QP. When it receives a send it is + * not read to handle, typically if the receive queue is empty, an RNR + * Retry NAK is returned to the requester with the min_rnr_timer + * encoded. The requester will then wait at least the time specified + * in the NAK before retrying. The default is zero, which translates + * to a minimum RNR Timer value of 655 ms. 
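A hedged passive-side sketch of the new helper (the handler shape and the chosen encoding are illustrative assumptions; IB_RNR_TIMER_000_64 is the in-tree enum for the 5-bit value 12, i.e. 0.64 ms):

        /* Called for RDMA_CM_EVENT_CONNECT_REQUEST before accepting. */
        static int on_connect_request(struct rdma_cm_id *id,
                                      struct rdma_conn_param *param)
        {
                int ret;

                ret = rdma_set_min_rnr_timer(id, IB_RNR_TIMER_000_64);
                if (ret)   /* -EINVAL: over 5 bits, or QP not RC/XRC_TGT */
                        return ret;

                return rdma_accept(id, param);
        }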
+ * + * Return: 0 for success + */ +int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) +{ + struct rdma_id_private *id_priv; + + /* It is a five-bit value */ + if (min_rnr_timer & 0xe0) + return -EINVAL; + + if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) + return -EINVAL; + + id_priv = container_of(id, struct rdma_id_private, id); + id_priv->min_rnr_timer = min_rnr_timer; + id_priv->min_rnr_timer_set = true; + + return 0; +} +EXPORT_SYMBOL(rdma_set_min_rnr_timer); + static void cma_query_handler(int status, struct sa_path_rec *path_rec, void *context) { @@ -3169,6 +3207,7 @@ port_found: ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); id_priv->id.port_num = p; cma_attach_to_dev(id_priv, cma_dev); + rdma_restrack_add(&id_priv->res); cma_set_loopback(cma_src_addr(id_priv)); out: mutex_unlock(&lock); @@ -3201,6 +3240,7 @@ static void addr_handler(int status, struct sockaddr *src_addr, if (status) pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", status); + rdma_restrack_add(&id_priv->res); } else if (status) { pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status); } @@ -3812,6 +3852,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) if (ret) goto err2; + if (!cma_any_addr(addr)) + rdma_restrack_add(&id_priv->res); return 0; err2: if (id_priv->cma_dev) @@ -4124,10 +4166,11 @@ int rdma_connect_locked(struct rdma_cm_id *id, ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); - } else if (rdma_cap_iw_cm(id->device, id->port_num)) + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = cma_connect_iw(id_priv, conn_param); - else + } else { ret = -ENOSYS; + } if (ret) goto err_state; return 0; @@ -4234,9 +4277,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv, iw_param.ird = conn_param->responder_resources; iw_param.private_data = conn_param->private_data; iw_param.private_data_len = conn_param->private_data_len; - if (id_priv->id.qp) { + if (id_priv->id.qp) iw_param.qpn = id_priv->qp_num; - } else + else iw_param.qpn = conn_param->qp_num; return iw_cm_accept(id_priv->cm_id.iw, &iw_param); @@ -4319,11 +4362,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) else ret = cma_rep_recv(id_priv); } - } else if (rdma_cap_iw_cm(id->device, id->port_num)) + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = cma_accept_iw(id_priv, conn_param); - else + } else { ret = -ENOSYS; - + } if (ret) goto reject; @@ -4409,8 +4452,9 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_reject(id_priv->cm_id.iw, private_data, private_data_len); - } else + } else { ret = -ENOSYS; + } return ret; } @@ -4864,14 +4908,28 @@ static void cma_process_remove(struct cma_device *cma_dev) wait_for_completion(&cma_dev->comp); } +static bool cma_supported(struct ib_device *device) +{ + u32 i; + + rdma_for_each_port(device, i) { + if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) + return true; + } + return false; +} + static int cma_add_one(struct ib_device *device) { struct rdma_id_private *to_destroy; struct cma_device *cma_dev; struct rdma_id_private *id_priv; - unsigned int i; unsigned long supported_gids = 0; int ret; + u32 i; + + if (!cma_supported(device)) + return -EOPNOTSUPP; cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); if (!cma_dev) diff --git a/drivers/infiniband/core/cma_configfs.c 
b/drivers/infiniband/core/cma_configfs.c index e0d5e3bae458..9ac16e0db761 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -43,7 +43,7 @@ struct cma_device; struct cma_dev_group; struct cma_dev_port_group { - unsigned int port_num; + u32 port_num; struct cma_dev_group *cma_dev_group; struct config_group group; }; @@ -200,10 +200,10 @@ static const struct config_item_type cma_port_group_type = { static int make_cma_ports(struct cma_dev_group *cma_dev_group, struct cma_device *cma_dev) { - struct ib_device *ibdev; - unsigned int i; - unsigned int ports_num; struct cma_dev_port_group *ports; + struct ib_device *ibdev; + u32 ports_num; + u32 i; ibdev = cma_get_ib_dev(cma_dev); diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h index caece96ebcf5..5c463da99845 100644 --- a/drivers/infiniband/core/cma_priv.h +++ b/drivers/infiniband/core/cma_priv.h @@ -86,9 +86,11 @@ struct rdma_id_private { u8 tos; u8 tos_set:1; u8 timeout_set:1; + u8 min_rnr_timer_set:1; u8 reuseaddr; u8 afonly; u8 timeout; + u8 min_rnr_timer; enum ib_gid_type gid_type; /* @@ -117,11 +119,11 @@ void cma_dev_put(struct cma_device *dev); typedef bool (*cma_device_filter)(struct ib_device *, void *); struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, void *cookie); -int cma_get_default_gid_type(struct cma_device *dev, unsigned int port); -int cma_set_default_gid_type(struct cma_device *dev, unsigned int port, +int cma_get_default_gid_type(struct cma_device *dev, u32 port); +int cma_set_default_gid_type(struct cma_device *dev, u32 port, enum ib_gid_type default_gid_type); -int cma_get_default_roce_tos(struct cma_device *dev, unsigned int port); -int cma_set_default_roce_tos(struct cma_device *dev, unsigned int port, +int cma_get_default_roce_tos(struct cma_device *dev, u32 port); +int cma_set_default_roce_tos(struct cma_device *dev, u32 port, u8 default_roce_tos); struct ib_device *cma_get_ib_dev(struct cma_device *dev); diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 315f7a297eee..29809dd30041 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -83,14 +83,14 @@ void ib_device_unregister_sysfs(struct ib_device *device); int ib_device_rename(struct ib_device *ibdev, const char *name); int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim); -typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port, +typedef void (*roce_netdev_callback)(struct ib_device *device, u32 port, struct net_device *idev, void *cookie); -typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port, +typedef bool (*roce_netdev_filter)(struct ib_device *device, u32 port, struct net_device *idev, void *cookie); struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, - unsigned int port); + u32 port); void ib_enum_roce_netdev(struct ib_device *ib_dev, roce_netdev_filter filter, @@ -113,7 +113,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, struct ib_client_nl_info { struct sk_buff *nl_msg; struct device *cdev; - unsigned int port; + u32 port; u64 abi; }; int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, @@ -128,24 +128,24 @@ int ib_cache_gid_parse_type_str(const char *buf); const char *ib_cache_gid_type_str(enum ib_gid_type gid_type); -void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, +void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port, struct net_device 
*ndev, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode); -int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_add(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr); -int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del(struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *attr); -int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, +int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *ndev); int roce_gid_mgmt_init(void); void roce_gid_mgmt_cleanup(void); -unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port); +unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port); int ib_cache_setup_one(struct ib_device *device); void ib_cache_cleanup_one(struct ib_device *device); @@ -215,14 +215,14 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb, struct netlink_ext_ack *extack); int ib_get_cached_subnet_prefix(struct ib_device *device, - u8 port_num, - u64 *sn_pfx); + u32 port_num, + u64 *sn_pfx); #ifdef CONFIG_SECURITY_INFINIBAND void ib_security_release_port_pkey_list(struct ib_device *device); void ib_security_cache_change(struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix); int ib_security_modify_qp(struct ib_qp *qp, @@ -247,7 +247,7 @@ static inline void ib_security_release_port_pkey_list(struct ib_device *device) } static inline void ib_security_cache_change(struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix) { } @@ -381,7 +381,7 @@ int ib_setup_port_attrs(struct ib_core_device *coredev); int rdma_compatdev_set(u8 enable); -int ib_port_register_module_stat(struct ib_device *device, u8 port_num, +int ib_port_register_module_stat(struct ib_device *device, u32 port_num, struct kobject *kobj, struct kobj_type *ktype, const char *name); void ib_port_unregister_module_stat(struct kobject *kobj); diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index f3a7c1f404af..15493357cfef 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -14,10 +14,12 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter, enum rdma_nl_counter_mode new_mode, enum rdma_nl_counter_mask new_mask) { - if (new_mode == RDMA_COUNTER_MODE_AUTO && port_counter->num_counters) - if (new_mask & ~ALL_AUTO_MODE_MASKS || - port_counter->mode.mode != RDMA_COUNTER_MODE_NONE) + if (new_mode == RDMA_COUNTER_MODE_AUTO) { + if (new_mask & (~ALL_AUTO_MODE_MASKS)) return -EINVAL; + if (port_counter->num_counters) + return -EBUSY; + } port_counter->mode.mode = new_mode; port_counter->mode.mask = new_mask; @@ -32,14 +34,17 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter, * @mask: Mask to configure * @extack: Message to the user * - * Return 0 on success. + * Return 0 on success. If counter mode wasn't changed then it is considered + * as success as well. + * Return -EBUSY when changing to auto mode while there are bounded counters. 
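Condensed restatement of the rules the rewritten body below implements (the helper name is illustrative, not in the patch): a nonzero mask requests auto mode, which now fails with -EBUSY rather than -EINVAL while QPs are manually bound; a zero mask falls back to manual or none, and a no-op request succeeds:

        static enum rdma_nl_counter_mode
        requested_mode(const struct rdma_port_counter *pc,
                       enum rdma_nl_counter_mask mask)
        {
                if (mask)       /* auto; -EBUSY if pc->num_counters != 0 */
                        return RDMA_COUNTER_MODE_AUTO;
                return pc->num_counters ? RDMA_COUNTER_MODE_MANUAL :
                                          RDMA_COUNTER_MODE_NONE;
        }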
+ * */ -int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, +int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mask mask, struct netlink_ext_ack *extack) { - enum rdma_nl_counter_mode mode = RDMA_COUNTER_MODE_AUTO; struct rdma_port_counter *port_counter; + enum rdma_nl_counter_mode mode; int ret; port_counter = &dev->port_data[port].port_counter; @@ -47,25 +52,26 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, return -EOPNOTSUPP; mutex_lock(&port_counter->lock); - if (mask) { - ret = __counter_set_mode(port_counter, mode, mask); - if (ret) - NL_SET_ERR_MSG( - extack, - "Turning on auto mode is not allowed when there is bound QP"); + if (mask) + mode = RDMA_COUNTER_MODE_AUTO; + else + mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL : + RDMA_COUNTER_MODE_NONE; + + if (port_counter->mode.mode == mode && + port_counter->mode.mask == mask) { + ret = 0; goto out; } - if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) { - ret = -EINVAL; - goto out; - } + ret = __counter_set_mode(port_counter, mode, mask); - mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL : - RDMA_COUNTER_MODE_NONE; - ret = __counter_set_mode(port_counter, mode, 0); out: mutex_unlock(&port_counter->lock); + if (ret == -EBUSY) + NL_SET_ERR_MSG( + extack, + "Modifying auto mode is not allowed when there is a bound QP"); return ret; } @@ -100,7 +106,7 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter, return ret; } -static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port, +static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, struct ib_qp *qp, enum rdma_nl_counter_mode mode) { @@ -238,7 +244,7 @@ static void counter_history_stat_update(struct rdma_counter *counter) * Return: The counter (with ref-count increased) if found */ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp, - u8 port) + u32 port) { struct rdma_port_counter *port_counter; struct rdma_counter *counter = NULL; @@ -282,7 +288,7 @@ static void counter_release(struct kref *kref) * rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on * the auto-mode rule */ -int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) +int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port) { struct rdma_port_counter *port_counter; struct ib_device *dev = qp->device; @@ -352,7 +358,7 @@ int rdma_counter_query_stats(struct rdma_counter *counter) } static u64 get_running_counters_hwstat_sum(struct ib_device *dev, - u8 port, u32 index) + u32 port, u32 index) { struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; @@ -388,7 +394,7 @@ next: * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a * specific port, including the running ones and history data */ -u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index) +u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index) { struct rdma_port_counter *port_counter; u64 sum; @@ -443,7 +449,7 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev, /* * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id */ -int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, +int rdma_counter_bind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; @@ -493,7 +499,7 @@ err: * rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it * The id of new counter is returned in @counter_id */ -int 
rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, +int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port, u32 qp_num, u32 *counter_id) { struct rdma_port_counter *port_counter; @@ -540,7 +546,7 @@ err: /* * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter */ -int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port, +int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; @@ -573,7 +579,7 @@ out: return ret; } -int rdma_counter_get_mode(struct ib_device *dev, u8 port, +int rdma_counter_get_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mode *mode, enum rdma_nl_counter_mask *mask) { diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index aac0fe14e1d9..c660cef66ac6 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -779,7 +779,7 @@ static void remove_client_context(struct ib_device *device, static int alloc_port_data(struct ib_device *device) { struct ib_port_data_rcu *pdata_rcu; - unsigned int port; + u32 port; if (device->port_data) return 0; @@ -788,6 +788,10 @@ static int alloc_port_data(struct ib_device *device) if (WARN_ON(!device->phys_port_cnt)) return -EINVAL; + /* Reserve U32_MAX so the logic to go over all the ports is sane */ + if (WARN_ON(device->phys_port_cnt == U32_MAX)) + return -EINVAL; + /* * device->port_data is indexed directly by the port number to make * access to this data as efficient as possible. @@ -819,7 +823,7 @@ static int alloc_port_data(struct ib_device *device) return 0; } -static int verify_immutable(const struct ib_device *dev, u8 port) +static int verify_immutable(const struct ib_device *dev, u32 port) { return WARN_ON(!rdma_cap_ib_mad(dev, port) && rdma_max_mad_size(dev, port) != 0); @@ -827,7 +831,7 @@ static int verify_immutable(const struct ib_device *dev, u8 port) static int setup_port_data(struct ib_device *device) { - unsigned int port; + u32 port; int ret; ret = alloc_port_data(device); @@ -2005,7 +2009,7 @@ void ib_dispatch_event_clients(struct ib_event *event) } static int iw_query_port(struct ib_device *device, - u8 port_num, + u32 port_num, struct ib_port_attr *port_attr) { struct in_device *inetdev; @@ -2044,7 +2048,7 @@ static int iw_query_port(struct ib_device *device, } static int __ib_query_port(struct ib_device *device, - u8 port_num, + u32 port_num, struct ib_port_attr *port_attr) { union ib_gid gid = {}; @@ -2078,7 +2082,7 @@ static int __ib_query_port(struct ib_device *device, * @port_attr pointer. */ int ib_query_port(struct ib_device *device, - u8 port_num, + u32 port_num, struct ib_port_attr *port_attr) { if (!rdma_is_port_valid(device, port_num)) @@ -2130,7 +2134,7 @@ static void add_ndev_hash(struct ib_port_data *pdata) * NETDEV_UNREGISTER event. 
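Hedged driver-side sketch of ib_device_set_netdev() with the widened port argument (the dev/ndev names are illustrative; RoCE and iWarp drivers make calls of this shape at probe and unregister time):

        /* Bind the underlying net_device to ib_dev port 1 ... */
        ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
        if (ret)
                goto err;

        /* ... and drop the association before the netdev goes away. */
        ib_device_set_netdev(&dev->ib_dev, NULL, 1);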
*/ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, - unsigned int port) + u32 port) { struct net_device *old_ndev; struct ib_port_data *pdata; @@ -2173,7 +2177,7 @@ EXPORT_SYMBOL(ib_device_set_netdev); static void free_netdevs(struct ib_device *ib_dev) { unsigned long flags; - unsigned int port; + u32 port; if (!ib_dev->port_data) return; @@ -2204,7 +2208,7 @@ static void free_netdevs(struct ib_device *ib_dev) } struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, - unsigned int port) + u32 port) { struct ib_port_data *pdata; struct net_device *res; @@ -2291,7 +2295,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev, roce_netdev_callback cb, void *cookie) { - unsigned int port; + u32 port; rdma_for_each_port (ib_dev, port) if (rdma_protocol_roce(ib_dev, port)) { @@ -2369,7 +2373,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, * ib_query_pkey() fetches the specified P_Key table entry. */ int ib_query_pkey(struct ib_device *device, - u8 port_num, u16 index, u16 *pkey) + u32 port_num, u16 index, u16 *pkey) { if (!rdma_is_port_valid(device, port_num)) return -EINVAL; @@ -2414,7 +2418,7 @@ EXPORT_SYMBOL(ib_modify_device); * @port_modify_mask and @port_modify structure. */ int ib_modify_port(struct ib_device *device, - u8 port_num, int port_modify_mask, + u32 port_num, int port_modify_mask, struct ib_port_modify *port_modify) { int rc; @@ -2446,10 +2450,10 @@ EXPORT_SYMBOL(ib_modify_port); * parameter may be NULL. */ int ib_find_gid(struct ib_device *device, union ib_gid *gid, - u8 *port_num, u16 *index) + u32 *port_num, u16 *index) { union ib_gid tmp_gid; - unsigned int port; + u32 port; int ret, i; rdma_for_each_port (device, port) { @@ -2483,7 +2487,7 @@ EXPORT_SYMBOL(ib_find_gid); * @index: The index into the PKey table where the PKey was found. 
*/ int ib_find_pkey(struct ib_device *device, - u8 port_num, u16 pkey, u16 *index) + u32 port_num, u16 pkey, u16 *index) { int ret, i; u16 tmp_pkey; @@ -2526,7 +2530,7 @@ EXPORT_SYMBOL(ib_find_pkey); * */ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, - u8 port, + u32 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr) @@ -2696,7 +2700,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, reg_dm_mr); SET_DEVICE_OP(dev_ops, reg_user_mr); SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf); - SET_DEVICE_OP(dev_ops, req_ncomp_notif); SET_DEVICE_OP(dev_ops, req_notify_cq); SET_DEVICE_OP(dev_ops, rereg_user_mr); SET_DEVICE_OP(dev_ops, resize_cq); diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 30a0ff76b332..932b26f50d03 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c @@ -528,7 +528,8 @@ add_mapping_response_exit: } /* netlink attribute policy for the response to add and query mapping request - * and response with remote address info */ + * and response with remote address info + */ static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = { [IWPM_NLA_RQUERY_MAPPING_SEQ] = { .type = NLA_U32 }, [IWPM_NLA_RQUERY_LOCAL_ADDR] = { diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 9355e521d9f4..2081e4854fb0 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -61,7 +61,7 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, { u16 pkey; struct ib_device *dev = qp_info->port_priv->device; - u8 pnum = qp_info->port_priv->port_num; + u32 pnum = qp_info->port_priv->port_num; struct ib_ud_wr *wr = &mad_send_wr->send_wr; struct rdma_ah_attr attr = {}; @@ -118,7 +118,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc); * Assumes ib_mad_port_list_lock is being held */ static inline struct ib_mad_port_private * -__ib_get_mad_port(struct ib_device *device, int port_num) +__ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; @@ -134,7 +134,7 @@ __ib_get_mad_port(struct ib_device *device, int port_num) * for a device/port */ static inline struct ib_mad_port_private * -ib_get_mad_port(struct ib_device *device, int port_num) +ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; unsigned long flags; @@ -155,8 +155,7 @@ static inline u8 convert_mgmt_class(u8 mgmt_class) static int get_spl_qp_index(enum ib_qp_type qp_type) { - switch (qp_type) - { + switch (qp_type) { case IB_QPT_SMI: return 0; case IB_QPT_GSI: @@ -222,7 +221,7 @@ EXPORT_SYMBOL(ib_response_mad); * Context: Process context. 
*/ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, - u8 port_num, + u32 port_num, enum ib_qp_type qp_type, struct ib_mad_reg_req *mad_reg_req, u8 rmpp_version, @@ -549,7 +548,7 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list) } static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, - u16 pkey_index, u8 port_num, struct ib_wc *wc) + u16 pkey_index, u32 port_num, struct ib_wc *wc) { memset(wc, 0, sizeof *wc); wc->wr_cqe = cqe; @@ -608,7 +607,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_port_private *port_priv; struct ib_mad_agent_private *recv_mad_agent = NULL; struct ib_device *device = mad_agent_priv->agent.device; - u8 port_num; + u32 port_num; struct ib_wc mad_wc; struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); @@ -707,8 +706,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, (const struct ib_mad *)smp, (struct ib_mad *)mad_priv->mad, &mad_size, &out_mad_pkey_index); - switch (ret) - { + switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && mad_agent_priv->agent.recv_handler) { @@ -807,7 +805,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, /* Allocate data segments. */ for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { - seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); + seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask); if (!seg) { free_send_rmpp_list(send_wr); return -ENOMEM; @@ -837,12 +835,11 @@ int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) } EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); -struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, - u32 remote_qpn, u16 pkey_index, - int rmpp_active, - int hdr_len, int data_len, - gfp_t gfp_mask, - u8 base_version) +struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent, + u32 remote_qpn, u16 pkey_index, + int rmpp_active, int hdr_len, + int data_len, gfp_t gfp_mask, + u8 base_version) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; @@ -1275,11 +1272,9 @@ static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, int i; /* Remove any methods for this mad agent */ - for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { - if (method->agent[i] == agent) { + for (i = 0; i < IB_MGMT_MAX_METHODS; i++) + if (method->agent[i] == agent) method->agent[i] = NULL; - } - } } static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, @@ -1454,9 +1449,8 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) * Was MAD registration request supplied * with original registration ? 
*/ - if (!agent_priv->reg_req) { + if (!agent_priv->reg_req) goto out; - } port_priv = agent_priv->qp_info->port_priv; mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); @@ -1613,7 +1607,7 @@ out: if (mad_agent && !mad_agent->agent.recv_handler) { dev_notice(&port_priv->device->dev, - "No receive handler for client %p on port %d\n", + "No receive handler for client %p on port %u\n", &mad_agent->agent, port_priv->port_num); deref_mad_agent(mad_agent); mad_agent = NULL; @@ -1677,15 +1671,16 @@ static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, rwc->recv_buf.mad->mad_hdr.mgmt_class; } -static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, - const struct ib_mad_send_wr_private *wr, - const struct ib_mad_recv_wc *rwc ) +static inline int +rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_send_wr_private *wr, + const struct ib_mad_recv_wc *rwc) { struct rdma_ah_attr attr; u8 send_resp, rcv_resp; union ib_gid sgid; struct ib_device *device = mad_agent_priv->agent.device; - u8 port_num = mad_agent_priv->agent.port_num; + u32 port_num = mad_agent_priv->agent.port_num; u8 lmc; bool has_grh; @@ -1834,7 +1829,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, deref_mad_agent(mad_agent_priv); } else { /* not user rmpp, revert to normal behavior and - * drop the mad */ + * drop the mad + */ ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); return; @@ -1860,14 +1856,12 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, mad_recv_wc); deref_mad_agent(mad_agent_priv); } - - return; } static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, const struct ib_mad_qp_info *qp_info, const struct ib_wc *wc, - int port_num, + u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { @@ -1954,7 +1948,7 @@ static enum smi_action handle_opa_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, - int port_num, + u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { @@ -2010,7 +2004,7 @@ static enum smi_action handle_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, - int port_num, + u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response, bool opa) @@ -2034,7 +2028,7 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv, *response = NULL; struct ib_mad_agent_private *mad_agent; - int port_num; + u32 port_num; int ret = IB_MAD_RESULT_SUCCESS; size_t mad_size; u16 resp_mad_pkey_index = 0; @@ -2202,9 +2196,10 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) temp_mad_send_wr->timeout)) break; } - } - else + } else { list_item = &mad_agent_priv->wait_list; + } + list_add(&mad_send_wr->agent_list, list_item); /* Reschedule a work item if we have a shorter timeout */ @@ -2258,7 +2253,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, adjust_timeout(mad_agent_priv); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); - if (mad_send_wr->status != IB_WC_SUCCESS ) + if (mad_send_wr->status != IB_WC_SUCCESS) mad_send_wc->status = mad_send_wr->status; if (ret == IB_RMPP_RESULT_INTERNAL) ib_rmpp_send_handler(mad_send_wc); @@ -2947,7 +2942,7 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) * Create the QP, PD, MR, and CQ if 
needed */ static int ib_mad_port_open(struct ib_device *device, - int port_num) + u32 port_num) { int ret, cq_size; struct ib_mad_port_private *port_priv; @@ -3002,7 +2997,7 @@ static int ib_mad_port_open(struct ib_device *device, if (ret) goto error7; - snprintf(name, sizeof name, "ib_mad%d", port_num); + snprintf(name, sizeof(name), "ib_mad%u", port_num); port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!port_priv->wq) { ret = -ENOMEM; @@ -3048,7 +3043,7 @@ error3: * If there are no classes using the port, free the port * resources (CQ, MR, PD, QP) and remove the port's info structure */ -static int ib_mad_port_close(struct ib_device *device, int port_num) +static int ib_mad_port_close(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *port_priv; unsigned long flags; @@ -3057,7 +3052,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) port_priv = __ib_get_mad_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); - dev_err(&device->dev, "Port %d not found\n", port_num); + dev_err(&device->dev, "Port %u not found\n", port_num); return -ENODEV; } list_del_init(&port_priv->port_list); diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index e0573e4d0404..8af0619a39cd 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c @@ -382,8 +382,8 @@ static inline int get_seg_num(struct ib_mad_recv_buf *seg) return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); } -static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list, - struct ib_mad_recv_buf *seg) +static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list, + struct ib_mad_recv_buf *seg) { if (seg->list.next == rmpp_list) return NULL; @@ -396,8 +396,8 @@ static inline int window_size(struct ib_mad_agent_private *agent) return max(agent->qp_info->recv_queue.max_active >> 3, 1); } -static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list, - int seg_num) +static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list, + int seg_num) { struct ib_mad_recv_buf *seg_buf; int cur_seg_num; @@ -449,7 +449,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv) return hdr_size + rmpp_recv->seg_num * data_size - pad; } -static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv) +static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv) { struct ib_mad_recv_wc *rmpp_wc; diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 57519ca6cd2c..a5dd4b7a74bc 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -63,7 +63,7 @@ struct mcast_port { struct rb_root table; atomic_t refcount; struct completion comp; - u8 port_num; + u32 port_num; }; struct mcast_device { @@ -605,7 +605,7 @@ found: */ struct ib_sa_multicast * ib_sa_join_multicast(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, gfp_t gfp_mask, int (*callback)(int status, @@ -690,7 +690,7 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast) } EXPORT_SYMBOL(ib_sa_free_multicast); -int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num, +int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num, union ib_gid *mgid, struct ib_sa_mcmember_rec *rec) { struct mcast_device *dev; @@ -732,7 +732,7 @@ 
EXPORT_SYMBOL(ib_sa_get_mcmember_rec); * success or appropriate error code. * */ -int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, +int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, struct net_device *ndev, enum ib_gid_type gid_type, diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index d306049c22a2..34d0cc1a4147 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -92,7 +92,9 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED }, + [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 }, + [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_DST_ADDR] = { .len = sizeof(struct __kernel_sockaddr_storage) }, [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 }, @@ -130,6 +132,11 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, + [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED }, + [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 }, + [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED }, + [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 }, + [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 }, @@ -146,6 +153,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 }, [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, + [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 }, }; static int put_driver_name_print_type(struct sk_buff *msg, const char *name, @@ -242,7 +250,7 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) { char fw[IB_FW_VERSION_NAME_MAX]; int ret = 0; - u8 port; + u32 port; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; @@ -385,6 +393,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device) [RDMA_RESTRACK_CM_ID] = "cm_id", [RDMA_RESTRACK_MR] = "mr", [RDMA_RESTRACK_CTX] = "ctx", + [RDMA_RESTRACK_SRQ] = "srq", }; struct nlattr *table_attr; @@ -703,6 +712,135 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, err: return -EMSGSIZE; } +static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res); + + if (rdma_is_kernel_res(res)) + return 0; + + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) + return -EMSGSIZE; + + return fill_res_name_pid(msg, res); +} + +static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, + uint32_t max_range) +{ + struct nlattr *entry_attr; + + if (!min_range) + return 0; + + entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); + if (!entry_attr) + return -EMSGSIZE; + + if (min_range == max_range) { + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) + goto err; + } else { + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) + goto err; + if 
(nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) + goto err; + } + nla_nest_end(msg, entry_attr); + return 0; + +err: + nla_nest_cancel(msg, entry_attr); + return -EMSGSIZE; +} + +static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) +{ + uint32_t min_range = 0, prev = 0; + struct rdma_restrack_entry *res; + struct rdma_restrack_root *rt; + struct nlattr *table_attr; + struct ib_qp *qp = NULL; + unsigned long id = 0; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); + if (!table_attr) + return -EMSGSIZE; + + rt = &srq->device->res[RDMA_RESTRACK_QP]; + xa_lock(&rt->xa); + xa_for_each(&rt->xa, id, res) { + if (!rdma_restrack_get(res)) + continue; + + qp = container_of(res, struct ib_qp, res); + if (!qp->srq || (qp->srq->res.id != srq->res.id)) { + rdma_restrack_put(res); + continue; + } + + if (qp->qp_num < prev) + /* qp_num should be ascending */ + goto err_loop; + + if (min_range == 0) { + min_range = qp->qp_num; + } else if (qp->qp_num > (prev + 1)) { + if (fill_res_range_qp_entry(msg, min_range, prev)) + goto err_loop; + + min_range = qp->qp_num; + } + prev = qp->qp_num; + rdma_restrack_put(res); + } + + xa_unlock(&rt->xa); + + if (fill_res_range_qp_entry(msg, min_range, prev)) + goto err; + + nla_nest_end(msg, table_attr); + return 0; + +err_loop: + rdma_restrack_put(res); + xa_unlock(&rt->xa); +err: + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_srq *srq = container_of(res, struct ib_srq, res); + + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) + goto err; + + if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) + goto err; + + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) + goto err; + + if (ib_srq_has_cq(srq->srq_type)) { + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, + srq->ext.cq->res.id)) + goto err; + } + + if (fill_res_srq_qps(msg, srq)) + goto err; + + return fill_res_name_pid(msg, res); + +err: + return -EMSGSIZE; +} + static int fill_stat_counter_mode(struct sk_buff *msg, struct rdma_counter *counter) { @@ -1236,6 +1374,19 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, }, + [RDMA_RESTRACK_CTX] = { + .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX, + .flags = NLDEV_PER_DEV, + .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY, + .id = RDMA_NLDEV_ATTR_RES_CTXN, + }, + [RDMA_RESTRACK_SRQ] = { + .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ, + .flags = NLDEV_PER_DEV, + .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, + .id = RDMA_NLDEV_ATTR_RES_SRQN, + }, + }; static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -1476,6 +1627,8 @@ RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); +RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX); +RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ); static LIST_HEAD(link_ops); static DECLARE_RWSEM(link_ops_rwsem); @@ -1697,6 +1850,19 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_free(msg); return err; } + + /* + * Copy-on-fork is supported. + * See commits: + * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes") + * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm") + * for more details. Don't backport this without them. 
+ * + * Return value ignored on purpose, assume copy-on-fork is not + * supported in case of failure. + */ + nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); + nlmsg_end(msg, nlh); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); } @@ -2139,6 +2305,14 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { .doit = nldev_res_get_pd_doit, .dump = nldev_res_get_pd_dumpit, }, + [RDMA_NLDEV_CMD_RES_CTX_GET] = { + .doit = nldev_res_get_ctx_doit, + .dump = nldev_res_get_ctx_dumpit, + }, + [RDMA_NLDEV_CMD_RES_SRQ_GET] = { + .doit = nldev_res_get_srq_doit, + .dump = nldev_res_get_srq_dumpit, + }, [RDMA_NLDEV_CMD_SYS_GET] = { .doit = nldev_sys_get_doit, }, diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h index af4879bdf3d6..64e2822af70f 100644 --- a/drivers/infiniband/core/opa_smi.h +++ b/drivers/infiniband/core/opa_smi.h @@ -40,11 +40,11 @@ #include "smi.h" enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, - int port_num, int phys_port_cnt); + u32 port_num, int phys_port_cnt); int opa_smi_get_fwd_port(struct opa_smp *smp); extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - bool is_switch, int port_num); + bool is_switch, u32 port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 75eafd9208aa..94d83b665a2f 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -112,7 +112,7 @@ static void assert_uverbs_usecnt(struct ib_uobject *uobj, * however the type's allocat_commit function cannot have been called and the * uobject cannot be on the uobjects_lists * - * For RDMA_REMOVE_DESTROY the caller shold be holding a kref (eg via + * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via * rdma_lookup_get_uobject) and the object is left in a state where the caller * needs to call rdma_lookup_put_uobject. * @@ -916,7 +916,7 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile, } /* - * Destroy the uncontext and every uobject associated with it. + * Destroy the ucontext and every uobject associated with it. * * This is internally locked and can be called in parallel from multiple * contexts. 
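[Annotation] A minimal userspace sketch of the range-coalescing idea used by the fill_res_srq_qps() hunk above: QP numbers arrive in ascending order from the restrack xarray, runs of consecutive numbers are folded into a single [min, max] range, and a gap closes the current range and opens the next one. This is an illustration only, not kernel code; emit_range() and coalesce() are hypothetical names, and 0 is reused as the kernel's "no open range" sentinel (QP 0 never hangs off an SRQ).

#include <stdint.h>
#include <stdio.h>

/* Close out one range; mirrors fill_res_range_qp_entry() semantics. */
static void emit_range(uint32_t min_range, uint32_t max_range)
{
	if (!min_range)
		return;				/* nothing accumulated yet */
	if (min_range == max_range)
		printf("LQPN %u\n", min_range);	/* single QP */
	else
		printf("range %u-%u\n", min_range, max_range);
}

/* Fold an ascending list of QP numbers into ranges. */
static void coalesce(const uint32_t *qpn, size_t n)
{
	uint32_t min_range = 0, prev = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (min_range == 0) {
			min_range = qpn[i];	/* open the first range */
		} else if (qpn[i] > prev + 1) {
			emit_range(min_range, prev);	/* gap: close range */
			min_range = qpn[i];
		}
		prev = qpn[i];
	}
	emit_range(min_range, prev);		/* flush the trailing range */
}

int main(void)
{
	const uint32_t qpn[] = { 3, 4, 5, 9, 12, 13 };

	/* prints: range 3-5, LQPN 9, range 12-13 */
	coalesce(qpn, sizeof(qpn) / sizeof(qpn[0]));
	return 0;
}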
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index ffabaf327242..033207882c82 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -47,6 +47,7 @@ static const char *type2str(enum rdma_restrack_type type) [RDMA_RESTRACK_MR] = "MR", [RDMA_RESTRACK_CTX] = "CTX", [RDMA_RESTRACK_COUNTER] = "COUNTER", + [RDMA_RESTRACK_SRQ] = "SRQ", }; return names[type]; @@ -141,6 +142,8 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) return container_of(res, struct ib_ucontext, res)->device; case RDMA_RESTRACK_COUNTER: return container_of(res, struct rdma_counter, res)->device; + case RDMA_RESTRACK_SRQ: + return container_of(res, struct ib_srq, res)->device; default: WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type); return NULL; diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index 34fff94eaa38..7b638d91a4ec 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -70,7 +70,7 @@ struct netdev_event_work { }; static const struct { - bool (*is_supported)(const struct ib_device *device, u8 port_num); + bool (*is_supported)(const struct ib_device *device, u32 port_num); enum ib_gid_type gid_type; } PORT_CAP_TO_GID_TYPE[] = { {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE}, @@ -79,7 +79,7 @@ static const struct { #define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE) -unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port) +unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port) { int i; unsigned int ret_flags = 0; @@ -96,7 +96,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port) EXPORT_SYMBOL(roce_gid_type_mask_support); static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev, - u8 port, union ib_gid *gid, + u32 port, union ib_gid *gid, struct ib_gid_attr *gid_attr) { int i; @@ -144,7 +144,7 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de #define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \ BONDING_SLAVE_STATE_NA) static bool -is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port, +is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *real_dev; @@ -168,7 +168,7 @@ is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port, } static bool -is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port, +is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *master_dev; @@ -197,7 +197,7 @@ is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port, * considered for deriving default RoCE GID, returns false otherwise. 
*/ static bool -is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port, +is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; @@ -223,13 +223,13 @@ is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port, return res; } -static bool pass_all_filter(struct ib_device *ib_dev, u8 port, +static bool pass_all_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { return true; } -static bool upper_device_filter(struct ib_device *ib_dev, u8 port, +static bool upper_device_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { bool res; @@ -260,7 +260,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u8 port, * not have been established as slave device yet. */ static bool -is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, +is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { @@ -280,7 +280,7 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, static void update_gid_ip(enum gid_op_type gid_op, struct ib_device *ib_dev, - u8 port, struct net_device *ndev, + u32 port, struct net_device *ndev, struct sockaddr *addr) { union ib_gid gid; @@ -294,7 +294,7 @@ static void update_gid_ip(enum gid_op_type gid_op, } static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, - u8 port, + u32 port, struct net_device *rdma_ndev, struct net_device *event_ndev) { @@ -328,7 +328,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, } static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, - u8 port, struct net_device *ndev) + u32 port, struct net_device *ndev) { const struct in_ifaddr *ifa; struct in_device *in_dev; @@ -372,7 +372,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, } static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, - u8 port, struct net_device *ndev) + u32 port, struct net_device *ndev) { struct inet6_ifaddr *ifp; struct inet6_dev *in6_dev; @@ -417,7 +417,7 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, } } -static void _add_netdev_ips(struct ib_device *ib_dev, u8 port, +static void _add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { enum_netdev_ipv4_ips(ib_dev, port, ndev); @@ -425,13 +425,13 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port, enum_netdev_ipv6_ips(ib_dev, port, ndev); } -static void add_netdev_ips(struct ib_device *ib_dev, u8 port, +static void add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { _add_netdev_ips(ib_dev, port, cookie); } -static void del_netdev_ips(struct ib_device *ib_dev, u8 port, +static void del_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie); @@ -446,7 +446,7 @@ static void del_netdev_ips(struct ib_device *ib_dev, u8 port, * * del_default_gids() deletes the default GIDs of the event/cookie netdevice. 
*/ -static void del_default_gids(struct ib_device *ib_dev, u8 port, +static void del_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; @@ -458,7 +458,7 @@ static void del_default_gids(struct ib_device *ib_dev, u8 port, IB_CACHE_GID_DEFAULT_MODE_DELETE); } -static void add_default_gids(struct ib_device *ib_dev, u8 port, +static void add_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *event_ndev = cookie; @@ -470,7 +470,7 @@ static void add_default_gids(struct ib_device *ib_dev, u8 port, } static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev, - u8 port, + u32 port, struct net_device *rdma_ndev, void *cookie) { @@ -515,7 +515,7 @@ void rdma_roce_rescan_device(struct ib_device *ib_dev) EXPORT_SYMBOL(rdma_roce_rescan_device); static void callback_for_addr_gid_device_scan(struct ib_device *device, - u8 port, + u32 port, struct net_device *rdma_ndev, void *cookie) { @@ -547,10 +547,10 @@ static int netdev_upper_walk(struct net_device *upper, return 0; } -static void handle_netdev_upper(struct ib_device *ib_dev, u8 port, +static void handle_netdev_upper(struct ib_device *ib_dev, u32 port, void *cookie, void (*handle_netdev)(struct ib_device *ib_dev, - u8 port, + u32 port, struct net_device *ndev)) { struct net_device *ndev = cookie; @@ -574,25 +574,25 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port, } } -static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, +static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *event_ndev) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev); } -static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port, +static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids); } -static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port, +static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips); } -static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port, +static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 31156e22d3e7..a588c2038479 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -25,7 +25,7 @@ MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations"); * registration is also enabled if registering memory might yield better * performance than using multiple SGE entries, see rdma_rw_io_needs_mr() */ -static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num) +static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num) { if (rdma_protocol_iwarp(dev, port_num)) return true; @@ -42,7 +42,7 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num) * optimization otherwise. Additionally we have a debug option to force usage * of MRs to help testing this code path. 
*/ -static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num, +static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num, enum dma_data_direction dir, int dma_nents) { if (dir == DMA_FROM_DEVICE) { @@ -87,7 +87,7 @@ static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg) } /* Caller must have zero-initialized *reg. */ -static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num, +static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num, struct rdma_rw_reg_ctx *reg, struct scatterlist *sg, u32 sg_cnt, u32 offset) { @@ -121,7 +121,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num, } static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { struct rdma_rw_reg_ctx *prev = NULL; @@ -308,7 +308,7 @@ static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg, * Returns the number of WQEs that will be needed on the workqueue if * successful, or a negative error code. */ -int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, +int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 sg_offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { @@ -377,7 +377,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_init); * successful, or a negative error code. */ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey, enum dma_data_direction dir) @@ -505,7 +505,7 @@ static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval) * completion notification. */ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) + u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) { struct ib_send_wr *first_wr, *last_wr; int i; @@ -562,7 +562,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs); * is not set @cqe must be set so that the caller gets a completion * notification. 
*/ -int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, +int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) { struct ib_send_wr *first_wr; @@ -581,8 +581,9 @@ EXPORT_SYMBOL(rdma_rw_ctx_post); * @sg_cnt: number of entries in @sg * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ */ -void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, - struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir) +void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, + enum dma_data_direction dir) { int i; @@ -620,7 +621,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy); * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ */ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, enum dma_data_direction dir) { @@ -647,7 +648,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); * compute max_rdma_ctxts and the size of the transport's Send and * Send Completion Queues. */ -unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, +unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num, unsigned int maxpages) { unsigned int mr_pages; diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h index cbaaaa92fff3..143de37ae598 100644 --- a/drivers/infiniband/core/sa.h +++ b/drivers/infiniband/core/sa.h @@ -49,7 +49,7 @@ static inline void ib_sa_client_put(struct ib_sa_client *client) } int ib_sa_mcmember_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, u8 method, + struct ib_device *device, u32 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 9ef1a355131b..8f1705c403b4 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -95,7 +95,7 @@ struct ib_sa_port { struct delayed_work ib_cpi_work; spinlock_t classport_lock; /* protects class port info set */ spinlock_t ah_lock; - u8 port_num; + u32 port_num; }; struct ib_sa_device { @@ -1194,7 +1194,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query) } EXPORT_SYMBOL(ib_sa_cancel_query); -static u8 get_src_path_mask(struct ib_device *device, u8 port_num) +static u8 get_src_path_mask(struct ib_device *device, u32 port_num) { struct ib_sa_device *sa_dev; struct ib_sa_port *port; @@ -1213,7 +1213,7 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num) return src_path_mask; } -static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num, +static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num, struct sa_path_rec *rec, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *gid_attr) @@ -1251,7 +1251,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num, * User must invoke rdma_destroy_ah_attr() to release reference to SGID * attributes which are initialized using ib_init_ah_attr_from_path(). 
*/ -int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, +int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num, struct sa_path_rec *rec, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *gid_attr) @@ -1409,7 +1409,7 @@ EXPORT_SYMBOL(ib_sa_pack_path); static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client, struct ib_sa_device *sa_dev, - u8 port_num) + u32 port_num) { struct ib_sa_port *port; unsigned long flags; @@ -1444,7 +1444,7 @@ enum opa_pr_supported { */ static int opa_pr_query_possible(struct ib_sa_client *client, struct ib_sa_device *sa_dev, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct sa_path_rec *rec) { struct ib_port_attr port_attr; @@ -1533,7 +1533,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) * the query. */ int ib_sa_path_rec_get(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct sa_path_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, @@ -1688,7 +1688,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) * the query. */ int ib_sa_service_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, u8 method, + struct ib_device *device, u32 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, @@ -1784,7 +1784,7 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) } int ib_sa_mcmember_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, @@ -1876,7 +1876,7 @@ static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) } int ib_sa_guid_info_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct ib_sa_guidinfo_rec *rec, ib_sa_comp_mask comp_mask, u8 method, unsigned long timeout_ms, gfp_t gfp_mask, @@ -2265,7 +2265,7 @@ static void ib_sa_event(struct ib_event_handler *handler, unsigned long flags; struct ib_sa_device *sa_dev = container_of(handler, typeof(*sa_dev), event_handler); - u8 port_num = event->element.port_num - sa_dev->start_port; + u32 port_num = event->element.port_num - sa_dev->start_port; struct ib_sa_port *port = &sa_dev->port[port_num]; if (!rdma_cap_ib_sa(handler->device, port->port_num)) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 75e7ec017836..e5a78d1a63c9 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -193,7 +193,7 @@ static void qp_to_error(struct ib_qp_security *sec) static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix) { struct ib_port_pkey *pp, *tmp_pp; @@ -245,7 +245,7 @@ static int port_pkey_list_insert(struct ib_port_pkey *pp) struct pkey_index_qp_list *tmp_pkey; struct pkey_index_qp_list *pkey; struct ib_device *dev; - u8 port_num = pp->port_num; + u32 port_num = pp->port_num; int ret = 0; if (pp->state != IB_PORT_PKEY_VALID) @@ -538,7 +538,7 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec) } void ib_security_cache_change(struct ib_device *device, - u8 port_num, + u32 port_num, u64 subnet_prefix) { struct pkey_index_qp_list *pkey; @@ -649,7 +649,7 @@ int ib_security_modify_qp(struct ib_qp *qp, } static int 
ib_security_pkey_access(struct ib_device *dev, - u8 port_num, + u32 port_num, u16 pkey_index, void *sec) { diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index f19b23817c2b..45f09b75c893 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -41,7 +41,7 @@ #include "smi.h" #include "opa_smi.h" -static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num, +static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, const u8 *return_path, @@ -127,7 +127,7 @@ static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num, * Return IB_SMI_DISCARD if the SMP should be discarded */ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - bool is_switch, int port_num) + bool is_switch, u32 port_num) { return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, @@ -139,7 +139,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, } enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - bool is_switch, int port_num) + bool is_switch, u32 port_num) { return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, @@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, OPA_LID_PERMISSIVE); } -static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num, +static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num, int phys_port_cnt, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, @@ -238,7 +238,7 @@ static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num, * Return IB_SMI_DISCARD if the SMP should be dropped */ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, - int port_num, int phys_port_cnt) + u32 port_num, int phys_port_cnt) { return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, @@ -254,7 +254,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, * Return IB_SMI_DISCARD if the SMP should be dropped */ enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, - int port_num, int phys_port_cnt) + u32 port_num, int phys_port_cnt) { return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h index 91d9b353ab85..e350ed623c45 100644 --- a/drivers/infiniband/core/smi.h +++ b/drivers/infiniband/core/smi.h @@ -52,11 +52,11 @@ enum smi_forward_action { }; enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, - int port_num, int phys_port_cnt); + u32 port_num, int phys_port_cnt); int smi_get_fwd_port(struct ib_smp *smp); extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - bool is_switch, int port_num); + bool is_switch, u32 port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index b8abb30f80df..05b702de00e8 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -62,7 +62,7 @@ struct ib_port { const struct attribute_group *pma_table; struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; - u8 port_num; + u32 port_num; }; struct port_attribute { @@ -94,7 +94,7 @@ struct hw_stats_attribute { const char *buf, size_t count); 
int index; - u8 port_num; + u32 port_num; }; static ssize_t port_attr_show(struct kobject *kobj, @@ -297,7 +297,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, static const char *phys_state_to_str(enum ib_port_phys_state phys_state) { - static const char * phys_state_str[] = { + static const char *phys_state_str[] = { "<unknown>", "Sleep", "Polling", @@ -470,14 +470,14 @@ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr, struct port_table_attribute port_pma_attr_##_name = { \ .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ .index = (_offset) | ((_width) << 16) | ((_counter) << 24), \ - .attr_id = IB_PMA_PORT_COUNTERS , \ + .attr_id = IB_PMA_PORT_COUNTERS, \ } #define PORT_PMA_ATTR_EXT(_name, _width, _offset) \ struct port_table_attribute port_pma_attr_ext_##_name = { \ .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ .index = (_offset) | ((_width) << 16), \ - .attr_id = IB_PMA_PORT_COUNTERS_EXT , \ + .attr_id = IB_PMA_PORT_COUNTERS_EXT, \ } /* @@ -812,7 +812,7 @@ static const struct attribute_group *get_counter_table(struct ib_device *dev, } static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats, - u8 port_num, int index) + u32 port_num, int index) { int ret; @@ -938,7 +938,7 @@ static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group) kfree(attr_group); } -static struct attribute *alloc_hsa(int index, u8 port_num, const char *name) +static struct attribute *alloc_hsa(int index, u32 port_num, const char *name) { struct hw_stats_attribute *hsa; @@ -956,7 +956,7 @@ static struct attribute *alloc_hsa(int index, u8 port_num, const char *name) return &hsa->attr; } -static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) +static struct attribute *alloc_hsa_lifespan(char *name, u32 port_num) { struct hw_stats_attribute *hsa; @@ -975,7 +975,7 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) } static void setup_hw_stats(struct ib_device *device, struct ib_port *port, - u8 port_num) + u32 port_num) { struct attribute_group *hsag; struct rdma_hw_stats *stats; @@ -1049,7 +1049,6 @@ err_free_hsag: kfree(hsag); err_free_stats: kfree(stats); - return; } static int add_port(struct ib_core_device *coredev, int port_num) @@ -1075,9 +1074,8 @@ static int add_port(struct ib_core_device *coredev, int port_num) ret = kobject_init_and_add(&p->kobj, &port_type, coredev->ports_kobj, "%d", port_num); - if (ret) { + if (ret) goto err_put; - } p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL); if (!p->gid_attr_group) { @@ -1088,9 +1086,8 @@ static int add_port(struct ib_core_device *coredev, int port_num) p->gid_attr_group->port = p; ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type, &p->kobj, "gid_attrs"); - if (ret) { + if (ret) goto err_put_gid_attrs; - } if (device->ops.process_mad && is_full_dev) { p->pma_table = get_counter_table(device, port_num); @@ -1383,7 +1380,7 @@ void ib_free_port_attrs(struct ib_core_device *coredev) int ib_setup_port_attrs(struct ib_core_device *coredev) { struct ib_device *device = rdma_device_to_ibdev(&coredev->dev); - unsigned int port; + u32 port; int ret; coredev->ports_kobj = kobject_create_and_add("ports", @@ -1437,7 +1434,7 @@ void ib_device_unregister_sysfs(struct ib_device *device) * @ktype: pointer to the ktype for this kobject. 
* @name: the name of the kobject */ -int ib_port_register_module_stat(struct ib_device *device, u8 port_num, +int ib_port_register_module_stat(struct ib_device *device, u32 port_num, struct kobject *kobj, struct kobj_type *ktype, const char *name) { diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index da2512c30ffd..15d57ba4d07a 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -231,7 +231,7 @@ static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; - dst->responder_resources =src->responder_resources; + dst->responder_resources = src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; @@ -1034,7 +1034,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id, { dst->private_data = src->private_data; dst->private_data_len = src->private_data_len; - dst->responder_resources =src->responder_resources; + dst->responder_resources = src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; @@ -1708,8 +1708,8 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf, ssize_t ret; if (!ib_safe_file_access(filp)) { - pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", - task_tgid_vnr(current), current->comm); + pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", + __func__, task_tgid_vnr(current), current->comm); return -EACCES; } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 2dde99a9ba07..0eb40025075f 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -47,17 +47,17 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) { - struct sg_page_iter sg_iter; - struct page *page; + bool make_dirty = umem->writable && dirty; + struct scatterlist *sg; + unsigned int i; if (umem->nmap > 0) ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents, DMA_BIDIRECTIONAL); - for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) { - page = sg_page_iter_page(&sg_iter); - unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty); - } + for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i) + unpin_user_page_range_dirty_lock(sg_page(sg), + DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty); sg_free_table(&umem->sg_head); } @@ -100,10 +100,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, */ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); - /* At minimum, drivers must support PAGE_SIZE or smaller */ - if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0)))) - return 0; - umem->iova = va = virt; /* The best result is the smallest page size that results in the minimum * number of required pages. Compute the largest page size that could @@ -309,8 +305,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, int ret; if (offset > umem->length || length > umem->length - offset) { - pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n", - offset, umem->length, end); + pr_err("%s not in range. 
offset: %zd umem length: %zd end: %zd\n", + __func__, offset, umem->length, end); return -EINVAL; } diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index f9b5162d9260..0d65ce146fc4 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -168,6 +168,10 @@ void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf; + dma_resv_lock(dmabuf->resv, NULL); + ib_umem_dmabuf_unmap_pages(umem_dmabuf); + dma_resv_unlock(dmabuf->resv); + dma_buf_detach(dmabuf, umem_dmabuf->attach); dma_buf_put(dmabuf); kfree(umem_dmabuf); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index dd7f3b437c6b..852efedda798 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -101,7 +101,7 @@ struct ib_umad_port { struct ib_device *ib_dev; struct ib_umad_device *umad_dev; int dev_num; - u8 port_num; + u32 port_num; }; struct ib_umad_device { @@ -165,8 +165,8 @@ static void ib_umad_dev_put(struct ib_umad_device *dev) static int hdr_size(struct ib_umad_file *file) { - return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) : - sizeof (struct ib_user_mad_hdr_old); + return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) : + sizeof(struct ib_user_mad_hdr_old); } /* caller must hold file->mutex */ @@ -688,8 +688,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, mutex_lock(&file->mutex); if (!file->port->ib_dev) { - dev_notice(&file->port->dev, - "ib_umad_reg_agent: invalid device\n"); + dev_notice(&file->port->dev, "%s: invalid device\n", __func__); ret = -EPIPE; goto out; } @@ -701,7 +700,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, if (ureq.qpn != 0 && ureq.qpn != 1) { dev_notice(&file->port->dev, - "ib_umad_reg_agent: invalid QPN %d specified\n", + "%s: invalid QPN %d specified\n", __func__, ureq.qpn); ret = -EINVAL; goto out; @@ -711,9 +710,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, if (!__get_agent(file, agent_id)) goto found; - dev_notice(&file->port->dev, - "ib_umad_reg_agent: Max Agents (%u) reached\n", + dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__, IB_UMAD_MAX_AGENTS); + ret = -ENOMEM; goto out; @@ -790,8 +789,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) mutex_lock(&file->mutex); if (!file->port->ib_dev) { - dev_notice(&file->port->dev, - "ib_umad_reg_agent2: invalid device\n"); + dev_notice(&file->port->dev, "%s: invalid device\n", __func__); ret = -EPIPE; goto out; } @@ -802,17 +800,16 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) } if (ureq.qpn != 0 && ureq.qpn != 1) { - dev_notice(&file->port->dev, - "ib_umad_reg_agent2: invalid QPN %d specified\n", - ureq.qpn); + dev_notice(&file->port->dev, "%s: invalid QPN %d specified\n", + __func__, ureq.qpn); ret = -EINVAL; goto out; } if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) { dev_notice(&file->port->dev, - "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n", - ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); + "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n", + __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); ret = -EINVAL; if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP, @@ -827,8 +824,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) if (!__get_agent(file, agent_id)) goto 
found; - dev_notice(&file->port->dev, - "ib_umad_reg_agent2: Max Agents (%u) reached\n", + dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__, IB_UMAD_MAX_AGENTS); ret = -ENOMEM; goto out; @@ -840,7 +836,7 @@ found: req.mgmt_class_version = ureq.mgmt_class_version; if (ureq.oui & 0xff000000) { dev_notice(&file->port->dev, - "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n", + "%s failed: oui invalid 0x%08x\n", __func__, ureq.oui); ret = -EINVAL; goto out; @@ -1145,7 +1141,7 @@ static const struct file_operations umad_sm_fops = { static struct ib_umad_port *get_port(struct ib_device *ibdev, struct ib_umad_device *umad_dev, - unsigned int port) + u32 port) { if (!umad_dev) return ERR_PTR(-EOPNOTSUPP); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index f5b8be3bedde..d5e15a8c870d 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -364,7 +364,7 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext, resp->max_srq_sge = attr->max_srq_sge; resp->max_pkeys = attr->max_pkeys; resp->local_ca_ack_delay = attr->local_ca_ack_delay; - resp->phys_port_cnt = ib_dev->phys_port_cnt; + resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX); } static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs) @@ -2002,12 +2002,13 @@ static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs) static void *alloc_wr(size_t wr_size, __u32 num_sge) { - if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) / - sizeof (struct ib_sge)) + if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) / + sizeof(struct ib_sge)) return NULL; - return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + - num_sge * sizeof (struct ib_sge), GFP_KERNEL); + return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) + + num_sge * sizeof(struct ib_sge), + GFP_KERNEL); } static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) @@ -2216,7 +2217,7 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count, const struct ib_sge __user *sgls; const void __user *wqes; - if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) + if (wqe_size < sizeof(struct ib_uverbs_recv_wr)) return ERR_PTR(-EINVAL); wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count); @@ -2249,14 +2250,14 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count, } if (user_wr->num_sge >= - (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) / - sizeof (struct ib_sge)) { + (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) / + sizeof(struct ib_sge)) { ret = -EINVAL; goto err; } - next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + - user_wr->num_sge * sizeof (struct ib_sge), + next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) + + user_wr->num_sge * sizeof(struct ib_sge), GFP_KERNEL); if (!next) { ret = -ENOMEM; @@ -2274,8 +2275,8 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count, next->num_sge = user_wr->num_sge; if (next->num_sge) { - next->sg_list = (void *) next + - ALIGN(sizeof *next, sizeof (struct ib_sge)); + next->sg_list = (void *)next + + ALIGN(sizeof(*next), sizeof(struct ib_sge)); if (copy_from_user(next->sg_list, sgls + sg_ind, next->num_sge * sizeof(struct ib_sge))) { diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index ff047eb024ab..990f0724acc6 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -752,9 +752,10 @@ int 
uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) return uverbs_set_output(bundle, attr); } -int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, - size_t idx, s64 lower_bound, u64 upper_bound, - s64 *def_val) +int _uverbs_get_const_signed(s64 *to, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val) { const struct uverbs_attr *attr; @@ -773,7 +774,30 @@ int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, return 0; } -EXPORT_SYMBOL(_uverbs_get_const); +EXPORT_SYMBOL(_uverbs_get_const_signed); + +int _uverbs_get_const_unsigned(u64 *to, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, u64 upper_bound, u64 *def_val) +{ + const struct uverbs_attr *attr; + + attr = uverbs_attr_get(attrs_bundle, idx); + if (IS_ERR(attr)) { + if ((PTR_ERR(attr) != -ENOENT) || !def_val) + return PTR_ERR(attr); + + *to = *def_val; + } else { + *to = attr->ptr_attr.data; + } + + if (*to > upper_bound) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(_uverbs_get_const_unsigned); int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, size_t idx, const void *from, size_t size) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 28464c58738c..2b0798151fb7 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -96,10 +96,10 @@ static const char * const wc_statuses[] = { [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", [IB_WC_LOC_PROT_ERR] = "local protection error", [IB_WC_WR_FLUSH_ERR] = "WR flushed", - [IB_WC_MW_BIND_ERR] = "memory management operation error", + [IB_WC_MW_BIND_ERR] = "memory bind operation error", [IB_WC_BAD_RESP_ERR] = "bad response error", [IB_WC_LOC_ACCESS_ERR] = "local access error", - [IB_WC_REM_INV_REQ_ERR] = "invalid request error", + [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error", [IB_WC_REM_ACCESS_ERR] = "remote access error", [IB_WC_REM_OP_ERR] = "remote operation error", [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", @@ -227,7 +227,8 @@ rdma_node_get_transport(unsigned int node_type) } EXPORT_SYMBOL(rdma_node_get_transport); -enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) +enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, + u32 port_num) { enum rdma_transport_type lt; if (device->ops.get_link_layer) @@ -341,7 +342,8 @@ int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) } /* uverbs manipulates usecnt with proper locking, while the kabi - requires the caller to guarantee we can't race here. */ + * requires the caller to guarantee we can't race here. 
+ */ WARN_ON(atomic_read(&pd->usecnt)); ret = pd->device->ops.dealloc_pd(pd, udata); @@ -658,7 +660,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr) EXPORT_SYMBOL(ib_get_rdma_header_version); static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, - u8 port_num, + u32 port_num, const struct ib_grh *grh) { int grh_version; @@ -701,7 +703,7 @@ static bool find_gid_index(const union ib_gid *gid, } static const struct ib_gid_attr * -get_sgid_attr_from_eth(struct ib_device *device, u8 port_num, +get_sgid_attr_from_eth(struct ib_device *device, u32 port_num, u16 vlan_id, const union ib_gid *sgid, enum ib_gid_type gid_type) { @@ -788,7 +790,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device, * On success the caller is responsible to call rdma_destroy_ah_attr on the * attr. */ -int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, +int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, const struct ib_wc *wc, const struct ib_grh *grh, struct rdma_ah_attr *ah_attr) { @@ -919,7 +921,7 @@ void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr) EXPORT_SYMBOL(rdma_destroy_ah_attr); struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, - const struct ib_grh *grh, u8 port_num) + const struct ib_grh *grh, u32 port_num) { struct rdma_ah_attr ah_attr; struct ib_ah *ah; @@ -1037,8 +1039,12 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd, } atomic_inc(&pd->usecnt); + rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ); + rdma_restrack_parent_name(&srq->res, &pd->res); + ret = pd->device->ops.create_srq(srq, srq_init_attr, udata); if (ret) { + rdma_restrack_put(&srq->res); atomic_dec(&srq->pd->usecnt); if (srq->srq_type == IB_SRQT_XRC) atomic_dec(&srq->ext.xrc.xrcd->usecnt); @@ -1048,6 +1054,8 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd, return ERR_PTR(ret); } + rdma_restrack_add(&srq->res); + return srq; } EXPORT_SYMBOL(ib_create_srq_user); @@ -1086,6 +1094,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) atomic_dec(&srq->ext.xrc.xrcd->usecnt); if (ib_srq_has_cq(srq->srq_type)) atomic_dec(&srq->ext.cq->usecnt); + rdma_restrack_del(&srq->res); kfree(srq); return ret; @@ -1673,7 +1682,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp) static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { - u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; + u32 port = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; const struct ib_gid_attr *old_sgid_attr_av; const struct ib_gid_attr *old_sgid_attr_alt_av; int ret; @@ -1801,7 +1810,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr, } EXPORT_SYMBOL(ib_modify_qp_with_udata); -int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width) +int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width) { int rc; u32 netdev_speed; @@ -2467,7 +2476,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, } EXPORT_SYMBOL(ib_check_mr_status); -int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, +int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, int state) { if (!device->ops.set_vf_link_state) @@ -2477,7 +2486,7 @@ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, } EXPORT_SYMBOL(ib_set_vf_link_state); -int ib_get_vf_config(struct ib_device *device, int vf, u8 port, +int ib_get_vf_config(struct ib_device *device, int vf, u32 port, struct ifla_vf_info *info) { if (!device->ops.get_vf_config) @@ -2487,7 +2496,7 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port, } EXPORT_SYMBOL(ib_get_vf_config); -int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, +int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, struct ifla_vf_stats *stats) { if (!device->ops.get_vf_stats) @@ -2497,7 +2506,7 @@ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, } EXPORT_SYMBOL(ib_get_vf_stats); -int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, +int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, int type) { if (!device->ops.set_vf_guid) @@ -2507,7 +2516,7 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, } EXPORT_SYMBOL(ib_set_vf_guid); -int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, +int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, struct ifla_vf_guid *node_guid, struct ifla_vf_guid *port_guid) { @@ -2849,7 +2858,7 @@ void ib_drain_qp(struct ib_qp *qp) } EXPORT_SYMBOL(ib_drain_qp); -struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, +struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *)) @@ -2875,7 +2884,7 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, } EXPORT_SYMBOL(rdma_alloc_netdev); -int rdma_init_netdev(struct ib_device *device, u8 port_num, +int rdma_init_netdev(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *),
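[Annotation] A standalone sketch of why the uverbs_ioctl.c hunk above splits _uverbs_get_const() into signed and unsigned variants: pushing a u64 constant through the signed path reinterprets values above S64_MAX as negative, so they can fail a signed lower-bound check even though they are valid unsigned values. Plain C with hypothetical names and no kernel dependencies; the bound checks are condensed from the kernel logic, not copied verbatim, and the stated rationale is an inference from the hunk rather than a quoted commit message.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Signed fetch: lower and upper bound, value interpreted as s64. */
static int get_const_signed(int64_t *to, int64_t raw,
			    int64_t lower_bound, uint64_t upper_bound)
{
	*to = raw;
	if (*to < lower_bound || (*to > 0 && (uint64_t)*to > upper_bound))
		return -EINVAL;
	return 0;
}

/* Unsigned fetch: upper bound only, value compared as u64. */
static int get_const_unsigned(uint64_t *to, uint64_t raw,
			      uint64_t upper_bound)
{
	*to = raw;
	if (*to > upper_bound)
		return -EINVAL;
	return 0;
}

int main(void)
{
	uint64_t big = UINT64_MAX - 1;	/* valid u64; -2 when read as s64 */
	int64_t s;
	uint64_t u;

	/* signed path sees a negative value and rejects it against bound 0 */
	printf("signed:   %d\n",
	       get_const_signed(&s, (int64_t)big, 0, UINT64_MAX));
	/* unsigned path accepts the same bit pattern */
	printf("unsigned: %d\n", get_const_unsigned(&u, big, UINT64_MAX));
	return 0;
}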