25 files changed, 1135 insertions, 59 deletions
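This series moves the Intel Ethernet drivers' RDMA hand-off onto the auxiliary bus: i40e registers a per-PF auxiliary device named "iwarp" (plus i40e_client_device_register()/i40e_client_device_unregister() entry points for the unified irdma driver), ice registers IIDC_RDMA_ROCE_NAME ("roce"), and the new include/linux/net/intel/iidc.h header carries the interface the RDMA auxiliary driver consumes. As orientation for the hunks below, here is a minimal, hypothetical sketch of how such an auxiliary driver binds to the ice device. Every example_* identifier is invented for illustration, and the match string assumes the auxiliary bus convention of "<parent module name>.<device name>", which gives "ice.roce" for the device registered in ice_plug_aux_dev():

/* Hypothetical consumer of the IIDC interface added by this series; in the
 * tree the real consumer is the irdma driver. All example_* names are
 * illustrative only.
 */
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/net/intel/iidc.h>

static void example_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	/* Blocking by contract: the core driver waits for this to return.
	 * See the expanded handler sketch after the patch.
	 */
}

static int example_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct iidc_auxiliary_dev *iadev =
		container_of(adev, struct iidc_auxiliary_dev, adev);
	struct iidc_qos_params qos;

	/* The core driver hands over its PF through iidc_auxiliary_dev */
	ice_get_qos_params(iadev->pf, &qos);
	return 0;
}

static void example_remove(struct auxiliary_device *adev)
{
}

static const struct auxiliary_device_id example_id_table[] = {
	{ .name = "ice." IIDC_RDMA_ROCE_NAME },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, example_id_table);

static struct iidc_auxiliary_drv example_adrv = {
	.adrv = {
		.id_table = example_id_table,
		.probe = example_probe,
		.remove = example_remove,
	},
	.event_handler = example_event_handler,
};

static int __init example_init(void)
{
	return auxiliary_driver_register(&example_adrv.adrv);
}
module_init(example_init);

static void __exit example_exit(void)
{
	auxiliary_driver_unregister(&example_adrv.adrv);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

Note that the auxiliary_driver must be embedded in iidc_auxiliary_drv, because the core driver reaches back into it with container_of() on adev->dev.driver (see ice_get_auxiliary_drv() in the new ice_idc.c below).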
diff --git a/MAINTAINERS b/MAINTAINERS index 9ffec4a8c6ea..9cbc3766fd74 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9137,6 +9137,7 @@ F: Documentation/networking/device_drivers/ethernet/intel/ F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ F: include/linux/avf/virtchnl.h +F: include/linux/net/intel/iidc.h INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) M: Maik Broemme <mbroemme@libmpq.org> diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index b496f30ce066..364f69cd620f 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1423,7 +1423,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, struct i40e_qv_info *iw_qvinfo; u32 ceq_idx; u32 i; - u32 size; + size_t size; if (!ldev->msix_count) { i40iw_pr_err("No MSI-X vectors\n"); @@ -1433,8 +1433,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, iwdev->msix_count = ldev->msix_count; size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; - size += sizeof(struct i40e_qvlist_info); - size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1; + size += struct_size(iw_qvlist, qv_info, iwdev->msix_count); iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL); if (!iwdev->iw_msixtbl) diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index c1d155690341..eae1b42e48db 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -241,6 +241,7 @@ config I40E tristate "Intel(R) Ethernet Controller XL710 Family support" imply PTP_1588_CLOCK depends on PCI + select AUXILIARY_BUS help This driver supports Intel(R) Ethernet Controller XL710 Family of devices. For more information on how to identify your adapter, go @@ -294,6 +295,7 @@ config ICE tristate "Intel(R) Ethernet Connection E800 Series Support" default n depends on PCI_MSI + select AUXILIARY_BUS select DIMLIB select NET_DEVLINK select PLDMFW diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 85d3dd3a3339..b9417dc0007c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -870,6 +870,8 @@ struct i40e_netdev_priv { struct i40e_vsi *vsi; }; +extern struct ida i40e_client_ida; + /* struct that defines an interrupt vector */ struct i40e_q_vector { struct i40e_vsi *vsi; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 32f3facbed1a..e07ed065d3a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -12,6 +12,7 @@ static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR; static struct i40e_client *registered_client; static LIST_HEAD(i40e_devices); static DEFINE_MUTEX(i40e_device_mutex); +DEFINE_IDA(i40e_client_ida); static int i40e_client_virtchnl_send(struct i40e_info *ldev, struct i40e_client *client, @@ -275,6 +276,57 @@ void i40e_client_update_msix_info(struct i40e_pf *pf) cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; } +static void i40e_auxiliary_dev_release(struct device *dev) +{ + struct i40e_auxiliary_device *i40e_aux_dev = + container_of(dev, struct i40e_auxiliary_device, aux_dev.dev); + + ida_free(&i40e_client_ida, i40e_aux_dev->aux_dev.id); + kfree(i40e_aux_dev); +} + +static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name) +{ + struct i40e_auxiliary_device 
*i40e_aux_dev; + struct pci_dev *pdev = ldev->pcidev; + struct auxiliary_device *aux_dev; + int ret; + + i40e_aux_dev = kzalloc(sizeof(*i40e_aux_dev), GFP_KERNEL); + if (!i40e_aux_dev) + return -ENOMEM; + + i40e_aux_dev->ldev = ldev; + + aux_dev = &i40e_aux_dev->aux_dev; + aux_dev->name = name; + aux_dev->dev.parent = &pdev->dev; + aux_dev->dev.release = i40e_auxiliary_dev_release; + ldev->aux_dev = aux_dev; + + ret = ida_alloc(&i40e_client_ida, GFP_KERNEL); + if (ret < 0) { + kfree(i40e_aux_dev); + return ret; + } + aux_dev->id = ret; + + ret = auxiliary_device_init(aux_dev); + if (ret < 0) { + ida_free(&i40e_client_ida, aux_dev->id); + kfree(i40e_aux_dev); + return ret; + } + + ret = auxiliary_device_add(aux_dev); + if (ret) { + auxiliary_device_uninit(aux_dev); + return ret; + } + + return ret; +} + /** * i40e_client_add_instance - add a client instance struct to the instance list * @pf: pointer to the board struct @@ -286,9 +338,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf) struct netdev_hw_addr *mac = NULL; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - if (!registered_client || pf->cinst) - return; - cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) return; @@ -308,11 +357,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf) cdev->lan_info.fw_build = pf->hw.aq.fw_build; set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state); - if (i40e_client_get_params(vsi, &cdev->lan_info.params)) { - kfree(cdev); - cdev = NULL; - return; - } + if (i40e_client_get_params(vsi, &cdev->lan_info.params)) + goto free_cdev; mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list, struct netdev_hw_addr, list); @@ -324,7 +370,17 @@ static void i40e_client_add_instance(struct i40e_pf *pf) cdev->client = registered_client; pf->cinst = cdev; - i40e_client_update_msix_info(pf); + cdev->lan_info.msix_count = pf->num_iwarp_msix; + cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; + + if (i40e_register_auxiliary_dev(&cdev->lan_info, "iwarp")) + goto free_cdev; + + return; + +free_cdev: + kfree(cdev); + pf->cinst = NULL; } /** @@ -345,7 +401,7 @@ void i40e_client_del_instance(struct i40e_pf *pf) **/ void i40e_client_subtask(struct i40e_pf *pf) { - struct i40e_client *client = registered_client; + struct i40e_client *client; struct i40e_client_instance *cdev; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int ret = 0; @@ -359,9 +415,11 @@ void i40e_client_subtask(struct i40e_pf *pf) test_bit(__I40E_CONFIG_BUSY, pf->state)) return; - if (!client || !cdev) + if (!cdev || !cdev->client) return; + client = cdev->client; + /* Here we handle client opens. If the client is down, and * the netdev is registered, then open the client. */ @@ -423,16 +481,8 @@ int i40e_lan_add_device(struct i40e_pf *pf) pf->hw.pf_id, pf->hw.bus.bus_id, pf->hw.bus.device, pf->hw.bus.func); - /* If a client has already been registered, we need to add an instance - * of it to our new LAN device. - */ - if (registered_client) - i40e_client_add_instance(pf); + i40e_client_add_instance(pf); - /* Since in some cases register may have happened before a device gets - * added, we can schedule a subtask to go initiate the clients if - * they can be launched at probe time. 
- */ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); @@ -449,9 +499,13 @@ out: **/ int i40e_lan_del_device(struct i40e_pf *pf) { + struct auxiliary_device *aux_dev = pf->cinst->lan_info.aux_dev; struct i40e_device *ldev, *tmp; int ret = -ENODEV; + auxiliary_device_delete(aux_dev); + auxiliary_device_uninit(aux_dev); + /* First, remove any client instance. */ i40e_client_del_instance(pf); @@ -579,7 +633,7 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev, u32 v_idx, i, reg_idx, reg; ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info, - qvlist_info->num_vectors - 1), GFP_KERNEL); + qvlist_info->num_vectors), GFP_KERNEL); if (!ldev->qvlist_info) return -ENOMEM; ldev->qvlist_info->num_vectors = qvlist_info->num_vectors; @@ -732,6 +786,42 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, return err; } +void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client) +{ + struct i40e_pf *pf = ldev->pf; + + pf->cinst->client = client; + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); + i40e_service_event_schedule(pf); +} +EXPORT_SYMBOL_GPL(i40e_client_device_register); + +void i40e_client_device_unregister(struct i40e_info *ldev) +{ + struct i40e_pf *pf = ldev->pf; + struct i40e_client_instance *cdev = pf->cinst; + + if (!cdev) + return; + + while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) + usleep_range(500, 1000); + + if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + cdev->client->ops->close(&cdev->lan_info, cdev->client, false); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + i40e_client_release_qvlist(&cdev->lan_info); + } + + pf->cinst->client = NULL; + clear_bit(__I40E_SERVICE_SCHED, pf->state); +} +EXPORT_SYMBOL_GPL(i40e_client_device_unregister); + +/* Retain these legacy global registration/unregistration calls till i40iw is + * removed from the kernel. The irdma unified driver does not use these + * exported symbols. 
+ */ /** * i40e_register_client - Register a i40e client driver with the L2 driver * @client: pointer to the i40e_client struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 704e474879c5..9db1968fc491 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -16270,6 +16270,7 @@ static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); destroy_workqueue(i40e_wq); + ida_destroy(&i40e_client_ida); i40e_dbg_exit(); } module_exit(i40e_exit_module); diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 07fe857e9e3a..dfb64fb504a2 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -22,6 +22,7 @@ ice-y := ice_main.o \ ice_ethtool_fdir.o \ ice_flex_pipe.o \ ice_flow.o \ + ice_idc.o \ ice_devlink.o \ ice_fw_update.o \ ice_lag.o \ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index e35db3ff583b..228055e8f33b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -34,6 +34,7 @@ #include <linux/if_bridge.h> #include <linux/ctype.h> #include <linux/bpf.h> +#include <linux/auxiliary_bus.h> #include <linux/avf/virtchnl.h> #include <linux/cpu_rmap.h> #include <linux/dim.h> @@ -55,6 +56,7 @@ #include "ice_switch.h" #include "ice_common.h" #include "ice_sched.h" +#include "ice_idc_int.h" #include "ice_virtchnl_pf.h" #include "ice_sriov.h" #include "ice_fdir.h" @@ -78,6 +80,8 @@ #define ICE_MIN_LAN_OICR_MSIX 1 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) #define ICE_FDIR_MSIX 2 +#define ICE_RDMA_NUM_AEQ_MSIX 4 +#define ICE_MIN_RDMA_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -88,8 +92,9 @@ #define ICE_MAX_LG_RSS_QS 256 #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) +#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1) /* All VF control VSIs share the same IRQ, so assign a unique ID for them */ -#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1) +#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 @@ -203,9 +208,9 @@ enum ice_pf_state { ICE_NEEDS_RESTART, ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ - ICE_PFR_REQ, /* set by driver and peers */ - ICE_CORER_REQ, /* set by driver and peers */ - ICE_GLOBR_REQ, /* set by driver and peers */ + ICE_PFR_REQ, /* set by driver */ + ICE_CORER_REQ, /* set by driver */ + ICE_GLOBR_REQ, /* set by driver */ ICE_CORER_RECV, /* set by OICR handler */ ICE_GLOBR_RECV, /* set by OICR handler */ ICE_EMPR_RECV, /* set by OICR handler */ @@ -332,6 +337,7 @@ struct ice_vsi { u16 req_rxq; /* User requested Rx queues */ u16 num_rx_desc; u16 num_tx_desc; + u16 qset_handle[ICE_MAX_TRAFFIC_CLASS]; struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; struct ice_ring **xdp_rings; /* XDP ring array */ @@ -373,12 +379,14 @@ struct ice_q_vector { enum ice_pf_flags { ICE_FLAG_FLTR_SYNC, + ICE_FLAG_RDMA_ENA, ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, ICE_FLAG_SRIOV_CAPABLE, ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, ICE_FLAG_FD_ENA, + ICE_FLAG_AUX_ENA, ICE_FLAG_ADV_FEATURES, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, @@ -439,6 +447,8 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc 
flow */ struct mutex tc_mutex; /* lock to protect TC changes */ u32 msg_enable; + u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ + u16 rdma_base_vector; /* spinlock to protect the AdminQ wait list */ spinlock_t aq_wait_lock; @@ -471,6 +481,8 @@ struct ice_pf { unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; + struct auxiliary_device *adev; + int aux_idx; u32 sw_int_count; __le64 nvm_phy_type_lo; /* NVM PHY type low */ @@ -636,6 +648,9 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); +int ice_plug_aux_dev(struct ice_pf *pf); +void ice_unplug_aux_dev(struct ice_pf *pf); +int ice_init_rdma(struct ice_pf *pf); const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); bool ice_is_wol_supported(struct ice_hw *hw); @@ -660,4 +675,25 @@ int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); +/** + * ice_set_rdma_cap - enable RDMA support + * @pf: PF struct + */ +static inline void ice_set_rdma_cap(struct ice_pf *pf) +{ + if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { + set_bit(ICE_FLAG_RDMA_ENA, pf->flags); + ice_plug_aux_dev(pf); + } +} + +/** + * ice_clear_rdma_cap - disable RDMA support + * @pf: PF struct + */ +static inline void ice_clear_rdma_cap(struct ice_pf *pf) +{ + ice_unplug_aux_dev(pf); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); +} #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 5cdfe406af84..ff11a618bef7 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -115,6 +115,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_PENDING_NET_VER 0x004D +#define ICE_AQC_CAPS_RDMA 0x0051 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; @@ -1684,6 +1685,36 @@ struct ice_aqc_dis_txq_item { __le16 q_id[]; } __packed; +/* Add Tx RDMA Queue Set (indirect 0x0C33) */ +struct ice_aqc_add_rdma_qset { + u8 num_qset_grps; + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is the descriptor of each Qset entry for the Add Tx RDMA Queue Set + * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset. + */ +struct ice_aqc_add_tx_rdma_qset_entry { + __le16 tx_qset_id; + u8 rsvd[2]; + __le32 qset_teid; + struct ice_aqc_txsched_elem info; +}; + +/* The format of the command buffer for Add Tx RDMA Queue Set(0x0C33) + * is an array of the following structs. Please note that the length of + * each struct ice_aqc_add_rdma_qset is variable due to the variable + * number of queues in each group! + */ +struct ice_aqc_add_rdma_qset_data { + __le32 parent_teid; + __le16 num_qsets; + u8 rsvd[2]; + struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[]; +}; + /* Configure Firmware Logging Command (indirect 0xFF09) * Logging Information Read Response (indirect 0xFF10) * Note: The 0xFF10 command has no input parameters. 
@@ -1880,6 +1911,7 @@ struct ice_aq_desc { struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; + struct ice_aqc_add_rdma_qset add_rdma_qset; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_fw_logging fw_logging; @@ -2028,6 +2060,7 @@ enum ice_adminq_opc { /* Tx queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + ice_aqc_opc_add_rdma_qset = 0x0C33, /* package commands */ ice_aqc_opc_download_pkg = 0x0C40, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e93b1e40f627..b8cc737ea261 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2,6 +2,7 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice_common.h" +#include "ice_lib.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" @@ -1062,7 +1063,8 @@ enum ice_status ice_check_reset(struct ice_hw *hw) GLNVM_ULD_POR_DONE_1_M |\ GLNVM_ULD_PCIER_DONE_2_M) - uld_mask = ICE_RESET_DONE_MASK; + uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ? + GLNVM_ULD_PE_DONE_M : 0); /* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { @@ -1938,6 +1940,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, caps->nvm_unified_update); break; + case ICE_AQC_CAPS_RDMA: + caps->rdma = (number == 1); + ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); + break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", @@ -1971,6 +1977,16 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) caps->maxtc = 4; ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", caps->maxtc); + if (caps->rdma) { + ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); + caps->rdma = 0; + } + + /* print message only when processing device capabilities + * during initialization. 
+ */ + if (caps == &hw->dev_caps.common_cap) + dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); } } @@ -3635,6 +3651,52 @@ do_aq: return status; } +/** + * ice_aq_add_rdma_qsets + * @hw: pointer to the hardware structure + * @num_qset_grps: Number of RDMA Qset groups + * @qset_list: list of Qset groups to be added + * @buf_size: size of buffer for indirect command + * @cd: pointer to command details structure or NULL + * + * Add Tx RDMA Qsets (0x0C33) + */ +static int +ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, + struct ice_aqc_add_rdma_qset_data *qset_list, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_rdma_qset_data *list; + struct ice_aqc_add_rdma_qset *cmd; + struct ice_aq_desc desc; + u16 i, sum_size = 0; + + cmd = &desc.params.add_rdma_qset; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); + + if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) + return -EINVAL; + + for (i = 0, list = qset_list; i < num_qset_grps; i++) { + u16 num_qsets = le16_to_cpu(list->num_qsets); + + sum_size += struct_size(list, rdma_qsets, num_qsets); + list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + + num_qsets); + } + + if (buf_size != sum_size) + return -EINVAL; + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->num_qset_grps = num_qset_grps; + + return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list, + buf_size, cd)); +} + /* End of FW Admin Queue command wrappers */ /** @@ -4133,6 +4195,162 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, } /** + * ice_cfg_vsi_rdma - configure the VSI RDMA queues + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc_bitmap: TC bitmap + * @max_rdmaqs: max RDMA queues array per TC + * + * This function adds/updates the VSI RDMA queues per TC. 
+ */ +int +ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, + u16 *max_rdmaqs) +{ + return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, + max_rdmaqs, + ICE_SCHED_NODE_OWNER_RDMA)); +} + +/** + * ice_ena_vsi_rdma_qset + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number + * @rdma_qset: pointer to RDMA Qset + * @num_qsets: number of RDMA Qsets + * @qset_teid: pointer to Qset node TEIDs + * + * This function adds RDMA Qset + */ +int +ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) +{ + struct ice_aqc_txsched_elem_data node = { 0 }; + struct ice_aqc_add_rdma_qset_data *buf; + struct ice_sched_node *parent; + enum ice_status status; + struct ice_hw *hw; + u16 i, buf_size; + int ret; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return -EIO; + hw = pi->hw; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return -EINVAL; + + buf_size = struct_size(buf, rdma_qsets, num_qsets); + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + mutex_lock(&pi->sched_lock); + + parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, + ICE_SCHED_NODE_OWNER_RDMA); + if (!parent) { + ret = -EINVAL; + goto rdma_error_exit; + } + buf->parent_teid = parent->info.node_teid; + node.parent_teid = parent->info.node_teid; + + buf->num_qsets = cpu_to_le16(num_qsets); + for (i = 0; i < num_qsets; i++) { + buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); + buf->rdma_qsets[i].info.valid_sections = + ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | + ICE_AQC_ELEM_VALID_EIR; + buf->rdma_qsets[i].info.generic = 0; + buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->rdma_qsets[i].info.cir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); + buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->rdma_qsets[i].info.eir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); + } + ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); + if (ret) { + ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); + goto rdma_error_exit; + } + node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; + for (i = 0; i < num_qsets; i++) { + node.node_teid = buf->rdma_qsets[i].qset_teid; + status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, + &node); + if (status) { + ret = ice_status_to_errno(status); + break; + } + qset_teid[i] = le32_to_cpu(node.node_teid); + } +rdma_error_exit: + mutex_unlock(&pi->sched_lock); + kfree(buf); + return ret; +} + +/** + * ice_dis_vsi_rdma_qset - free RDMA resources + * @pi: port_info struct + * @count: number of RDMA Qsets to free + * @qset_teid: TEID of Qset node + * @q_id: list of queue IDs being disabled + */ +int +ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, + u16 *q_id) +{ + struct ice_aqc_dis_txq_item *qg_list; + enum ice_status status = 0; + struct ice_hw *hw; + u16 qg_size; + int i; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return -EIO; + + hw = pi->hw; + + qg_size = struct_size(qg_list, q_id, 1); + qg_list = kzalloc(qg_size, GFP_KERNEL); + if (!qg_list) + return -ENOMEM; + + mutex_lock(&pi->sched_lock); + + for (i = 0; i < count; i++) { + struct ice_sched_node *node; + + node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); + if (!node) + continue; + + qg_list->parent_teid = node->info.parent_teid; + qg_list->num_qs = 1; + qg_list->q_id[0] = + cpu_to_le16(q_id[i] | + 
ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); + + status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, + ICE_NO_RESET, 0, NULL); + if (status) + break; + + ice_free_sched_node(pi, node); + } + + mutex_unlock(&pi->sched_lock); + kfree(qg_list); + return ice_status_to_errno(status); +} + +/** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct * diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 7a9d2dfb21a2..0fdda597fbc8 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -147,6 +147,15 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); +int +ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, + u16 *max_rdmaqs); +int +ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 *rdma_qset, u16 num_qsets, u32 *qset_teid); +int +ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, + u16 *q_id); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index df02cffdf209..857dc62da7a8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -275,6 +275,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct device *dev = ice_pf_to_dev(pf); int ret = ICE_DCB_NO_HW_CHG; + struct iidc_event *event; struct ice_vsi *pf_vsi; curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; @@ -313,6 +314,15 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) goto free_cfg; } + /* Notify AUX drivers about impending change to TCs */ + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return -ENOMEM; + + set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + /* avoid race conditions by holding the lock while disabling and * re-enabling the VSI */ @@ -640,6 +650,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) void ice_pf_dcb_recfg(struct ice_pf *pf) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + struct iidc_event *event; u8 tc_map = 0; int v, ret; @@ -675,6 +686,14 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) if (vsi->type == ICE_VSI_PF) ice_dcbnl_set_all(vsi); } + /* Notify the AUX drivers that TC change is finished */ + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return; + + set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index de38a0fc9665..65b18b3e2bcc 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -110,8 +110,6 @@ #define VPGEN_VFRSTAT_VFRD_M BIT(0) #define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) #define VPGEN_VFRTRIG_VFSWR_M BIT(0) -#define PFHMC_ERRORDATA 0x00520500 -#define PFHMC_ERRORINFO 0x00520400 #define GLINT_CTL 0x0016CC54 #define GLINT_CTL_DIS_AUTOMASK_M BIT(0) #define GLINT_CTL_ITR_GRAN_200_S 16 @@ -160,6 +158,7 @@ #define PFINT_OICR_GRST_M BIT(20) #define PFINT_OICR_PCI_EXCEPTION_M BIT(21) #define 
PFINT_OICR_HMC_ERR_M BIT(26) +#define PFINT_OICR_PE_PUSH_M BIT(27) #define PFINT_OICR_PE_CRITERR_M BIT(28) #define PFINT_OICR_VFLR_M BIT(29) #define PFINT_OICR_SWINT_M BIT(31) diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c new file mode 100644 index 000000000000..1f2afdf6cd48 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021, Intel Corporation. */ + +/* Inter-Driver Communication */ +#include "ice.h" +#include "ice_lib.h" +#include "ice_dcb_lib.h" + +/** + * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct + * @pf: pointer to PF struct + * + * This function has to be called with a device_lock on the + * pf->adev.dev to avoid race conditions. + */ +static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf) +{ + struct auxiliary_device *adev; + + adev = pf->adev; + if (!adev || !adev->dev.driver) + return NULL; + + return container_of(adev->dev.driver, struct iidc_auxiliary_drv, + adrv.driver); +} + +/** + * ice_send_event_to_aux - send event to RDMA AUX driver + * @pf: pointer to PF struct + * @event: event struct + */ +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) +{ + struct iidc_auxiliary_drv *iadrv; + + if (!pf->adev) + return; + + device_lock(&pf->adev->dev); + iadrv = ice_get_auxiliary_drv(pf); + if (iadrv && iadrv->event_handler) + iadrv->event_handler(pf, event); + device_unlock(&pf->adev->dev); +} + +/** + * ice_find_vsi - Find the VSI from VSI ID + * @pf: The PF pointer to search in + * @vsi_num: The VSI ID to search for + */ +static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) +{ + int i; + + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) + return pf->vsi[i]; + return NULL; +} + +/** + * ice_add_rdma_qset - Add Leaf Node for RDMA Qset + * @pf: PF struct + * @qset: Resource to be allocated + */ +int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +{ + u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS]; + struct ice_vsi *vsi; + struct device *dev; + u32 qset_teid; + u16 qs_handle; + int status; + int i; + + if (WARN_ON(!pf || !qset)) + return -EINVAL; + + dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EINVAL; + + vsi = ice_get_main_vsi(pf); + if (!vsi) { + dev_err(dev, "RDMA QSet invalid VSI\n"); + return -EINVAL; + } + + ice_for_each_traffic_class(i) + max_rdmaqs[i] = 0; + + max_rdmaqs[qset->tc]++; + qs_handle = qset->qs_handle; + + status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_rdmaqs); + if (status) { + dev_err(dev, "Failed VSI RDMA Qset config\n"); + return status; + } + + status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc, + &qs_handle, 1, &qset_teid); + if (status) { + dev_err(dev, "Failed VSI RDMA Qset enable\n"); + return status; + } + vsi->qset_handle[qset->tc] = qset->qs_handle; + qset->teid = qset_teid; + + return 0; +} +EXPORT_SYMBOL_GPL(ice_add_rdma_qset); + +/** + * ice_del_rdma_qset - Delete leaf node for RDMA Qset + * @pf: PF struct + * @qset: Resource to be freed + */ +int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +{ + struct ice_vsi *vsi; + u32 teid; + u16 q_id; + + if (WARN_ON(!pf || !qset)) + return -EINVAL; + + vsi = ice_find_vsi(pf, qset->vport_id); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n"); + return -EINVAL; + } + + q_id = qset->qs_handle; + teid = qset->teid; + + 
vsi->qset_handle[qset->tc] = 0; + + return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id); +} +EXPORT_SYMBOL_GPL(ice_del_rdma_qset); + +/** + * ice_rdma_request_reset - accept request from RDMA to perform a reset + * @pf: struct for PF + * @reset_type: type of reset + */ +int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type) +{ + enum ice_reset_req reset; + + if (WARN_ON(!pf)) + return -EINVAL; + + switch (reset_type) { + case IIDC_PFR: + reset = ICE_RESET_PFR; + break; + case IIDC_CORER: + reset = ICE_RESET_CORER; + break; + case IIDC_GLOBR: + reset = ICE_RESET_GLOBR; + break; + default: + dev_err(ice_pf_to_dev(pf), "incorrect reset request\n"); + return -EINVAL; + } + + return ice_schedule_reset(pf, reset); +} +EXPORT_SYMBOL_GPL(ice_rdma_request_reset); + +/** + * ice_rdma_update_vsi_filter - update main VSI filters for RDMA + * @pf: pointer to struct for PF + * @vsi_id: VSI HW idx to update filter on + * @enable: bool whether to enable or disable filters + */ +int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) +{ + struct ice_vsi *vsi; + int status; + + if (WARN_ON(!pf)) + return -EINVAL; + + vsi = ice_find_vsi(pf, vsi_id); + if (!vsi) + return -EINVAL; + + status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable); + if (status) { + dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n", + enable ? "en" : "dis"); + } else { + if (enable) + vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + else + vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + } + + return status; +} +EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter); + +/** + * ice_get_qos_params - parse QoS params for RDMA consumption + * @pf: pointer to PF struct + * @qos: set of QoS values + */ +void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) +{ + struct ice_dcbx_cfg *dcbx_cfg; + unsigned int i; + u32 up2tc; + + dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); + + qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg); + for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) + qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; +} +EXPORT_SYMBOL_GPL(ice_get_qos_params); + +/** + * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver + * @pf: board private structure to initialize + */ +static int ice_reserve_rdma_qvector(struct ice_pf *pf) +{ + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + int index; + + index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix, + ICE_RES_RDMA_VEC_ID); + if (index < 0) + return index; + pf->num_avail_sw_msix -= pf->num_rdma_msix; + pf->rdma_base_vector = (u16)index; + } + return 0; +} + +/** + * ice_adev_release - function to be mapped to AUX dev's release op + * @dev: pointer to device to free + */ +static void ice_adev_release(struct device *dev) +{ + struct iidc_auxiliary_dev *iadev; + + iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev); + kfree(iadev); +} + +/** + * ice_plug_aux_dev - allocate and register AUX device + * @pf: pointer to pf struct + */ +int ice_plug_aux_dev(struct ice_pf *pf) +{ + struct iidc_auxiliary_dev *iadev; + struct auxiliary_device *adev; + int ret; + + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); + if (!iadev) + return -ENOMEM; + + adev = &iadev->adev; + pf->adev = adev; + iadev->pf = pf; + + adev->id = pf->aux_idx; + adev->dev.release = ice_adev_release; + adev->dev.parent = &pf->pdev->dev; + adev->name = IIDC_RDMA_ROCE_NAME; + + 
ret = auxiliary_device_init(adev); + if (ret) { + pf->adev = NULL; + kfree(iadev); + return ret; + } + + ret = auxiliary_device_add(adev); + if (ret) { + pf->adev = NULL; + auxiliary_device_uninit(adev); + return ret; + } + + return 0; +} + +/* ice_unplug_aux_dev - unregister and free AUX device + * @pf: pointer to pf struct + */ +void ice_unplug_aux_dev(struct ice_pf *pf) +{ + if (!pf->adev) + return; + + auxiliary_device_delete(pf->adev); + auxiliary_device_uninit(pf->adev); + pf->adev = NULL; +} + +/** + * ice_init_rdma - initializes PF for RDMA use + * @pf: ptr to ice_pf + */ +int ice_init_rdma(struct ice_pf *pf) +{ + struct device *dev = &pf->pdev->dev; + int ret; + + /* Reserve vector resources */ + ret = ice_reserve_rdma_qvector(pf); + if (ret < 0) { + dev_err(dev, "failed to reserve vectors for RDMA\n"); + return ret; + } + + return ice_plug_aux_dev(pf); +} diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h new file mode 100644 index 000000000000..b7796b8aecbd --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. */ + +#ifndef _ICE_IDC_INT_H_ +#define _ICE_IDC_INT_H_ + +#include <linux/net/intel/iidc.h> +#include "ice.h" + +struct ice_pf; + +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event); + +#endif /* !_ICE_IDC_INT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 4599fc3b4ed8..37c18c66b5c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -172,6 +172,7 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info) } ice_clear_sriov_cap(pf); + ice_clear_rdma_cap(pf); lag->bonded = true; lag->role = ICE_LAG_UNSET; @@ -222,6 +223,7 @@ ice_lag_unlink(struct ice_lag *lag, } ice_set_sriov_cap(pf); + ice_set_rdma_cap(pf); lag->bonded = false; lag->role = ICE_LAG_NONE; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 82e2ce23df3d..56e1ae558761 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -617,6 +617,17 @@ bool ice_is_safe_mode(struct ice_pf *pf) } /** + * ice_is_aux_ena + * @pf: pointer to the PF struct + * + * returns true if AUX devices/drivers are supported, false otherwise + */ +bool ice_is_aux_ena(struct ice_pf *pf) +{ + return test_bit(ICE_FLAG_AUX_ENA, pf->flags); +} + +/** * ice_vsi_clean_rss_flow_fld - Delete RSS configuration * @vsi: the VSI being cleaned up * diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 511c2316c40c..5ec857f71459 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -102,7 +102,7 @@ enum ice_status ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); bool ice_is_safe_mode(struct ice_pf *pf); - +bool ice_is_aux_ena(struct ice_pf *pf); bool ice_is_dflt_vsi_in_use(struct ice_sw *sw); bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 4ee85a217c6f..254cfc14d6b4 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -35,6 +35,8 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX MODULE_PARM_DESC(debug, "netif 
level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ +static DEFINE_IDA(ice_aux_ida); + static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; @@ -454,6 +456,8 @@ ice_prepare_for_reset(struct ice_pf *pf) if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; + ice_unplug_aux_dev(pf); + /* Notify VFs of impending reset */ if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); @@ -2118,6 +2122,8 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) return -EBUSY; } + ice_unplug_aux_dev(pf); + switch (reset) { case ICE_RESET_PFR: set_bit(ICE_PFR_REQ, pf->state); @@ -2608,6 +2614,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf) PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_VFLR_M | PFINT_OICR_HMC_ERR_M | + PFINT_OICR_PE_PUSH_M | PFINT_OICR_PE_CRITERR_M); wr32(hw, PFINT_OICR_ENA, val); @@ -2678,8 +2685,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* If a reset cycle isn't already in progress, we set a bit in * pf->state so that the service task can start a reset/rebuild. - * We also make note of which reset happened so that peer - * devices/drivers can be informed. */ if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { if (reset == ICE_RESET_CORER) @@ -2706,11 +2711,19 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } } - if (oicr & PFINT_OICR_HMC_ERR_M) { - ena_mask &= ~PFINT_OICR_HMC_ERR_M; - dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n", - rd32(hw, PFHMC_ERRORINFO), - rd32(hw, PFHMC_ERRORDATA)); +#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) + if (oicr & ICE_AUX_CRIT_ERR) { + struct iidc_event *event; + + ena_mask &= ~ICE_AUX_CRIT_ERR; + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_CRIT_ERR, event->type); + /* report the entire OICR value to AUX driver */ + event->reg = oicr; + ice_send_event_to_aux(pf, event); + kfree(event); + } } /* Report any remaining unexpected interrupts */ @@ -2720,8 +2733,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* If a critical error is pending there is no choice but to * reset the device. 
*/ - if (oicr & (PFINT_OICR_PE_CRITERR_M | - PFINT_OICR_PCI_EXCEPTION_M | + if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_ECC_ERR_M)) { set_bit(ICE_PFR_REQ, pf->state); ice_service_task_schedule(pf); @@ -3276,6 +3288,12 @@ static void ice_set_pf_caps(struct ice_pf *pf) { struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + clear_bit(ICE_FLAG_AUX_ENA, pf->flags); + if (func_caps->common_cap.rdma) { + set_bit(ICE_FLAG_RDMA_ENA, pf->flags); + set_bit(ICE_FLAG_AUX_ENA, pf->flags); + } clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); if (func_caps->common_cap.dcb) set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); @@ -3355,11 +3373,12 @@ static int ice_init_pf(struct ice_pf *pf) */ static int ice_ena_msix_range(struct ice_pf *pf) { - int v_left, v_actual, v_other, v_budget = 0; + int num_cpus, v_left, v_actual, v_other, v_budget = 0; struct device *dev = ice_pf_to_dev(pf); int needed, err, i; v_left = pf->hw.func_caps.common_cap.num_msix_vectors; + num_cpus = num_online_cpus(); /* reserve for LAN miscellaneous handler */ needed = ICE_MIN_LAN_OICR_MSIX; @@ -3381,13 +3400,23 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_other = v_budget; /* reserve vectors for LAN traffic */ - needed = min_t(int, num_online_cpus(), v_left); + needed = num_cpus; if (v_left < needed) goto no_hw_vecs_left_err; pf->num_lan_msix = needed; v_budget += needed; v_left -= needed; + /* reserve vectors for RDMA auxiliary driver */ + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; + if (v_left < needed) + goto no_hw_vecs_left_err; + pf->num_rdma_msix = needed; + v_budget += needed; + v_left -= needed; + } + pf->msix_entries = devm_kcalloc(dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { @@ -3417,16 +3446,46 @@ static int ice_ena_msix_range(struct ice_pf *pf) err = -ERANGE; goto msix_err; } else { - int v_traffic = v_actual - v_other; + int v_remain = v_actual - v_other; + int v_rdma = 0, v_min_rdma = 0; + + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + /* Need at least 1 interrupt in addition to + * AEQ MSIX + */ + v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; + v_min_rdma = ICE_MIN_RDMA_MSIX; + } if (v_actual == ICE_MIN_MSIX || - v_traffic < ICE_MIN_LAN_TXRX_MSIX) + v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { + dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + + pf->num_rdma_msix = 0; pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - else - pf->num_lan_msix = v_traffic; + } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || + (v_remain - v_rdma < v_rdma)) { + /* Support minimum RDMA and give remaining + * vectors to LAN MSIX + */ + pf->num_rdma_msix = v_min_rdma; + pf->num_lan_msix = v_remain - v_min_rdma; + } else { + /* Split remaining MSIX with RDMA after + * accounting for AEQ MSIX + */ + pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + + ICE_RDMA_NUM_AEQ_MSIX; + pf->num_lan_msix = v_remain - pf->num_rdma_msix; + } dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", pf->num_lan_msix); + + if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", + pf->num_rdma_msix); } } @@ -3441,6 +3500,7 @@ no_hw_vecs_left_err: needed, v_left); err = -ERANGE; exit_err: + pf->num_rdma_msix = 0; pf->num_lan_msix = 0; return err; } @@ -4268,8 +4328,29 @@ probe_done: /* ready to go, so clear down state bit */ clear_bit(ICE_DOWN, pf->state); + if (ice_is_aux_ena(pf)) { + pf->aux_idx = 
ida_alloc(&ice_aux_ida, GFP_KERNEL); + if (pf->aux_idx < 0) { + dev_err(dev, "Failed to allocate device ID for AUX driver\n"); + err = -ENOMEM; + goto err_netdev_reg; + } + + err = ice_init_rdma(pf); + if (err) { + dev_err(dev, "Failed to initialize RDMA: %d\n", err); + err = -EIO; + goto err_init_aux_unroll; + } + } else { + dev_warn(dev, "RDMA is not supported on this device\n"); + } + return 0; +err_init_aux_unroll: + pf->adev = NULL; + ida_free(&ice_aux_ida, pf->aux_idx); err_netdev_reg: err_send_version_unroll: ice_vsi_release_all(pf); @@ -4379,10 +4460,12 @@ static void ice_remove(struct pci_dev *pdev) ice_free_vfs(pf); } - set_bit(ICE_DOWN, pf->state); ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); + ice_unplug_aux_dev(pf); + ida_free(&ice_aux_ida, pf->aux_idx); + set_bit(ICE_DOWN, pf->state); mutex_destroy(&(&pf->hw)->fdir_fltr_lock); ice_deinit_lag(pf); @@ -4538,6 +4621,8 @@ static int __maybe_unused ice_suspend(struct device *dev) */ disabled = ice_service_task_stop(pf); + ice_unplug_aux_dev(pf); + /* Already suspended?, then there is nothing to do */ if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { if (!disabled) @@ -6208,6 +6293,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* if we get here, reset flow is successful */ clear_bit(ICE_RESET_FAILED, pf->state); + + ice_plug_aux_dev(pf); return; err_vsi_rebuild: @@ -6246,7 +6333,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + struct iidc_event *event; u8 count = 0; + int err = 0; if (new_mtu == (int)netdev->mtu) { netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); @@ -6279,27 +6368,38 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return -ENOMEM; + + set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); + netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { - int err; - err = ice_down(vsi); if (err) { netdev_err(netdev, "change MTU if_down err %d\n", err); - return err; + goto event_after; } err = ice_up(vsi); if (err) { netdev_err(netdev, "change MTU if_up err %d\n", err); - return err; + goto event_after; } } netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); - return 0; +event_after: + set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + + return err; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2f097637e405..a17e24e54cf3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -596,6 +596,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) } /** + * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * @tc: TC number + * @new_numqs: number of queues + */ +static enum ice_status +ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) +{ + struct ice_vsi_ctx *vsi_ctx; + struct ice_q_ctx *q_ctx; + + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + /* allocate RDMA queue contexts */ + if (!vsi_ctx->rdma_q_ctx[tc]) { + 
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), + new_numqs, + sizeof(*q_ctx), + GFP_KERNEL); + if (!vsi_ctx->rdma_q_ctx[tc]) + return ICE_ERR_NO_MEMORY; + vsi_ctx->num_rdma_q_entries[tc] = new_numqs; + return 0; + } + /* num queues are increased, update the queue contexts */ + if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) { + u16 prev_num = vsi_ctx->num_rdma_q_entries[tc]; + + q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, + sizeof(*q_ctx), GFP_KERNEL); + if (!q_ctx) + return ICE_ERR_NO_MEMORY; + memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], + prev_num * sizeof(*q_ctx)); + devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); + vsi_ctx->rdma_q_ctx[tc] = q_ctx; + vsi_ctx->num_rdma_q_entries[tc] = new_numqs; + } + return 0; +} + +/** * ice_aq_rl_profile - performs a rate limiting task * @hw: pointer to the HW struct * @opcode: opcode for add, query, or remove profile(s) @@ -1774,13 +1818,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, if (!vsi_ctx) return ICE_ERR_PARAM; - prev_numqs = vsi_ctx->sched.max_lanq[tc]; + if (owner == ICE_SCHED_NODE_OWNER_LAN) + prev_numqs = vsi_ctx->sched.max_lanq[tc]; + else + prev_numqs = vsi_ctx->sched.max_rdmaq[tc]; /* num queues are not changed or less than the previous number */ if (new_numqs <= prev_numqs) return status; - status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); - if (status) - return status; + if (owner == ICE_SCHED_NODE_OWNER_LAN) { + status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); + if (status) + return status; + } else { + status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs); + if (status) + return status; + } if (new_numqs) ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); @@ -1795,7 +1848,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, new_num_nodes, owner); if (status) return status; - vsi_ctx->sched.max_lanq[tc] = new_numqs; + if (owner == ICE_SCHED_NODE_OWNER_LAN) + vsi_ctx->sched.max_lanq[tc] = new_numqs; + else + vsi_ctx->sched.max_rdmaq[tc] = new_numqs; return 0; } @@ -1861,6 +1917,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, * recreate the child nodes all the time in these cases. */ vsi_ctx->sched.max_lanq[tc] = 0; + vsi_ctx->sched.max_rdmaq[tc] = 0; } /* update the VSI child nodes */ @@ -1990,6 +2047,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) } if (owner == ICE_SCHED_NODE_OWNER_LAN) vsi_ctx->sched.max_lanq[i] = 0; + else + vsi_ctx->sched.max_rdmaq[i] = 0; } status = 0; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 357d3073d814..3b6c1420aa7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Intel Corporation. 
*/ +#include "ice_lib.h" #include "ice_switch.h" #define ICE_ETH_DA_OFFSET 0 @@ -302,6 +303,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); vsi->lan_q_ctx[i] = NULL; } + if (vsi->rdma_q_ctx[i]) { + devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]); + vsi->rdma_q_ctx[i] = NULL; + } } } @@ -423,6 +428,29 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, } /** + * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI + * @hw: pointer to HW struct + * @vsi_handle: VSI SW index + * @enable: boolean for enable/disable + */ +int +ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) +{ + struct ice_vsi_ctx *ctx; + + ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!ctx) + return -EIO; + + if (enable) + ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + else + ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; + + return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL)); +} + +/** * ice_aq_alloc_free_vsi_list * @hw: pointer to the HW struct * @vsi_list_id: VSI list ID returned or used for lookup diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 8b4f9d35c860..6bb7358ff67b 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -26,6 +26,8 @@ struct ice_vsi_ctx { u8 vf_num; u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS]; struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS]; + u16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS]; + struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS]; }; enum ice_sw_fwd_act_type { @@ -223,6 +225,8 @@ enum ice_status ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); enum ice_status ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int +ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 4474dd6a7ba1..c580b87c76ee 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -45,6 +45,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R) #define ICE_DBG_FLOW BIT_ULL(9) #define ICE_DBG_SW BIT_ULL(13) #define ICE_DBG_SCHED BIT_ULL(14) +#define ICE_DBG_RDMA BIT_ULL(15) #define ICE_DBG_PKG BIT_ULL(16) #define ICE_DBG_RES BIT_ULL(17) #define ICE_DBG_AQ_MSG BIT_ULL(24) @@ -262,6 +263,7 @@ struct ice_hw_common_caps { u8 rss_table_entry_width; /* RSS Entry width in bits */ u8 dcb; + u8 rdma; bool nvm_update_pending_nvm; bool nvm_update_pending_orom; @@ -440,6 +442,7 @@ struct ice_sched_node { u8 tc_num; u8 owner; #define ICE_SCHED_NODE_OWNER_LAN 0 +#define ICE_SCHED_NODE_OWNER_RDMA 2 }; /* Access Macros for Tx Sched Elements data */ @@ -511,6 +514,7 @@ struct ice_sched_vsi_info { struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; struct list_head list_entry; u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; + u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS]; }; /* driver defines the policy */ diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index f41387a8969f..41f24b5241ab 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -4,6 +4,8 @@ #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ +#include <linux/auxiliary_bus.h> + #define I40E_CLIENT_STR_LENGTH 
10

 /* Client interface version should be updated anytime there is a change in the
@@ -48,7 +50,7 @@ struct i40e_qv_info {
 
 struct i40e_qvlist_info {
 	u32 num_vectors;
-	struct i40e_qv_info qv_info[1];
+	struct i40e_qv_info qv_info[];
 };
 
@@ -78,6 +80,7 @@ struct i40e_info {
 	u8 lanmac[6];
 	struct net_device *netdev;
 	struct pci_dev *pcidev;
+	struct auxiliary_device *aux_dev;
 	u8 __iomem *hw_addr;
 	u8 fid;	/* function id, PF id or VF id */
 #define I40E_CLIENT_FTYPE_PF 0
@@ -100,6 +103,11 @@ struct i40e_info {
 	u32 fw_build;	/* firmware build number */
 };
 
+struct i40e_auxiliary_device {
+	struct auxiliary_device aux_dev;
+	struct i40e_info *ldev;
+};
+
 #define I40E_CLIENT_RESET_LEVEL_PF	1
 #define I40E_CLIENT_RESET_LEVEL_CORE	2
 #define I40E_CLIENT_VSI_FLAG_TCP_ENABLE	BIT(1)
@@ -187,6 +195,8 @@ static inline bool i40e_client_is_registered(struct i40e_client *client)
 	return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
 }
 
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
+void i40e_client_device_unregister(struct i40e_info *ldev);
 /* used by clients */
 int i40e_register_client(struct i40e_client *client);
 int i40e_unregister_client(struct i40e_client *client);
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
new file mode 100644
index 000000000000..e32f6712aee0
--- /dev/null
+++ b/include/linux/net/intel/iidc.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _IIDC_H_
+#define _IIDC_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dcbnl.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+enum iidc_event_type {
+	IIDC_EVENT_BEFORE_MTU_CHANGE,
+	IIDC_EVENT_AFTER_MTU_CHANGE,
+	IIDC_EVENT_BEFORE_TC_CHANGE,
+	IIDC_EVENT_AFTER_TC_CHANGE,
+	IIDC_EVENT_CRIT_ERR,
+	IIDC_EVENT_NBITS	/* must be last */
+};
+
+enum iidc_reset_type {
+	IIDC_PFR,
+	IIDC_CORER,
+	IIDC_GLOBR,
+};
+
+#define IIDC_MAX_USER_PRIORITY	8
+
+/* Struct to hold per RDMA Qset info */
+struct iidc_rdma_qset_params {
+	/* Qset TEID returned to the RDMA driver in
+	 * ice_add_rdma_qset and used by RDMA driver
	 * for calls to ice_del_rdma_qset
+	 */
+	u32 teid;	/* Qset TEID */
+	u16 qs_handle;	/* RDMA driver provides this */
+	u16 vport_id;	/* VSI index */
+	u8 tc;		/* TC branch the Qset should belong to */
+};
+
+struct iidc_qos_info {
+	u64 tc_ctx;
+	u8 rel_bw;
+	u8 prio_type;
+	u8 egress_virt_up;
+	u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_qos_params {
+	struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+	u8 up2tc[IIDC_MAX_USER_PRIORITY];
+	u8 vport_relative_bw;
+	u8 vport_priority_type;
+	u8 num_tc;
+};
+
+struct iidc_event {
+	DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
+	u32 reg;
+};
+
+struct ice_pf;
+
+int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
+
+#define IIDC_RDMA_ROCE_NAME	"roce"
+
+/* Structure representing auxiliary driver tailored information about the core
+ * PCI dev, each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+
+struct iidc_auxiliary_dev {
+	struct auxiliary_device adev;
+	struct ice_pf *pf;
+};
+
+/* structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_auxiliary_drv {
+	struct auxiliary_driver adrv;
+	/* This event_handler is meant to be a blocking call. For instance,
+	 * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
+	 * return until the auxiliary driver is ready for the MTU change to
+	 * happen.
+	 */
+	void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
+};
+
+#endif /* _IIDC_H_*/
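A note on the allocation changes in i40iw_main.c, i40e_client.c and the i40e_client.h hunk above: converting qv_info[1] to a C99 flexible array member lets the call sites use struct_size() instead of open-coded sizeof arithmetic with a trailing "- 1" element correction. A minimal sketch of the pattern, using illustrative struct names rather than the drivers' own:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative stand-ins for i40e_qv_info/i40e_qvlist_info */
struct example_qv_info {
	u32 v_idx;
};

struct example_qvlist_info {
	u32 num_vectors;
	struct example_qv_info qv_info[];	/* was qv_info[1] */
};

static struct example_qvlist_info *example_alloc_qvlist(u32 n)
{
	struct example_qvlist_info *q;

	/* struct_size(q, qv_info, n) == sizeof(*q) + n * sizeof(q->qv_info[0]),
	 * computed with overflow checking; no "- 1" fixup is needed for a
	 * phantom first element any more.
	 */
	q = kzalloc(struct_size(q, qv_info, n), GFP_KERNEL);
	if (q)
		q->num_vectors = n;
	return q;
}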
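ice_add_rdma_qset()/ice_del_rdma_qset() above are the RDMA driver's handles on the new Add Tx RDMA Queue Set admin queue command (0x0C33) and the ICE_SCHED_NODE_OWNER_RDMA scheduler plumbing. A hedged fragment showing the intended calling pattern; it reuses the includes from the binding sketch before the patch, and my_qs_handle/vsi_num stand in for the RDMA driver's own bookkeeping:

static int example_create_qset(struct ice_pf *pf, u16 vsi_num, u16 my_qs_handle,
			       struct iidc_rdma_qset_params *qset)
{
	int err;

	memset(qset, 0, sizeof(*qset));
	qset->qs_handle = my_qs_handle;	/* handle the RDMA driver chooses */
	qset->vport_id = vsi_num;	/* used later by ice_del_rdma_qset() */
	qset->tc = 0;			/* TC branch to hang the Qset under */

	err = ice_add_rdma_qset(pf, qset);	/* attaches to the main VSI */
	if (err)
		return err;

	/* On success qset->teid holds the scheduler leaf TEID; the caller
	 * keeps the whole struct and passes it back to ice_del_rdma_qset()
	 * to tear the node down again.
	 */
	return 0;
}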
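The event plumbing added in ice_main.c and ice_dcb_lib.c (ice_send_event_to_aux() bracketing MTU and TC changes, and forwarding critical OICR errors) is consumed through the blocking event_handler op in iidc_auxiliary_drv. Expanding the example_event_handler stub from the sketch before the patch; the OICR bit layout is internal to the ice driver (ice_hw_autogen.h), so a real auxiliary driver would carry its own copies of those masks:

static void example_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	if (test_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type) ||
	    test_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type)) {
		/* Quiesce Qsets/QPs tied to the current TC/MTU layout; the
		 * core driver blocks in ice_send_event_to_aux() until this
		 * returns, which is what makes the "before" events useful.
		 */
	}

	if (test_bit(IIDC_EVENT_CRIT_ERR, event->type)) {
		/* event->reg carries the raw OICR value (the core driver
		 * forwards HMC_ERR, PE_PUSH and PE_CRITERR here). After
		 * inspecting it, the RDMA driver may escalate to a reset:
		 */
		ice_rdma_request_reset(pf, IIDC_CORER);
	}
}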
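For reference, the MSI-X budgeting added to ice_ena_msix_range() works out as follows, assuming ICE_MIN_LAN_TXRX_MSIX is 1 and the existing flow-director reservation of ICE_FDIR_MSIX (2) applies. With 16 online CPUs the driver requests 1 (OICR) + 2 (FDIR) + 16 (LAN) + 16 + 4 (RDMA: one vector per CPU plus ICE_RDMA_NUM_AEQ_MSIX) = 39 vectors. If the device grants only, say, 24 of them, then v_remain = 24 - 3 = 21, and the final branch splits the remainder around the AEQ reservation: pf->num_rdma_msix = (21 - 4) / 2 + 4 = 12 and pf->num_lan_msix = 9. RDMA is disabled outright only when v_remain cannot cover even ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX.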